scsi_debug: fix compare and write errors
[cascardo/linux.git] drivers/scsi/scsi_debug.c
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97
98
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
106 #define DEF_ATO 1
107 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW     0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB   0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
138 #define DEF_STRICT 0
139 #define DELAY_OVERRIDDEN -9999
140
141 /* bit mask values for scsi_debug_opts */
142 #define SCSI_DEBUG_OPT_NOISE   1
143 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
144 #define SCSI_DEBUG_OPT_TIMEOUT   4
145 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
146 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
147 #define SCSI_DEBUG_OPT_DIF_ERR   32
148 #define SCSI_DEBUG_OPT_DIX_ERR   64
149 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
150 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
151 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
152 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
153 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
154 #define SCSI_DEBUG_OPT_N_WCE    0x1000
155 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
156 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
157 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
158 /* When "every_nth" > 0 then modulo "every_nth" commands:
159  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
160  *   - a RECOVERED_ERROR is simulated on successful read and write
161  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
162  *   - a TRANSPORT_ERROR is simulated on successful read and write
163  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
164  *
165  * When "every_nth" < 0 then after "- every_nth" commands:
166  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167  *   - a RECOVERED_ERROR is simulated on successful read and write
168  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169  *   - a TRANSPORT_ERROR is simulated on successful read and write
170  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
171  * This will continue until some other action occurs (e.g. the user
172  * writing a new value (other than -1 or 1) to every_nth via sysfs).
173  */
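
/*
 * Illustrative sketch only, not part of the original driver: one way the
 * every_nth gating described above could be computed. The helper name
 * sdebug_every_nth_hit() is hypothetical; the real checks are made in the
 * command processing paths further below in this file.
 */
static inline bool sdebug_every_nth_hit(int every_nth, int cmnd_count)
{
        if (every_nth > 0)                      /* modulo "every_nth" commands */
                return (cmnd_count % every_nth) == 0;
        if (every_nth < 0)                      /* after "- every_nth" commands */
                return cmnd_count >= -every_nth;
        return false;                           /* 0 disables this injection */
}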
174
175 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
176  * priority order. In the subset implemented here lower numbers have higher
177  * priority. The UA numbers should be a sequence starting from 0 with
178  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
179 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
180 #define SDEBUG_UA_BUS_RESET 1
181 #define SDEBUG_UA_MODE_CHANGED 2
182 #define SDEBUG_UA_CAPACITY_CHANGED 3
183 #define SDEBUG_NUM_UAS 4
184
185 /* for check_readiness() */
186 #define UAS_ONLY 1      /* check for UAs only */
187 #define UAS_TUR 0       /* if no UAs then check if media access possible */
188
189 /* when SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error
190  * is simulated at this sector on read commands: */
191 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
192 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
193
194 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
195  * or "peripheral device" addressing (value 0) */
196 #define SAM2_LUN_ADDRESS_METHOD 0
197 #define SAM2_WLUN_REPORT_LUNS 0xc101
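
/*
 * Illustrative sketch only, not part of the original driver: how the two
 * addressing methods named above would place a LUN into the first two bytes
 * of the 8 byte LUN field returned by REPORT LUNS. The helper name
 * sdebug_encode_lun() is hypothetical.
 */
static inline void sdebug_encode_lun(unsigned int lun, unsigned char *p2,
                                     int flat_space)
{
        if (flat_space)         /* address method 01b: flat space */
                put_unaligned_be16(0x4000 | (lun & 0x3fff), p2);
        else                    /* address method 00b: peripheral device, lun < 256 */
                put_unaligned_be16(lun & 0xff, p2);
}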
198
199 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
200  * (for response) at one time. Can be reduced by max_queue option. Command
201  * responses are not queued when delay=0 and ndelay=0. The per-device
202  * DEF_CMD_PER_LUN can be changed via sysfs:
203  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
204  * SCSI_DEBUG_CANQUEUE. */
205 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a WORD is bits in a long */
206 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
207 #define DEF_CMD_PER_LUN  255
208
209 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
210 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
211 #endif
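
/*
 * For reference, the arithmetic implied above: with SCSI_DEBUG_CANQUEUE_WORDS
 * set to 9, SCSI_DEBUG_CANQUEUE is 9 * BITS_PER_LONG, i.e. 9 * 64 = 576
 * queueable commands on a 64 bit build (9 * 32 = 288 on 32 bit), so the
 * default DEF_CMD_PER_LUN of 255 always fits and the #warning stays silent.
 */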
212
213 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
214 enum sdeb_opcode_index {
215         SDEB_I_INVALID_OPCODE = 0,
216         SDEB_I_INQUIRY = 1,
217         SDEB_I_REPORT_LUNS = 2,
218         SDEB_I_REQUEST_SENSE = 3,
219         SDEB_I_TEST_UNIT_READY = 4,
220         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
221         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
222         SDEB_I_LOG_SENSE = 7,
223         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
224         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
225         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
226         SDEB_I_START_STOP = 11,
227         SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
228         SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
229         SDEB_I_MAINT_IN = 14,
230         SDEB_I_MAINT_OUT = 15,
231         SDEB_I_VERIFY = 16,             /* 10 only */
232         SDEB_I_VARIABLE_LEN = 17,
233         SDEB_I_RESERVE = 18,            /* 6, 10 */
234         SDEB_I_RELEASE = 19,            /* 6, 10 */
235         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
236         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
237         SDEB_I_ATA_PT = 22,             /* 12, 16 */
238         SDEB_I_SEND_DIAG = 23,
239         SDEB_I_UNMAP = 24,
240         SDEB_I_XDWRITEREAD = 25,        /* 10 only */
241         SDEB_I_WRITE_BUFFER = 26,
242         SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
243         SDEB_I_SYNC_CACHE = 28,         /* 10 only */
244         SDEB_I_COMP_WRITE = 29,
245         SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
246 };
247
248 static const unsigned char opcode_ind_arr[256] = {
249 /* 0x0; 0x0->0x1f: 6 byte cdbs */
250         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
251             0, 0, 0, 0,
252         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
253         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
254             SDEB_I_RELEASE,
255         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
256             SDEB_I_ALLOW_REMOVAL, 0,
257 /* 0x20; 0x20->0x3f: 10 byte cdbs */
258         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
259         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
260         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
261         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
262 /* 0x40; 0x40->0x5f: 10 byte cdbs */
263         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
264         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
265         0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
266             SDEB_I_RELEASE,
267         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
268 /* 0x60; 0x60->0x7d are reserved */
269         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
270         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271         0, SDEB_I_VARIABLE_LEN,
272 /* 0x80; 0x80->0x9f: 16 byte cdbs */
273         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
274         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
275         0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
276         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
277 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
278         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
279              SDEB_I_MAINT_OUT, 0, 0, 0,
280         SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
281              0, 0, 0, 0,
282         0, 0, 0, 0, 0, 0, 0, 0,
283         0, 0, 0, 0, 0, 0, 0, 0,
284 /* 0xc0; 0xc0->0xff: vendor specific */
285         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
286         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
289 };
290
291 #define F_D_IN                  1
292 #define F_D_OUT                 2
293 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
294 #define F_D_UNKN                8
295 #define F_RL_WLUN_OK            0x10
296 #define F_SKIP_UA               0x20
297 #define F_DELAY_OVERR           0x40
298 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
299 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
300 #define F_INV_OP                0x200
301 #define F_FAKE_RW               0x400
302 #define F_M_ACCESS              0x800   /* media access */
303
304 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
305 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
306 #define FF_SA (F_SA_HIGH | F_SA_LOW)
307
308 struct sdebug_dev_info;
309 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
310 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
311 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
312 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
313 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
314 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
315 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
316 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
329
330 struct opcode_info_t {
331         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff
332                                  * for terminating element */
333         u8 opcode;              /* if num_attached > 0, preferred */
334         u16 sa;                 /* service action */
335         u32 flags;              /* OR-ed set of the F_* flags above */
336         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
337         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
338         u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
339                                 /* ignore cdb bytes after position 15 */
340 };
341
342 static const struct opcode_info_t msense_iarr[1] = {
343         {0, 0x1a, 0, F_D_IN, NULL, NULL,
344             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
345 };
346
347 static const struct opcode_info_t mselect_iarr[1] = {
348         {0, 0x15, 0, F_D_OUT, NULL, NULL,
349             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
350 };
351
352 static const struct opcode_info_t read_iarr[3] = {
353         {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
354             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
355              0, 0, 0, 0} },
356         {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
357             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
358         {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
359             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
360              0xc7, 0, 0, 0, 0} },
361 };
362
363 static const struct opcode_info_t write_iarr[3] = {
364         {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
365             {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
366              0, 0, 0, 0} },
367         {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
368             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
369         {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
370             {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
371              0xc7, 0, 0, 0, 0} },
372 };
373
374 static const struct opcode_info_t sa_in_iarr[1] = {
375         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
376             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
377              0xff, 0xff, 0xff, 0, 0xc7} },
378 };
379
380 static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
381         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
382             NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
383                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
384 };
385
386 static const struct opcode_info_t maint_in_iarr[2] = {
387         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
388             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
389              0xc7, 0, 0, 0, 0} },
390         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
391             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
392              0, 0} },
393 };
394
395 static const struct opcode_info_t write_same_iarr[1] = {
396         {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
397             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
398              0xff, 0xff, 0xff, 0x1f, 0xc7} },
399 };
400
401 static const struct opcode_info_t reserve_iarr[1] = {
402         {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
403             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
404 };
405
406 static const struct opcode_info_t release_iarr[1] = {
407         {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
408             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
409 };
410
411
412 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
413  * plus the terminating elements for logic that scans this table such as
414  * REPORT SUPPORTED OPERATION CODES. */
415 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
416 /* 0 */
417         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
418             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
419         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
420             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
421         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
422             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
423              0, 0} },
424         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
425             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
426         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
427             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
428         {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
429             {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
430              0} },
431         {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
432             {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
433         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
434             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
435              0, 0, 0} },
436         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
437             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
438              0, 0} },
439         {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
440             {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
441              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
442 /* 10 */
443         {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
444             {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
445              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
446         {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
447             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
448         {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
449             {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
450              0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
451         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
452             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
453         {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
454             {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
455              0} },
456         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
457             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
458         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
459             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
460         {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
461             vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
462                       0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
463         {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
464             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
465              0} },
466         {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
467             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
468              0} },
469 /* 20 */
470         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
471             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
472         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
473             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
474         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
475             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
476         {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
477             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
478         {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
479             {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
480         {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
481             NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
482                    0, 0, 0, 0, 0, 0} },
483         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
484             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
485         {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
486             write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
487                               0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
488         {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
489             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
490              0, 0, 0, 0} },
491         {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
492             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
493              0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
494
495 /* 30 */
496         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
497             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
498 };
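
/*
 * Illustrative sketch only, not part of the original driver: how a CDB's
 * opcode byte is resolved to its entry in the table above, first through
 * opcode_ind_arr[] and then into opcode_info_arr[]. The helper name
 * sdebug_lookup_opcode() is hypothetical; the real lookup is performed in
 * the queuecommand path further below in this file.
 */
static inline const struct opcode_info_t *
sdebug_lookup_opcode(const unsigned char *cdb)
{
        return &opcode_info_arr[opcode_ind_arr[cdb[0]]];
}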
499
500 struct sdebug_scmd_extra_t {
501         bool inj_recovered;
502         bool inj_transport;
503         bool inj_dif;
504         bool inj_dix;
505         bool inj_short;
506 };
507
508 static int scsi_debug_add_host = DEF_NUM_HOST;
509 static int scsi_debug_ato = DEF_ATO;
510 static int scsi_debug_delay = DEF_DELAY;
511 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
512 static int scsi_debug_dif = DEF_DIF;
513 static int scsi_debug_dix = DEF_DIX;
514 static int scsi_debug_dsense = DEF_D_SENSE;
515 static int scsi_debug_every_nth = DEF_EVERY_NTH;
516 static int scsi_debug_fake_rw = DEF_FAKE_RW;
517 static unsigned int scsi_debug_guard = DEF_GUARD;
518 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
519 static int scsi_debug_max_luns = DEF_MAX_LUNS;
520 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
521 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
522 static int scsi_debug_ndelay = DEF_NDELAY;
523 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
524 static int scsi_debug_no_uld = 0;
525 static int scsi_debug_num_parts = DEF_NUM_PARTS;
526 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
527 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
528 static int scsi_debug_opts = DEF_OPTS;
529 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
530 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
531 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
532 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
533 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
534 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
535 static unsigned int scsi_debug_lbpu = DEF_LBPU;
536 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
537 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
538 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
539 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
540 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
541 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
542 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
543 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
544 static bool scsi_debug_removable = DEF_REMOVABLE;
545 static bool scsi_debug_clustering;
546 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
547 static bool scsi_debug_strict = DEF_STRICT;
548 static bool sdebug_any_injecting_opt;
549
550 static atomic_t sdebug_cmnd_count;
551 static atomic_t sdebug_completions;
552 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
553
554 #define DEV_READONLY(TGT)      (0)
555
556 static unsigned int sdebug_store_sectors;
557 static sector_t sdebug_capacity;        /* in sectors */
558
559 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
560    may still need them */
561 static int sdebug_heads;                /* heads per disk */
562 static int sdebug_cylinders_per;        /* cylinders per surface */
563 static int sdebug_sectors_per;          /* sectors per cylinder */
564
565 #define SDEBUG_MAX_PARTS 4
566
567 #define SCSI_DEBUG_MAX_CMD_LEN 32
568
569 static unsigned int scsi_debug_lbp(void)
570 {
571         return ((0 == scsi_debug_fake_rw) &&
572                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
573 }
574
575 struct sdebug_dev_info {
576         struct list_head dev_list;
577         unsigned int channel;
578         unsigned int target;
579         u64 lun;
580         struct sdebug_host_info *sdbg_host;
581         unsigned long uas_bm[1];
582         atomic_t num_in_q;
583         char stopped;           /* TODO: should be atomic */
584         bool used;
585 };
586
587 struct sdebug_host_info {
588         struct list_head host_list;
589         struct Scsi_Host *shost;
590         struct device dev;
591         struct list_head dev_info_list;
592 };
593
594 #define to_sdebug_host(d)       \
595         container_of(d, struct sdebug_host_info, dev)
596
597 static LIST_HEAD(sdebug_host_list);
598 static DEFINE_SPINLOCK(sdebug_host_list_lock);
599
600
601 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
602         struct hrtimer hrt;     /* must be first element */
603         int qa_indx;
604 };
605
606 struct sdebug_queued_cmd {
607         /* in_use flagged by a bit in queued_in_use_bm[] */
608         struct timer_list *cmnd_timerp;
609         struct tasklet_struct *tletp;
610         struct sdebug_hrtimer *sd_hrtp;
611         struct scsi_cmnd * a_cmnd;
612 };
613 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
614 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
615
616
617 static unsigned char * fake_storep;     /* ramdisk storage */
618 static struct sd_dif_tuple *dif_storep; /* protection info */
619 static void *map_storep;                /* provisioning map */
620
621 static unsigned long map_size;
622 static int num_aborts;
623 static int num_dev_resets;
624 static int num_target_resets;
625 static int num_bus_resets;
626 static int num_host_resets;
627 static int dix_writes;
628 static int dix_reads;
629 static int dif_errors;
630
631 static DEFINE_SPINLOCK(queued_arr_lock);
632 static DEFINE_RWLOCK(atomic_rw);
633
634 static char sdebug_proc_name[] = MY_NAME;
635 static const char *my_name = MY_NAME;
636
637 static struct bus_type pseudo_lld_bus;
638
639 static struct device_driver sdebug_driverfs_driver = {
640         .name           = sdebug_proc_name,
641         .bus            = &pseudo_lld_bus,
642 };
643
644 static const int check_condition_result =
645                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
646
647 static const int illegal_condition_result =
648         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
649
650 static const int device_qfull_result =
651         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
652
653 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
654                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
655                                      0, 0, 0, 0};
656 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
657                                     0, 0, 0x2, 0x4b};
658 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
659                                    0, 0, 0x0, 0x0};
660
661 static void *fake_store(unsigned long long lba)
662 {
663         lba = do_div(lba, sdebug_store_sectors);
664
665         return fake_storep + lba * scsi_debug_sector_size;
666 }
667
668 static struct sd_dif_tuple *dif_store(sector_t sector)
669 {
670         sector = do_div(sector, sdebug_store_sectors);
671
672         return dif_storep + sector;
673 }
674
675 static int sdebug_add_adapter(void);
676 static void sdebug_remove_adapter(void);
677
678 static void sdebug_max_tgts_luns(void)
679 {
680         struct sdebug_host_info *sdbg_host;
681         struct Scsi_Host *hpnt;
682
683         spin_lock(&sdebug_host_list_lock);
684         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
685                 hpnt = sdbg_host->shost;
686                 if ((hpnt->this_id >= 0) &&
687                     (scsi_debug_num_tgts > hpnt->this_id))
688                         hpnt->max_id = scsi_debug_num_tgts + 1;
689                 else
690                         hpnt->max_id = scsi_debug_num_tgts;
691                 /* scsi_debug_max_luns; */
692                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
693         }
694         spin_unlock(&sdebug_host_list_lock);
695 }
696
697 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
698
699 /* Set in_bit to -1 to indicate no bit position of invalid field */
700 static void
701 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
702                      int in_byte, int in_bit)
703 {
704         unsigned char *sbuff;
705         u8 sks[4];
706         int sl, asc;
707
708         sbuff = scp->sense_buffer;
709         if (!sbuff) {
710                 sdev_printk(KERN_ERR, scp->device,
711                             "%s: sense_buffer is NULL\n", __func__);
712                 return;
713         }
714         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
715         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
716         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
717                                 asc, 0);
718         memset(sks, 0, sizeof(sks));
719         sks[0] = 0x80;
720         if (c_d)
721                 sks[0] |= 0x40;
722         if (in_bit >= 0) {
723                 sks[0] |= 0x8;
724                 sks[0] |= 0x7 & in_bit;
725         }
726         put_unaligned_be16(in_byte, sks + 1);
727         if (scsi_debug_dsense) {
728                 sl = sbuff[7] + 8;
729                 sbuff[7] = sl;
730                 sbuff[sl] = 0x2;
731                 sbuff[sl + 1] = 0x6;
732                 memcpy(sbuff + sl + 4, sks, 3);
733         } else
734                 memcpy(sbuff + 15, sks, 3);
735         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
736                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
737                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
738                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
739 }
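
/*
 * Typical usage of the helper above, mirroring the calls made by the
 * response handlers further below: flag byte 2 of the CDB as invalid (no
 * specific bit) and fail the command with CHECK CONDITION:
 *
 *      mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
 *      return check_condition_result;
 */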
740
741 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
742 {
743         unsigned char *sbuff;
744
745         sbuff = scp->sense_buffer;
746         if (!sbuff) {
747                 sdev_printk(KERN_ERR, scp->device,
748                             "%s: sense_buffer is NULL\n", __func__);
749                 return;
750         }
751         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
752
753         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
754
755         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
756                 sdev_printk(KERN_INFO, scp->device,
757                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
758                             my_name, key, asc, asq);
759 }
760
761 static void
762 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
763 {
764         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
765 }
766
767 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
768 {
769         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
770                 if (0x1261 == cmd)
771                         sdev_printk(KERN_INFO, dev,
772                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
773                 else if (0x5331 == cmd)
774                         sdev_printk(KERN_INFO, dev,
775                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
776                                     __func__);
777                 else
778                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
779                                     __func__, cmd);
780         }
781         return -EINVAL;
782         /* return -ENOTTY; // correct return but upsets fdisk */
783 }
784
785 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
786                            struct sdebug_dev_info * devip)
787 {
788         int k;
789         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
790
791         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
792         if (k != SDEBUG_NUM_UAS) {
793                 const char *cp = NULL;
794
795                 switch (k) {
796                 case SDEBUG_UA_POR:
797                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
798                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
799                         if (debug)
800                                 cp = "power on reset";
801                         break;
802                 case SDEBUG_UA_BUS_RESET:
803                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
804                                         UA_RESET_ASC, BUS_RESET_ASCQ);
805                         if (debug)
806                                 cp = "bus reset";
807                         break;
808                 case SDEBUG_UA_MODE_CHANGED:
809                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
810                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
811                         if (debug)
812                                 cp = "mode parameters changed";
813                         break;
814                 case SDEBUG_UA_CAPACITY_CHANGED:
815                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
816                                         UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
817                         if (debug)
818                                 cp = "capacity data changed";
                                break;
819                 default:
820                         pr_warn("%s: unexpected unit attention code=%d\n",
821                                 __func__, k);
822                         if (debug)
823                                 cp = "unknown";
824                         break;
825                 }
826                 clear_bit(k, devip->uas_bm);
827                 if (debug)
828                         sdev_printk(KERN_INFO, SCpnt->device,
829                                    "%s reports: Unit attention: %s\n",
830                                    my_name, cp);
831                 return check_condition_result;
832         }
833         if ((UAS_TUR == uas_only) && devip->stopped) {
834                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
835                                 0x2);
836                 if (debug)
837                         sdev_printk(KERN_INFO, SCpnt->device,
838                                     "%s reports: Not ready: %s\n", my_name,
839                                     "initializing command required");
840                 return check_condition_result;
841         }
842         return 0;
843 }
844
845 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
846 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
847                                 int arr_len)
848 {
849         int act_len;
850         struct scsi_data_buffer *sdb = scsi_in(scp);
851
852         if (!sdb->length)
853                 return 0;
854         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
855                 return (DID_ERROR << 16);
856
857         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
858                                       arr, arr_len);
859         sdb->resid = scsi_bufflen(scp) - act_len;
860
861         return 0;
862 }
863
864 /* Returns number of bytes fetched into 'arr' or -1 if error. */
865 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
866                                int arr_len)
867 {
868         if (!scsi_bufflen(scp))
869                 return 0;
870         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
871                 return -1;
872
873         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
874 }
875
876
877 static const char * inq_vendor_id = "Linux   ";
878 static const char * inq_product_id = "scsi_debug      ";
879 static const char *inq_product_rev = "0184";    /* version less '.' */
880
881 /* Device identification VPD page. Returns number of bytes placed in arr */
882 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
883                            int target_dev_id, int dev_id_num,
884                            const char * dev_id_str,
885                            int dev_id_str_len)
886 {
887         int num, port_a;
888         char b[32];
889
890         port_a = target_dev_id + 1;
891         /* T10 vendor identifier field format (faked) */
892         arr[0] = 0x2;   /* ASCII */
893         arr[1] = 0x1;
894         arr[2] = 0x0;
895         memcpy(&arr[4], inq_vendor_id, 8);
896         memcpy(&arr[12], inq_product_id, 16);
897         memcpy(&arr[28], dev_id_str, dev_id_str_len);
898         num = 8 + 16 + dev_id_str_len;
899         arr[3] = num;
900         num += 4;
901         if (dev_id_num >= 0) {
902                 /* NAA-5, Logical unit identifier (binary) */
903                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
904                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
905                 arr[num++] = 0x0;
906                 arr[num++] = 0x8;
907                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
908                 arr[num++] = 0x33;
909                 arr[num++] = 0x33;
910                 arr[num++] = 0x30;
911                 arr[num++] = (dev_id_num >> 24);
912                 arr[num++] = (dev_id_num >> 16) & 0xff;
913                 arr[num++] = (dev_id_num >> 8) & 0xff;
914                 arr[num++] = dev_id_num & 0xff;
915                 /* Target relative port number */
916                 arr[num++] = 0x61;      /* proto=sas, binary */
917                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
918                 arr[num++] = 0x0;       /* reserved */
919                 arr[num++] = 0x4;       /* length */
920                 arr[num++] = 0x0;       /* reserved */
921                 arr[num++] = 0x0;       /* reserved */
922                 arr[num++] = 0x0;
923                 arr[num++] = 0x1;       /* relative port A */
924         }
925         /* NAA-5, Target port identifier */
926         arr[num++] = 0x61;      /* proto=sas, binary */
927         arr[num++] = 0x93;      /* piv=1, target port, naa */
928         arr[num++] = 0x0;
929         arr[num++] = 0x8;
930         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
931         arr[num++] = 0x22;
932         arr[num++] = 0x22;
933         arr[num++] = 0x20;
934         arr[num++] = (port_a >> 24);
935         arr[num++] = (port_a >> 16) & 0xff;
936         arr[num++] = (port_a >> 8) & 0xff;
937         arr[num++] = port_a & 0xff;
938         /* NAA-5, Target port group identifier */
939         arr[num++] = 0x61;      /* proto=sas, binary */
940         arr[num++] = 0x95;      /* piv=1, target port group id */
941         arr[num++] = 0x0;
942         arr[num++] = 0x4;
943         arr[num++] = 0;
944         arr[num++] = 0;
945         arr[num++] = (port_group_id >> 8) & 0xff;
946         arr[num++] = port_group_id & 0xff;
947         /* NAA-5, Target device identifier */
948         arr[num++] = 0x61;      /* proto=sas, binary */
949         arr[num++] = 0xa3;      /* piv=1, target device, naa */
950         arr[num++] = 0x0;
951         arr[num++] = 0x8;
952         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
953         arr[num++] = 0x22;
954         arr[num++] = 0x22;
955         arr[num++] = 0x20;
956         arr[num++] = (target_dev_id >> 24);
957         arr[num++] = (target_dev_id >> 16) & 0xff;
958         arr[num++] = (target_dev_id >> 8) & 0xff;
959         arr[num++] = target_dev_id & 0xff;
960         /* SCSI name string: Target device identifier */
961         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
962         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
963         arr[num++] = 0x0;
964         arr[num++] = 24;
965         memcpy(arr + num, "naa.52222220", 12);
966         num += 12;
967         snprintf(b, sizeof(b), "%08X", target_dev_id);
968         memcpy(arr + num, b, 8);
969         num += 8;
970         memset(arr + num, 0, 4);
971         num += 4;
972         return num;
973 }
974
975
976 static unsigned char vpd84_data[] = {
977 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
978     0x22,0x22,0x22,0x0,0xbb,0x1,
979     0x22,0x22,0x22,0x0,0xbb,0x2,
980 };
981
982 /*  Software interface identification VPD page */
983 static int inquiry_evpd_84(unsigned char * arr)
984 {
985         memcpy(arr, vpd84_data, sizeof(vpd84_data));
986         return sizeof(vpd84_data);
987 }
988
989 /* Management network addresses VPD page */
990 static int inquiry_evpd_85(unsigned char * arr)
991 {
992         int num = 0;
993         const char * na1 = "https://www.kernel.org/config";
994         const char * na2 = "http://www.kernel.org/log";
995         int plen, olen;
996
997         arr[num++] = 0x1;       /* lu, storage config */
998         arr[num++] = 0x0;       /* reserved */
999         arr[num++] = 0x0;
1000         olen = strlen(na1);
1001         plen = olen + 1;
1002         if (plen % 4)
1003                 plen = ((plen / 4) + 1) * 4;
1004         arr[num++] = plen;      /* length, null terminated, padded */
1005         memcpy(arr + num, na1, olen);
1006         memset(arr + num + olen, 0, plen - olen);
1007         num += plen;
1008
1009         arr[num++] = 0x4;       /* lu, logging */
1010         arr[num++] = 0x0;       /* reserved */
1011         arr[num++] = 0x0;
1012         olen = strlen(na2);
1013         plen = olen + 1;
1014         if (plen % 4)
1015                 plen = ((plen / 4) + 1) * 4;
1016         arr[num++] = plen;      /* length, null terminated, padded */
1017         memcpy(arr + num, na2, olen);
1018         memset(arr + num + olen, 0, plen - olen);
1019         num += plen;
1020
1021         return num;
1022 }
1023
1024 /* SCSI ports VPD page */
1025 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1026 {
1027         int num = 0;
1028         int port_a, port_b;
1029
1030         port_a = target_dev_id + 1;
1031         port_b = port_a + 1;
1032         arr[num++] = 0x0;       /* reserved */
1033         arr[num++] = 0x0;       /* reserved */
1034         arr[num++] = 0x0;
1035         arr[num++] = 0x1;       /* relative port 1 (primary) */
1036         memset(arr + num, 0, 6);
1037         num += 6;
1038         arr[num++] = 0x0;
1039         arr[num++] = 12;        /* length tp descriptor */
1040         /* naa-5 target port identifier (A) */
1041         arr[num++] = 0x61;      /* proto=sas, binary */
1042         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1043         arr[num++] = 0x0;       /* reserved */
1044         arr[num++] = 0x8;       /* length */
1045         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1046         arr[num++] = 0x22;
1047         arr[num++] = 0x22;
1048         arr[num++] = 0x20;
1049         arr[num++] = (port_a >> 24);
1050         arr[num++] = (port_a >> 16) & 0xff;
1051         arr[num++] = (port_a >> 8) & 0xff;
1052         arr[num++] = port_a & 0xff;
1053
1054         arr[num++] = 0x0;       /* reserved */
1055         arr[num++] = 0x0;       /* reserved */
1056         arr[num++] = 0x0;
1057         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1058         memset(arr + num, 0, 6);
1059         num += 6;
1060         arr[num++] = 0x0;
1061         arr[num++] = 12;        /* length tp descriptor */
1062         /* naa-5 target port identifier (B) */
1063         arr[num++] = 0x61;      /* proto=sas, binary */
1064         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1065         arr[num++] = 0x0;       /* reserved */
1066         arr[num++] = 0x8;       /* length */
1067         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1068         arr[num++] = 0x22;
1069         arr[num++] = 0x22;
1070         arr[num++] = 0x20;
1071         arr[num++] = (port_b >> 24);
1072         arr[num++] = (port_b >> 16) & 0xff;
1073         arr[num++] = (port_b >> 8) & 0xff;
1074         arr[num++] = port_b & 0xff;
1075
1076         return num;
1077 }
1078
1079
1080 static unsigned char vpd89_data[] = {
1081 /* from 4th byte */ 0,0,0,0,
1082 'l','i','n','u','x',' ',' ',' ',
1083 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1084 '1','2','3','4',
1085 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1086 0xec,0,0,0,
1087 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1088 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1089 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1090 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1091 0x53,0x41,
1092 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1093 0x20,0x20,
1094 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1095 0x10,0x80,
1096 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1097 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1098 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1099 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1100 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1101 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1102 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1103 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1104 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1105 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1106 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1107 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1108 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1109 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1110 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1111 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1112 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1113 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1114 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1115 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1116 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1117 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1118 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1119 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1120 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1121 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1122 };
1123
1124 /* ATA Information VPD page */
1125 static int inquiry_evpd_89(unsigned char * arr)
1126 {
1127         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1128         return sizeof(vpd89_data);
1129 }
1130
1131
1132 static unsigned char vpdb0_data[] = {
1133         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1134         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1135         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1137 };
1138
1139 /* Block limits VPD page (SBC-3) */
1140 static int inquiry_evpd_b0(unsigned char * arr)
1141 {
1142         unsigned int gran;
1143
1144         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1145
1146         /* Optimal transfer length granularity */
1147         gran = 1 << scsi_debug_physblk_exp;
1148         arr[2] = (gran >> 8) & 0xff;
1149         arr[3] = gran & 0xff;
1150
1151         /* Maximum Transfer Length */
1152         if (sdebug_store_sectors > 0x400) {
1153                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1154                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1155                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1156                 arr[7] = sdebug_store_sectors & 0xff;
1157         }
1158
1159         /* Optimal Transfer Length */
1160         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1161
1162         if (scsi_debug_lbpu) {
1163                 /* Maximum Unmap LBA Count */
1164                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1165
1166                 /* Maximum Unmap Block Descriptor Count */
1167                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1168         }
1169
1170         /* Unmap Granularity Alignment */
1171         if (scsi_debug_unmap_alignment) {
1172                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1173                 arr[28] |= 0x80; /* UGAVALID */
1174         }
1175
1176         /* Optimal Unmap Granularity */
1177         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1178
1179         /* Maximum WRITE SAME Length */
1180         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1181
1182         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1185 }
1186
1187 /* Block device characteristics VPD page (SBC-3) */
1188 static int inquiry_evpd_b1(unsigned char *arr)
1189 {
1190         memset(arr, 0, 0x3c);
1191         arr[0] = 0;
1192         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1193         arr[2] = 0;
1194         arr[3] = 5;     /* less than 1.8" */
1195
1196         return 0x3c;
1197 }
1198
1199 /* Logical block provisioning VPD page (SBC-3) */
1200 static int inquiry_evpd_b2(unsigned char *arr)
1201 {
1202         memset(arr, 0, 0x4);
1203         arr[0] = 0;                     /* threshold exponent */
1204
1205         if (scsi_debug_lbpu)
1206                 arr[1] = 1 << 7;
1207
1208         if (scsi_debug_lbpws)
1209                 arr[1] |= 1 << 6;
1210
1211         if (scsi_debug_lbpws10)
1212                 arr[1] |= 1 << 5;
1213
1214         if (scsi_debug_lbprz)
1215                 arr[1] |= 1 << 2;
1216
1217         return 0x4;
1218 }
1219
1220 #define SDEBUG_LONG_INQ_SZ 96
1221 #define SDEBUG_MAX_INQ_ARR_SZ 584
1222
1223 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1224 {
1225         unsigned char pq_pdt;
1226         unsigned char * arr;
1227         unsigned char *cmd = scp->cmnd;
1228         int alloc_len, n, ret;
1229         bool have_wlun;
1230
1231         alloc_len = (cmd[3] << 8) + cmd[4];
1232         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1233         if (! arr)
1234                 return DID_REQUEUE << 16;
1235         have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1236         if (have_wlun)
1237                 pq_pdt = 0x1e;  /* present, wlun */
1238         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1239                 pq_pdt = 0x7f;  /* not present, no device type */
1240         else
1241                 pq_pdt = (scsi_debug_ptype & 0x1f);
1242         arr[0] = pq_pdt;
1243         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1244                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1245                 kfree(arr);
1246                 return check_condition_result;
1247         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1248                 int lu_id_num, port_group_id, target_dev_id, len;
1249                 char lu_id_str[6];
1250                 int host_no = devip->sdbg_host->shost->host_no;
1251                 
1252                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1253                     (devip->channel & 0x7f);
1254                 if (0 == scsi_debug_vpd_use_hostno)
1255                         host_no = 0;
1256                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1257                             (devip->target * 1000) + devip->lun);
1258                 target_dev_id = ((host_no + 1) * 2000) +
1259                                  (devip->target * 1000) - 3;
1260                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1261                 if (0 == cmd[2]) { /* supported vital product data pages */
1262                         arr[1] = cmd[2];        /*sanity */
1263                         n = 4;
1264                         arr[n++] = 0x0;   /* this page */
1265                         arr[n++] = 0x80;  /* unit serial number */
1266                         arr[n++] = 0x83;  /* device identification */
1267                         arr[n++] = 0x84;  /* software interface ident. */
1268                         arr[n++] = 0x85;  /* management network addresses */
1269                         arr[n++] = 0x86;  /* extended inquiry */
1270                         arr[n++] = 0x87;  /* mode page policy */
1271                         arr[n++] = 0x88;  /* SCSI ports */
1272                         arr[n++] = 0x89;  /* ATA information */
1273                         arr[n++] = 0xb0;  /* Block limits (SBC) */
1274                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1275                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1276                                 arr[n++] = 0xb2;
1277                         arr[3] = n - 4;   /* number of supported VPD pages */
1278                 } else if (0x80 == cmd[2]) { /* unit serial number */
1279                         arr[1] = cmd[2];        /*sanity */
1280                         arr[3] = len;
1281                         memcpy(&arr[4], lu_id_str, len);
1282                 } else if (0x83 == cmd[2]) { /* device identification */
1283                         arr[1] = cmd[2];        /*sanity */
1284                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1285                                                  target_dev_id, lu_id_num,
1286                                                  lu_id_str, len);
1287                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1288                         arr[1] = cmd[2];        /*sanity */
1289                         arr[3] = inquiry_evpd_84(&arr[4]);
1290                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1291                         arr[1] = cmd[2];        /*sanity */
1292                         arr[3] = inquiry_evpd_85(&arr[4]);
1293                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1294                         arr[1] = cmd[2];        /*sanity */
1295                         arr[3] = 0x3c;  /* number of following entries */
1296                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1297                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1298                         else if (scsi_debug_dif)
1299                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1300                         else
1301                                 arr[4] = 0x0;   /* no protection stuff */
1302                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1303                 } else if (0x87 == cmd[2]) { /* mode page policy */
1304                         arr[1] = cmd[2];        /*sanity */
1305                         arr[3] = 0x8;   /* number of following entries */
1306                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1307                         arr[6] = 0x80;  /* mlus, shared */
1308                         arr[8] = 0x18;   /* protocol specific lu */
1309                         arr[10] = 0x82;  /* mlus, per initiator port */
1310                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1311                         arr[1] = cmd[2];        /*sanity */
1312                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1313                 } else if (0x89 == cmd[2]) { /* ATA information */
1314                         arr[1] = cmd[2];        /*sanity */
1315                         n = inquiry_evpd_89(&arr[4]);
1316                         arr[2] = (n >> 8);
1317                         arr[3] = (n & 0xff);
1318                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1319                         arr[1] = cmd[2];        /*sanity */
1320                         arr[3] = inquiry_evpd_b0(&arr[4]);
1321                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1322                         arr[1] = cmd[2];        /*sanity */
1323                         arr[3] = inquiry_evpd_b1(&arr[4]);
1324                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1325                         arr[1] = cmd[2];        /*sanity */
1326                         arr[3] = inquiry_evpd_b2(&arr[4]);
1327                 } else {
1328                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1329                         kfree(arr);
1330                         return check_condition_result;
1331                 }
1332                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1333                 ret = fill_from_dev_buffer(scp, arr,
1334                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1335                 kfree(arr);
1336                 return ret;
1337         }
1338         /* drops through here for a standard inquiry */
1339         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1340         arr[2] = scsi_debug_scsi_level;
1341         arr[3] = 2;    /* response_data_format==2 */
1342         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1343         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1344         if (0 == scsi_debug_vpd_use_hostno)
1345                 arr[5] |= 0x10; /* claim: implicit TPGS */
1346         arr[6] = 0x10; /* claim: MultiP */
1347         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1348         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1349         memcpy(&arr[8], inq_vendor_id, 8);
1350         memcpy(&arr[16], inq_product_id, 16);
1351         memcpy(&arr[32], inq_product_rev, 4);
1352         /* version descriptors (2 bytes each) follow */
1353         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1354         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1355         n = 62;
1356         if (scsi_debug_ptype == 0) {
1357                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1358         } else if (scsi_debug_ptype == 1) {
1359                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1360         }
1361         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1362         ret = fill_from_dev_buffer(scp, arr,
1363                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1364         kfree(arr);
1365         return ret;
1366 }
1367
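/*
 * Illustrative sketch (not part of the driver): INQUIRY and the EVPD pages
 * above return min(<bytes actually built>, <allocation length from CDB
 * bytes 3..4>, <size of the scratch array>).  The hypothetical helper below
 * restates that clamping rule; the real code open-codes it before calling
 * fill_from_dev_buffer().
 */
static inline int sdeb_inq_resp_len(const u8 *cdb, int built_len, int arr_sz)
{
	int alloc_len = get_unaligned_be16(cdb + 3);	/* CDB bytes 3..4 */

	return min(min(built_len, alloc_len), arr_sz);
}
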
1368 static int resp_requests(struct scsi_cmnd * scp,
1369                          struct sdebug_dev_info * devip)
1370 {
1371         unsigned char * sbuff;
1372         unsigned char *cmd = scp->cmnd;
1373         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1374         bool dsense, want_dsense;
1375         int len = 18;
1376
1377         memset(arr, 0, sizeof(arr));
1378         dsense = !!(cmd[1] & 1);
1379         want_dsense = dsense || scsi_debug_dsense;
1380         sbuff = scp->sense_buffer;
1381         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1382                 if (dsense) {
1383                         arr[0] = 0x72;
1384                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1385                         arr[2] = THRESHOLD_EXCEEDED;
1386                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1387                         len = 8;
1388                 } else {
1389                         arr[0] = 0x70;
1390                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1391                         arr[7] = 0xa;           /* 18 byte sense buffer */
1392                         arr[12] = THRESHOLD_EXCEEDED;
1393                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1394                 }
1395         } else {
1396                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1397                 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1398                         ;       /* have sense and formats match */
1399                 else if (arr[0] <= 0x70) {
1400                         if (dsense) {
1401                                 memset(arr, 0, 8);
1402                                 arr[0] = 0x72;
1403                                 len = 8;
1404                         } else {
1405                                 memset(arr, 0, 18);
1406                                 arr[0] = 0x70;
1407                                 arr[7] = 0xa;
1408                         }
1409                 } else if (dsense) {
1410                         memset(arr, 0, 8);
1411                         arr[0] = 0x72;
1412                         arr[1] = sbuff[2];     /* sense key */
1413                         arr[2] = sbuff[12];    /* asc */
1414                         arr[3] = sbuff[13];    /* ascq */
1415                         len = 8;
1416                 } else {
1417                         memset(arr, 0, 18);
1418                         arr[0] = 0x70;
1419                         arr[2] = sbuff[1];     /* sense key */
1420                         arr[7] = 0xa;
1421                         arr[12] = sbuff[2];    /* asc */
1422                         arr[13] = sbuff[3];    /* ascq */
1423                 }
1424
1425         }
1426         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1427         return fill_from_dev_buffer(scp, arr, len);
1428 }
1429
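/*
 * Illustrative sketch (not part of the driver): resp_requests() above
 * translates between the two SPC sense data formats.  Fixed format
 * (response code 0x70) keeps the sense key in byte 2, the ASC in byte 12
 * and the ASCQ in byte 13; descriptor format (0x72) keeps them in bytes
 * 1, 2 and 3.  The hypothetical helper below restates the
 * descriptor-to-fixed direction handled in the final else branch.
 */
static inline void sdeb_desc_to_fixed_sense(const u8 *desc, u8 *fixed)
{
	memset(fixed, 0, 18);
	fixed[0] = 0x70;	/* current error, fixed format */
	fixed[2] = desc[1];	/* sense key */
	fixed[7] = 0xa;		/* additional sense length (18 byte buffer) */
	fixed[12] = desc[2];	/* additional sense code (ASC) */
	fixed[13] = desc[3];	/* additional sense code qualifier (ASCQ) */
}
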
1430 static int resp_start_stop(struct scsi_cmnd * scp,
1431                            struct sdebug_dev_info * devip)
1432 {
1433         unsigned char *cmd = scp->cmnd;
1434         int power_cond, start;
1435
1436         power_cond = (cmd[4] & 0xf0) >> 4;
1437         if (power_cond) {
1438                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1439                 return check_condition_result;
1440         }
1441         start = cmd[4] & 1;
1442         if (start == devip->stopped)
1443                 devip->stopped = !start;
1444         return 0;
1445 }
1446
1447 static sector_t get_sdebug_capacity(void)
1448 {
1449         if (scsi_debug_virtual_gb > 0)
1450                 return (sector_t)scsi_debug_virtual_gb *
1451                         (1073741824 / scsi_debug_sector_size);
1452         else
1453                 return sdebug_store_sectors;
1454 }
1455
1456 #define SDEBUG_READCAP_ARR_SZ 8
1457 static int resp_readcap(struct scsi_cmnd * scp,
1458                         struct sdebug_dev_info * devip)
1459 {
1460         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1461         unsigned int capac;
1462
1463         /* following just in case virtual_gb changed */
1464         sdebug_capacity = get_sdebug_capacity();
1465         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1466         if (sdebug_capacity < 0xffffffff) {
1467                 capac = (unsigned int)sdebug_capacity - 1;
1468                 arr[0] = (capac >> 24);
1469                 arr[1] = (capac >> 16) & 0xff;
1470                 arr[2] = (capac >> 8) & 0xff;
1471                 arr[3] = capac & 0xff;
1472         } else {
1473                 arr[0] = 0xff;
1474                 arr[1] = 0xff;
1475                 arr[2] = 0xff;
1476                 arr[3] = 0xff;
1477         }
1478         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1479         arr[7] = scsi_debug_sector_size & 0xff;
1480         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1481 }
1482
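/*
 * Illustrative sketch (not part of the driver): READ CAPACITY(10) returns
 * the address of the *last* logical block (capacity - 1) as a 32 bit
 * big-endian value, with 0xffffffff meaning "too big, use READ
 * CAPACITY(16)".  The hypothetical helper below restates the packing done
 * by resp_readcap() above.
 */
static inline void sdeb_pack_readcap10(u8 *arr, sector_t capacity,
				       unsigned int lb_size)
{
	u32 last_lba = (capacity < 0xffffffff) ? (u32)(capacity - 1) :
						 0xffffffff;

	put_unaligned_be32(last_lba, arr);	/* returned logical block address */
	put_unaligned_be32(lb_size, arr + 4);	/* block length in bytes */
}
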
1483 #define SDEBUG_READCAP16_ARR_SZ 32
1484 static int resp_readcap16(struct scsi_cmnd * scp,
1485                           struct sdebug_dev_info * devip)
1486 {
1487         unsigned char *cmd = scp->cmnd;
1488         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1489         unsigned long long capac;
1490         int k, alloc_len;
1491
1492         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1493                      + cmd[13]);
1494         /* following just in case virtual_gb changed */
1495         sdebug_capacity = get_sdebug_capacity();
1496         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1497         capac = sdebug_capacity - 1;
1498         for (k = 0; k < 8; ++k, capac >>= 8)
1499                 arr[7 - k] = capac & 0xff;
1500         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1501         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1502         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1503         arr[11] = scsi_debug_sector_size & 0xff;
1504         arr[13] = scsi_debug_physblk_exp & 0xf;
1505         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1506
1507         if (scsi_debug_lbp()) {
1508                 arr[14] |= 0x80; /* LBPME */
1509                 if (scsi_debug_lbprz)
1510                         arr[14] |= 0x40; /* LBPRZ */
1511         }
1512
1513         arr[15] = scsi_debug_lowest_aligned & 0xff;
1514
1515         if (scsi_debug_dif) {
1516                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1517                 arr[12] |= 1; /* PROT_EN */
1518         }
1519
1520         return fill_from_dev_buffer(scp, arr,
1521                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1522 }
1523
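/*
 * Illustrative sketch (not part of the driver): byte 12 of the READ
 * CAPACITY(16) parameter data encodes the protection settings filled in by
 * resp_readcap16() above: bit 0 is PROT_EN and bits 1..3 are P_TYPE, where
 * P_TYPE = (DIF type - 1).  A hypothetical decoder for that byte:
 */
static inline int sdeb_prot_type_from_byte12(u8 byte12)
{
	if (!(byte12 & 0x1))			/* PROT_EN clear: unprotected */
		return 0;
	return ((byte12 >> 1) & 0x7) + 1;	/* DIF type 1, 2 or 3 */
}
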
1524 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1525
1526 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1527                               struct sdebug_dev_info * devip)
1528 {
1529         unsigned char *cmd = scp->cmnd;
1530         unsigned char * arr;
1531         int host_no = devip->sdbg_host->shost->host_no;
1532         int n, ret, alen, rlen;
1533         int port_group_a, port_group_b, port_a, port_b;
1534
1535         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1536                 + cmd[9]);
1537
1538         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1539         if (!arr)
1540                 return DID_REQUEUE << 16;
1541         /*
1542          * EVPD page 0x88 states we have two ports, one
1543          * real and a fake port with no device connected.
1544          * So we create two port groups with one port each
1545          * and set the group with port B to unavailable.
1546          */
1547         port_a = 0x1; /* relative port A */
1548         port_b = 0x2; /* relative port B */
1549         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1550             (devip->channel & 0x7f);
1551         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1552             (devip->channel & 0x7f) + 0x80;
1553
1554         /*
1555          * The asymmetric access state is cycled according to the host_id.
1556          */
1557         n = 4;
1558         if (0 == scsi_debug_vpd_use_hostno) {
1559             arr[n++] = host_no % 3; /* Asymm access state */
1560             arr[n++] = 0x0F; /* claim: all states are supported */
1561         } else {
1562             arr[n++] = 0x0; /* Active/Optimized path */
1563             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1564         }
1565         arr[n++] = (port_group_a >> 8) & 0xff;
1566         arr[n++] = port_group_a & 0xff;
1567         arr[n++] = 0;    /* Reserved */
1568         arr[n++] = 0;    /* Status code */
1569         arr[n++] = 0;    /* Vendor unique */
1570         arr[n++] = 0x1;  /* One port per group */
1571         arr[n++] = 0;    /* Reserved */
1572         arr[n++] = 0;    /* Reserved */
1573         arr[n++] = (port_a >> 8) & 0xff;
1574         arr[n++] = port_a & 0xff;
1575         arr[n++] = 3;    /* Port unavailable */
1576                 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1577         arr[n++] = (port_group_b >> 8) & 0xff;
1578         arr[n++] = port_group_b & 0xff;
1579         arr[n++] = 0;    /* Reserved */
1580         arr[n++] = 0;    /* Status code */
1581         arr[n++] = 0;    /* Vendor unique */
1582         arr[n++] = 0x1;  /* One port per group */
1583         arr[n++] = 0;    /* Reserved */
1584         arr[n++] = 0;    /* Reserved */
1585         arr[n++] = (port_b >> 8) & 0xff;
1586         arr[n++] = port_b & 0xff;
1587
1588         rlen = n - 4;
1589         arr[0] = (rlen >> 24) & 0xff;
1590         arr[1] = (rlen >> 16) & 0xff;
1591         arr[2] = (rlen >> 8) & 0xff;
1592         arr[3] = rlen & 0xff;
1593
1594         /*
1595          * Return the smallest of:
1596          * - the allocation length,
1597          * - the constructed response length,
1598          * - the maximum array size.
1599          */
1600         rlen = min(alen, n);
1601         ret = fill_from_dev_buffer(scp, arr,
1602                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1603         kfree(arr);
1604         return ret;
1605 }
1606
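/*
 * Illustrative sketch (not part of the driver): the REPORT TARGET PORT
 * GROUPS response built above starts with a 4 byte big-endian "return data
 * length" that counts only the descriptors which follow (hence rlen = n - 4);
 * each target port group descriptor here is 8 bytes of group data plus
 * 4 bytes per relative target port.  A hypothetical restatement of the
 * header handling:
 */
static inline void sdeb_rtpg_set_retlen(u8 *arr, int total_len)
{
	/* total_len includes the 4 byte header; the field excludes it */
	put_unaligned_be32(total_len - 4, arr);
}
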
1607 static int
1608 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1609 {
1610         bool rctd;
1611         u8 reporting_opts, req_opcode, sdeb_i, supp;
1612         u16 req_sa, u;
1613         u32 alloc_len, a_len;
1614         int k, offset, len, errsts, count, bump, na;
1615         const struct opcode_info_t *oip;
1616         const struct opcode_info_t *r_oip;
1617         u8 *arr;
1618         u8 *cmd = scp->cmnd;
1619
1620         rctd = !!(cmd[2] & 0x80);
1621         reporting_opts = cmd[2] & 0x7;
1622         req_opcode = cmd[3];
1623         req_sa = get_unaligned_be16(cmd + 4);
1624         alloc_len = get_unaligned_be32(cmd + 6);
1625         if (alloc_len < 4 || alloc_len > 0xffff) {
1626                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1627                 return check_condition_result;
1628         }
1629         if (alloc_len > 8192)
1630                 a_len = 8192;
1631         else
1632                 a_len = alloc_len;
1633         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
1634         if (NULL == arr) {
1635                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1636                                 INSUFF_RES_ASCQ);
1637                 return check_condition_result;
1638         }
1639         switch (reporting_opts) {
1640         case 0: /* all commands */
1641                 /* count number of commands */
1642                 for (count = 0, oip = opcode_info_arr;
1643                      oip->num_attached != 0xff; ++oip) {
1644                         if (F_INV_OP & oip->flags)
1645                                 continue;
1646                         count += (oip->num_attached + 1);
1647                 }
1648                 bump = rctd ? 20 : 8;
1649                 put_unaligned_be32(count * bump, arr);
1650                 for (offset = 4, oip = opcode_info_arr;
1651                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1652                         if (F_INV_OP & oip->flags)
1653                                 continue;
1654                         na = oip->num_attached;
1655                         arr[offset] = oip->opcode;
1656                         put_unaligned_be16(oip->sa, arr + offset + 2);
1657                         if (rctd)
1658                                 arr[offset + 5] |= 0x2;
1659                         if (FF_SA & oip->flags)
1660                                 arr[offset + 5] |= 0x1;
1661                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1662                         if (rctd)
1663                                 put_unaligned_be16(0xa, arr + offset + 8);
1664                         r_oip = oip;
1665                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1666                                 if (F_INV_OP & oip->flags)
1667                                         continue;
1668                                 offset += bump;
1669                                 arr[offset] = oip->opcode;
1670                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1671                                 if (rctd)
1672                                         arr[offset + 5] |= 0x2;
1673                                 if (FF_SA & oip->flags)
1674                                         arr[offset + 5] |= 0x1;
1675                                 put_unaligned_be16(oip->len_mask[0],
1676                                                    arr + offset + 6);
1677                                 if (rctd)
1678                                         put_unaligned_be16(0xa,
1679                                                            arr + offset + 8);
1680                         }
1681                         oip = r_oip;
1682                         offset += bump;
1683                 }
1684                 break;
1685         case 1: /* one command: opcode only */
1686         case 2: /* one command: opcode plus service action */
1687         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1688                 sdeb_i = opcode_ind_arr[req_opcode];
1689                 oip = &opcode_info_arr[sdeb_i];
1690                 if (F_INV_OP & oip->flags) {
1691                         supp = 1;
1692                         offset = 4;
1693                 } else {
1694                         if (1 == reporting_opts) {
1695                                 if (FF_SA & oip->flags) {
1696                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1697                                                              2, 2);
1698                                         kfree(arr);
1699                                         return check_condition_result;
1700                                 }
1701                                 req_sa = 0;
1702                         } else if (2 == reporting_opts &&
1703                                    0 == (FF_SA & oip->flags)) {
1704                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1705                                 kfree(arr);     /* point at requested sa */
1706                                 return check_condition_result;
1707                         }
1708                         if (0 == (FF_SA & oip->flags) &&
1709                             req_opcode == oip->opcode)
1710                                 supp = 3;
1711                         else if (0 == (FF_SA & oip->flags)) {
1712                                 na = oip->num_attached;
1713                                 for (k = 0, oip = oip->arrp; k < na;
1714                                      ++k, ++oip) {
1715                                         if (req_opcode == oip->opcode)
1716                                                 break;
1717                                 }
1718                                 supp = (k >= na) ? 1 : 3;
1719                         } else if (req_sa != oip->sa) {
1720                                 na = oip->num_attached;
1721                                 for (k = 0, oip = oip->arrp; k < na;
1722                                      ++k, ++oip) {
1723                                         if (req_sa == oip->sa)
1724                                                 break;
1725                                 }
1726                                 supp = (k >= na) ? 1 : 3;
1727                         } else
1728                                 supp = 3;
1729                         if (3 == supp) {
1730                                 u = oip->len_mask[0];
1731                                 put_unaligned_be16(u, arr + 2);
1732                                 arr[4] = oip->opcode;
1733                                 for (k = 1; k < u; ++k)
1734                                         arr[4 + k] = (k < 16) ?
1735                                                  oip->len_mask[k] : 0xff;
1736                                 offset = 4 + u;
1737                         } else
1738                                 offset = 4;
1739                 }
1740                 arr[1] = (rctd ? 0x80 : 0) | supp;
1741                 if (rctd) {
1742                         put_unaligned_be16(0xa, arr + offset);
1743                         offset += 12;
1744                 }
1745                 break;
1746         default:
1747                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1748                 kfree(arr);
1749                 return check_condition_result;
1750         }
1751         offset = (offset < a_len) ? offset : a_len;
1752         len = (offset < alloc_len) ? offset : alloc_len;
1753         errsts = fill_from_dev_buffer(scp, arr, len);
1754         kfree(arr);
1755         return errsts;
1756 }
1757
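/*
 * Illustrative sketch (not part of the driver): in the "report all
 * commands" branch above each command descriptor is 8 bytes, or 20 bytes
 * when the RCTD bit requests an appended 12 byte command timeouts
 * descriptor, which is where "bump = rctd ? 20 : 8" comes from.  A
 * hypothetical helper for sizing that response:
 */
static inline u32 sdeb_rsoc_all_len(u32 num_commands, bool rctd)
{
	u32 desc_sz = rctd ? 20 : 8;	/* 8 byte descriptor (+12 for timeouts) */

	return 4 + num_commands * desc_sz;	/* 4 byte command data length header */
}
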
1758 static int
1759 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1760 {
1761         bool repd;
1762         u32 alloc_len, len;
1763         u8 arr[16];
1764         u8 *cmd = scp->cmnd;
1765
1766         memset(arr, 0, sizeof(arr));
1767         repd = !!(cmd[2] & 0x80);
1768         alloc_len = get_unaligned_be32(cmd + 6);
1769         if (alloc_len < 4) {
1770                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1771                 return check_condition_result;
1772         }
1773         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1774         arr[1] = 0x1;           /* ITNRS */
1775         if (repd) {
1776                 arr[3] = 0xc;
1777                 len = 16;
1778         } else
1779                 len = 4;
1780
1781         len = (len < alloc_len) ? len : alloc_len;
1782         return fill_from_dev_buffer(scp, arr, len);
1783 }
1784
1785 /* <<Following mode page info copied from ST318451LW>> */
1786
1787 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1788 {       /* Read-Write Error Recovery page for mode_sense */
1789         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1790                                         5, 0, 0xff, 0xff};
1791
1792         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1793         if (1 == pcontrol)
1794                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1795         return sizeof(err_recov_pg);
1796 }
1797
1798 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1799 {       /* Disconnect-Reconnect page for mode_sense */
1800         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1801                                          0, 0, 0, 0, 0, 0, 0, 0};
1802
1803         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1804         if (1 == pcontrol)
1805                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1806         return sizeof(disconnect_pg);
1807 }
1808
1809 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1810 {       /* Format device page for mode_sense */
1811         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1812                                      0, 0, 0, 0, 0, 0, 0, 0,
1813                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1814
1815         memcpy(p, format_pg, sizeof(format_pg));
1816         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1817         p[11] = sdebug_sectors_per & 0xff;
1818         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1819         p[13] = scsi_debug_sector_size & 0xff;
1820         if (scsi_debug_removable)
1821                 p[20] |= 0x20; /* should agree with INQUIRY */
1822         if (1 == pcontrol)
1823                 memset(p + 2, 0, sizeof(format_pg) - 2);
1824         return sizeof(format_pg);
1825 }
1826
1827 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1828 {       /* Caching page for mode_sense */
1829         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1830                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1831         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1832                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1833
1834         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1835                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1836         memcpy(p, caching_pg, sizeof(caching_pg));
1837         if (1 == pcontrol)
1838                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1839         else if (2 == pcontrol)
1840                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1841         return sizeof(caching_pg);
1842 }
1843
1844 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1845 {       /* Control mode page for mode_sense */
1846         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1847                                         0, 0, 0, 0};
1848         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1849                                      0, 0, 0x2, 0x4b};
1850
1851         if (scsi_debug_dsense)
1852                 ctrl_m_pg[2] |= 0x4;
1853         else
1854                 ctrl_m_pg[2] &= ~0x4;
1855
1856         if (scsi_debug_ato)
1857                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1858
1859         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1860         if (1 == pcontrol)
1861                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1862         else if (2 == pcontrol)
1863                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1864         return sizeof(ctrl_m_pg);
1865 }
1866
1867
1868 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1869 {       /* Informational Exceptions control mode page for mode_sense */
1870         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1871                                        0, 0, 0x0, 0x0};
1872         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1873                                       0, 0, 0x0, 0x0};
1874
1875         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1876         if (1 == pcontrol)
1877                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1878         else if (2 == pcontrol)
1879                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1880         return sizeof(iec_m_pg);
1881 }
1882
1883 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1884 {       /* SAS SSP mode page - short format for mode_sense */
1885         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1886                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1887
1888         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1889         if (1 == pcontrol)
1890                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1891         return sizeof(sas_sf_m_pg);
1892 }
1893
1894
1895 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1896                               int target_dev_id)
1897 {       /* SAS phy control and discover mode page for mode_sense */
1898         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1899                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1900                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1901                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1902                     0x2, 0, 0, 0, 0, 0, 0, 0,
1903                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1904                     0, 0, 0, 0, 0, 0, 0, 0,
1905                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1906                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1907                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1908                     0x3, 0, 0, 0, 0, 0, 0, 0,
1909                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1910                     0, 0, 0, 0, 0, 0, 0, 0,
1911                 };
1912         int port_a, port_b;
1913
1914         port_a = target_dev_id + 1;
1915         port_b = port_a + 1;
1916         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1917         p[20] = (port_a >> 24);
1918         p[21] = (port_a >> 16) & 0xff;
1919         p[22] = (port_a >> 8) & 0xff;
1920         p[23] = port_a & 0xff;
1921         p[48 + 20] = (port_b >> 24);
1922         p[48 + 21] = (port_b >> 16) & 0xff;
1923         p[48 + 22] = (port_b >> 8) & 0xff;
1924         p[48 + 23] = port_b & 0xff;
1925         if (1 == pcontrol)
1926                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1927         return sizeof(sas_pcd_m_pg);
1928 }
1929
1930 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1931 {       /* SAS SSP shared protocol specific port mode subpage */
1932         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1933                     0, 0, 0, 0, 0, 0, 0, 0,
1934                 };
1935
1936         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1937         if (1 == pcontrol)
1938                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1939         return sizeof(sas_sha_m_pg);
1940 }
1941
1942 #define SDEBUG_MAX_MSENSE_SZ 256
1943
1944 static int
1945 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1946 {
1947         unsigned char dbd, llbaa;
1948         int pcontrol, pcode, subpcode, bd_len;
1949         unsigned char dev_spec;
1950         int k, alloc_len, msense_6, offset, len, target_dev_id;
1951         int target = scp->device->id;
1952         unsigned char * ap;
1953         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1954         unsigned char *cmd = scp->cmnd;
1955
1956         dbd = !!(cmd[1] & 0x8);
1957         pcontrol = (cmd[2] & 0xc0) >> 6;
1958         pcode = cmd[2] & 0x3f;
1959         subpcode = cmd[3];
1960         msense_6 = (MODE_SENSE == cmd[0]);
1961         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1962         if ((0 == scsi_debug_ptype) && (0 == dbd))
1963                 bd_len = llbaa ? 16 : 8;
1964         else
1965                 bd_len = 0;
1966         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1967         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1968         if (0x3 == pcontrol) {  /* Saving values not supported */
1969                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1970                 return check_condition_result;
1971         }
1972         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1973                         (devip->target * 1000) - 3;
1974         /* set DPOFUA bit for disks */
1975         if (0 == scsi_debug_ptype)
1976                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1977         else
1978                 dev_spec = 0x0;
1979         if (msense_6) {
1980                 arr[2] = dev_spec;
1981                 arr[3] = bd_len;
1982                 offset = 4;
1983         } else {
1984                 arr[3] = dev_spec;
1985                 if (16 == bd_len)
1986                         arr[4] = 0x1;   /* set LONGLBA bit */
1987                 arr[7] = bd_len;        /* assume 255 or less */
1988                 offset = 8;
1989         }
1990         ap = arr + offset;
1991         if ((bd_len > 0) && (!sdebug_capacity))
1992                 sdebug_capacity = get_sdebug_capacity();
1993
1994         if (8 == bd_len) {
1995                 if (sdebug_capacity > 0xfffffffe) {
1996                         ap[0] = 0xff;
1997                         ap[1] = 0xff;
1998                         ap[2] = 0xff;
1999                         ap[3] = 0xff;
2000                 } else {
2001                         ap[0] = (sdebug_capacity >> 24) & 0xff;
2002                         ap[1] = (sdebug_capacity >> 16) & 0xff;
2003                         ap[2] = (sdebug_capacity >> 8) & 0xff;
2004                         ap[3] = sdebug_capacity & 0xff;
2005                 }
2006                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2007                 ap[7] = scsi_debug_sector_size & 0xff;
2008                 offset += bd_len;
2009                 ap = arr + offset;
2010         } else if (16 == bd_len) {
2011                 unsigned long long capac = sdebug_capacity;
2012
2013                 for (k = 0; k < 8; ++k, capac >>= 8)
2014                         ap[7 - k] = capac & 0xff;
2015                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2016                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2017                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2018                 ap[15] = scsi_debug_sector_size & 0xff;
2019                 offset += bd_len;
2020                 ap = arr + offset;
2021         }
2022
2023         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2024                 /* TODO: Control Extension page */
2025                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2026                 return check_condition_result;
2027         }
2028         switch (pcode) {
2029         case 0x1:       /* Read-Write error recovery page, direct access */
2030                 len = resp_err_recov_pg(ap, pcontrol, target);
2031                 offset += len;
2032                 break;
2033         case 0x2:       /* Disconnect-Reconnect page, all devices */
2034                 len = resp_disconnect_pg(ap, pcontrol, target);
2035                 offset += len;
2036                 break;
2037         case 0x3:       /* Format device page, direct access */
2038                 len = resp_format_pg(ap, pcontrol, target);
2039                 offset += len;
2040                 break;
2041         case 0x8:       /* Caching page, direct access */
2042                 len = resp_caching_pg(ap, pcontrol, target);
2043                 offset += len;
2044                 break;
2045         case 0xa:       /* Control Mode page, all devices */
2046                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2047                 offset += len;
2048                 break;
2049         case 0x19:      /* if spc==1 then sas phy, control+discover */
2050                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2051                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2052                         return check_condition_result;
2053                 }
2054                 len = 0;
2055                 if ((0x0 == subpcode) || (0xff == subpcode))
2056                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2057                 if ((0x1 == subpcode) || (0xff == subpcode))
2058                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2059                                                   target_dev_id);
2060                 if ((0x2 == subpcode) || (0xff == subpcode))
2061                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2062                 offset += len;
2063                 break;
2064         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2065                 len = resp_iec_m_pg(ap, pcontrol, target);
2066                 offset += len;
2067                 break;
2068         case 0x3f:      /* Read all Mode pages */
2069                 if ((0 == subpcode) || (0xff == subpcode)) {
2070                         len = resp_err_recov_pg(ap, pcontrol, target);
2071                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2072                         len += resp_format_pg(ap + len, pcontrol, target);
2073                         len += resp_caching_pg(ap + len, pcontrol, target);
2074                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2075                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2076                         if (0xff == subpcode) {
2077                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2078                                                   target, target_dev_id);
2079                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2080                         }
2081                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2082                 } else {
2083                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2084                         return check_condition_result;
2085                 }
2086                 offset += len;
2087                 break;
2088         default:
2089                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2090                 return check_condition_result;
2091         }
2092         if (msense_6)
2093                 arr[0] = offset - 1;
2094         else {
2095                 arr[0] = ((offset - 2) >> 8) & 0xff;
2096                 arr[1] = (offset - 2) & 0xff;
2097         }
2098         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2099 }
2100
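/*
 * Illustrative sketch (not part of the driver): resp_mode_sense() above
 * finishes by writing the mode data length.  MODE SENSE(6) has a 4 byte
 * header with a 1 byte length that excludes itself (offset - 1), while
 * MODE SENSE(10) has an 8 byte header with a 2 byte big-endian length that
 * excludes its own two bytes (offset - 2).  A hypothetical restatement:
 */
static inline void sdeb_set_msense_len(u8 *arr, int offset, bool msense_6)
{
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16(offset - 2, arr);
}
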
2101 #define SDEBUG_MAX_MSELECT_SZ 512
2102
2103 static int
2104 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2105 {
2106         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2107         int param_len, res, mpage;
2108         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2109         unsigned char *cmd = scp->cmnd;
2110         int mselect6 = (MODE_SELECT == cmd[0]);
2111
2112         memset(arr, 0, sizeof(arr));
2113         pf = cmd[1] & 0x10;
2114         sp = cmd[1] & 0x1;
2115         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
2116         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2117                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2118                 return check_condition_result;
2119         }
2120         res = fetch_to_dev_buffer(scp, arr, param_len);
2121         if (-1 == res)
2122                 return (DID_ERROR << 16);
2123         else if ((res < param_len) &&
2124                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2125                 sdev_printk(KERN_INFO, scp->device,
2126                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2127                             __func__, param_len, res);
2128         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2129         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2130         if (md_len > 2) {
2131                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2132                 return check_condition_result;
2133         }
2134         off = bd_len + (mselect6 ? 4 : 8);
2135         mpage = arr[off] & 0x3f;
2136         ps = !!(arr[off] & 0x80);
2137         if (ps) {
2138                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2139                 return check_condition_result;
2140         }
2141         spf = !!(arr[off] & 0x40);
2142         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2143                        (arr[off + 1] + 2);
2144         if ((pg_len + off) > param_len) {
2145                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2146                                 PARAMETER_LIST_LENGTH_ERR, 0);
2147                 return check_condition_result;
2148         }
2149         switch (mpage) {
2150         case 0x8:      /* Caching Mode page */
2151                 if (caching_pg[1] == arr[off + 1]) {
2152                         memcpy(caching_pg + 2, arr + off + 2,
2153                                sizeof(caching_pg) - 2);
2154                         goto set_mode_changed_ua;
2155                 }
2156                 break;
2157         case 0xa:      /* Control Mode page */
2158                 if (ctrl_m_pg[1] == arr[off + 1]) {
2159                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2160                                sizeof(ctrl_m_pg) - 2);
2161                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2162                         goto set_mode_changed_ua;
2163                 }
2164                 break;
2165         case 0x1c:      /* Informational Exceptions Mode page */
2166                 if (iec_m_pg[1] == arr[off + 1]) {
2167                         memcpy(iec_m_pg + 2, arr + off + 2,
2168                                sizeof(iec_m_pg) - 2);
2169                         goto set_mode_changed_ua;
2170                 }
2171                 break;
2172         default:
2173                 break;
2174         }
2175         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2176         return check_condition_result;
2177 set_mode_changed_ua:
2178         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2179         return 0;
2180 }
2181
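/*
 * Illustrative sketch (not part of the driver): resp_mode_select() above
 * locates the first mode page at <header size> + <block descriptor length>,
 * i.e. 4 + bd_len for MODE SELECT(6) and 8 + bd_len for MODE SELECT(10);
 * the page code sits in the low 6 bits of that byte and the SPF bit (0x40)
 * selects the long subpage length format.  A hypothetical restatement of
 * the page length arithmetic used for pg_len:
 */
static inline int sdeb_msel_page_len(const u8 *arr, int off)
{
	bool spf = !!(arr[off] & 0x40);

	return spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		     (arr[off + 1] + 2);
}
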
2182 static int resp_temp_l_pg(unsigned char * arr)
2183 {
2184         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2185                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2186                 };
2187
2188         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2189         return sizeof(temp_l_pg);
2190 }
2191
2192 static int resp_ie_l_pg(unsigned char * arr)
2193 {
2194         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2195                 };
2196
2197         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2198         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2199                 arr[4] = THRESHOLD_EXCEEDED;
2200                 arr[5] = 0xff;
2201         }
2202         return sizeof(ie_l_pg);
2203 }
2204
2205 #define SDEBUG_MAX_LSENSE_SZ 512
2206
2207 static int resp_log_sense(struct scsi_cmnd * scp,
2208                           struct sdebug_dev_info * devip)
2209 {
2210         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2211         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2212         unsigned char *cmd = scp->cmnd;
2213
2214         memset(arr, 0, sizeof(arr));
2215         ppc = cmd[1] & 0x2;
2216         sp = cmd[1] & 0x1;
2217         if (ppc || sp) {
2218                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2219                 return check_condition_result;
2220         }
2221         pcontrol = (cmd[2] & 0xc0) >> 6;
2222         pcode = cmd[2] & 0x3f;
2223         subpcode = cmd[3] & 0xff;
2224         alloc_len = (cmd[7] << 8) + cmd[8];
2225         arr[0] = pcode;
2226         if (0 == subpcode) {
2227                 switch (pcode) {
2228                 case 0x0:       /* Supported log pages log page */
2229                         n = 4;
2230                         arr[n++] = 0x0;         /* this page */
2231                         arr[n++] = 0xd;         /* Temperature */
2232                         arr[n++] = 0x2f;        /* Informational exceptions */
2233                         arr[3] = n - 4;
2234                         break;
2235                 case 0xd:       /* Temperature log page */
2236                         arr[3] = resp_temp_l_pg(arr + 4);
2237                         break;
2238                 case 0x2f:      /* Informational exceptions log page */
2239                         arr[3] = resp_ie_l_pg(arr + 4);
2240                         break;
2241                 default:
2242                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2243                         return check_condition_result;
2244                 }
2245         } else if (0xff == subpcode) {
2246                 arr[0] |= 0x40;
2247                 arr[1] = subpcode;
2248                 switch (pcode) {
2249                 case 0x0:       /* Supported log pages and subpages log page */
2250                         n = 4;
2251                         arr[n++] = 0x0;
2252                         arr[n++] = 0x0;         /* 0,0 page */
2253                         arr[n++] = 0x0;
2254                         arr[n++] = 0xff;        /* this page */
2255                         arr[n++] = 0xd;
2256                         arr[n++] = 0x0;         /* Temperature */
2257                         arr[n++] = 0x2f;
2258                         arr[n++] = 0x0; /* Informational exceptions */
2259                         arr[3] = n - 4;
2260                         break;
2261                 case 0xd:       /* Temperature subpages */
2262                         n = 4;
2263                         arr[n++] = 0xd;
2264                         arr[n++] = 0x0;         /* Temperature */
2265                         arr[3] = n - 4;
2266                         break;
2267                 case 0x2f:      /* Informational exceptions subpages */
2268                         n = 4;
2269                         arr[n++] = 0x2f;
2270                         arr[n++] = 0x0;         /* Informational exceptions */
2271                         arr[3] = n - 4;
2272                         break;
2273                 default:
2274                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2275                         return check_condition_result;
2276                 }
2277         } else {
2278                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2279                 return check_condition_result;
2280         }
2281         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2282         return fill_from_dev_buffer(scp, arr,
2283                     min(len, SDEBUG_MAX_INQ_ARR_SZ));
2284 }
2285
2286 static int check_device_access_params(struct scsi_cmnd *scp,
2287                                       unsigned long long lba, unsigned int num)
2288 {
2289         if (lba + num > sdebug_capacity) {
2290                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2291                 return check_condition_result;
2292         }
2293         /* transfer length excessive (tie in to block limits VPD page) */
2294         if (num > sdebug_store_sectors) {
2295                 /* needs work to find which cdb byte 'num' comes from */
2296                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2297                 return check_condition_result;
2298         }
2299         return 0;
2300 }
2301
2302 /* Returns number of bytes copied or -1 if error. */
2303 static int
2304 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2305 {
2306         int ret;
2307         u64 block, rest = 0;
2308         struct scsi_data_buffer *sdb;
2309         enum dma_data_direction dir;
2310         size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
2311                        off_t);
2312
2313         if (do_write) {
2314                 sdb = scsi_out(scmd);
2315                 dir = DMA_TO_DEVICE;
2316                 func = sg_pcopy_to_buffer;
2317         } else {
2318                 sdb = scsi_in(scmd);
2319                 dir = DMA_FROM_DEVICE;
2320                 func = sg_pcopy_from_buffer;
2321         }
2322
2323         if (!sdb->length)
2324                 return 0;
2325         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2326                 return -1;
2327
2328         block = do_div(lba, sdebug_store_sectors);
2329         if (block + num > sdebug_store_sectors)
2330                 rest = block + num - sdebug_store_sectors;
2331
2332         ret = func(sdb->table.sgl, sdb->table.nents,
2333                    fake_storep + (block * scsi_debug_sector_size),
2334                    (num - rest) * scsi_debug_sector_size, 0);
2335         if (ret != (num - rest) * scsi_debug_sector_size)
2336                 return ret;
2337
2338         if (rest) {
2339                 ret += func(sdb->table.sgl, sdb->table.nents,
2340                             fake_storep, rest * scsi_debug_sector_size,
2341                             (num - rest) * scsi_debug_sector_size);
2342         }
2343
2344         return ret;
2345 }
2346
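/*
 * Illustrative sketch (not part of the driver): the simulated medium is a
 * ring of sdebug_store_sectors blocks, so do_device_access() below maps
 * the LBA with do_div() and, when a transfer crosses the end of the store,
 * splits it into a tail part and a "rest" that wraps back to block 0.  A
 * hypothetical restatement of that split calculation:
 */
static inline u32 sdeb_wrap_rest(u64 lba, u32 num, u32 store_blks)
{
	u32 block = do_div(lba, store_blks);	/* remainder; lba becomes the quotient */

	return (block + num > store_blks) ? block + num - store_blks : 0;
}
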
2347 /* If fake_store(lba,num) compares equal to the first half of arr, then copy
2348  * the second half of arr (the new write data) into fake_store(lba,num) and
2349  * return true. If the comparison fails then return false. */
2350 static bool
2351 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2352 {
2353         bool res;
2354         u64 block, rest = 0;
2355         u32 store_blks = sdebug_store_sectors;
2356         u32 lb_size = scsi_debug_sector_size;
2357
2358         block = do_div(lba, store_blks);
2359         if (block + num > store_blks)
2360                 rest = block + num - store_blks;
2361
2362         res = !memcmp(fake_storep + (block * lb_size), arr,
2363                       (num - rest) * lb_size);
2364         if (!res)
2365                 return res;
2366         if (rest)
2367                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2368                               rest * lb_size);
2369         if (!res)
2370                 return res;
2371         arr += num * lb_size;
2372         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2373         if (rest)
2374                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2375                        rest * lb_size);
2376         return res;
2377 }
2378
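/*
 * Illustrative sketch (not part of the driver): for COMPARE AND WRITE the
 * data-out buffer carries 2 * NUMBER OF LOGICAL BLOCKS blocks: first the
 * verify data that comp_write_worker() above memcmp()s against the fake
 * store, then the write data it memcpy()s in only when the compare
 * matches.  A hypothetical view of that buffer layout:
 */
static inline const u8 *sdeb_caw_write_half(const u8 *arr, u32 num,
					    u32 lb_size)
{
	return arr + (size_t)num * lb_size;	/* skip the verify half */
}
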
2379 static __be16 dif_compute_csum(const void *buf, int len)
2380 {
2381         __be16 csum;
2382
2383         if (scsi_debug_guard)
2384                 csum = (__force __be16)ip_compute_csum(buf, len);
2385         else
2386                 csum = cpu_to_be16(crc_t10dif(buf, len));
2387
2388         return csum;
2389 }
2390
2391 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2392                       sector_t sector, u32 ei_lba)
2393 {
2394         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2395
2396         if (sdt->guard_tag != csum) {
2397                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2398                         __func__,
2399                         (unsigned long)sector,
2400                         be16_to_cpu(sdt->guard_tag),
2401                         be16_to_cpu(csum));
2402                 return 0x01;
2403         }
2404         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2405             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2406                 pr_err("%s: REF check failed on sector %lu\n",
2407                         __func__, (unsigned long)sector);
2408                 return 0x03;
2409         }
2410         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2411             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2412                 pr_err("%s: REF check failed on sector %lu\n",
2413                         __func__, (unsigned long)sector);
2414                 return 0x03;
2415         }
2416         return 0;
2417 }
2418
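/*
 * Illustrative sketch (not part of the driver): dif_verify() above applies
 * the T10 DIF checks in order: the guard tag must equal the CRC (or IP
 * checksum, depending on scsi_debug_guard) of the data block, and the
 * reference tag must equal the low 32 bits of the LBA for type 1 or the
 * expected initial LBA (ei_lba) for type 2; type 3 does not check the
 * reference tag.  Callers skip blocks whose application tag is 0xffff.
 * A hypothetical restatement of the reference tag rule:
 */
static inline bool sdeb_ref_tag_ok(u32 ref_tag, sector_t sector, u32 ei_lba,
				   int dif_type)
{
	if (dif_type == 1)
		return ref_tag == (u32)(sector & 0xffffffff);
	if (dif_type == 2)
		return ref_tag == ei_lba;
	return true;	/* type 3: reference tag not checked */
}
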
2419 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2420                           unsigned int sectors, bool read)
2421 {
2422         size_t resid;
2423         void *paddr;
2424         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2425         struct sg_mapping_iter miter;
2426
2427         /* Bytes of protection data to copy into sgl */
2428         resid = sectors * sizeof(*dif_storep);
2429
2430         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2431                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2432                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2433
2434         while (sg_miter_next(&miter) && resid > 0) {
2435                 size_t len = min(miter.length, resid);
2436                 void *start = dif_store(sector);
2437                 size_t rest = 0;
2438
2439                 if (dif_store_end < start + len)
2440                         rest = start + len - dif_store_end;
2441
2442                 paddr = miter.addr;
2443
2444                 if (read)
2445                         memcpy(paddr, start, len - rest);
2446                 else
2447                         memcpy(start, paddr, len - rest);
2448
2449                 if (rest) {
2450                         if (read)
2451                                 memcpy(paddr + len - rest, dif_storep, rest);
2452                         else
2453                                 memcpy(dif_storep, paddr + len - rest, rest);
2454                 }
2455
2456                 sector += len / sizeof(*dif_storep);
2457                 resid -= len;
2458         }
2459         sg_miter_stop(&miter);
2460 }
2461
2462 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2463                             unsigned int sectors, u32 ei_lba)
2464 {
2465         unsigned int i;
2466         struct sd_dif_tuple *sdt;
2467         sector_t sector;
2468
2469         for (i = 0; i < sectors; i++, ei_lba++) {
2470                 int ret;
2471
2472                 sector = start_sec + i;
2473                 sdt = dif_store(sector);
2474
2475                 if (sdt->app_tag == cpu_to_be16(0xffff))
2476                         continue;
2477
2478                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2479                 if (ret) {
2480                         dif_errors++;
2481                         return ret;
2482                 }
2483         }
2484
2485         dif_copy_prot(SCpnt, start_sec, sectors, true);
2486         dix_reads++;
2487
2488         return 0;
2489 }
2490
2491 static int
2492 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2493 {
2494         u8 *cmd = scp->cmnd;
2495         u64 lba;
2496         u32 num;
2497         u32 ei_lba;
2498         unsigned long iflags;
2499         int ret;
2500         bool check_prot;
2501
2502         switch (cmd[0]) {
2503         case READ_16:
2504                 ei_lba = 0;
2505                 lba = get_unaligned_be64(cmd + 2);
2506                 num = get_unaligned_be32(cmd + 10);
2507                 check_prot = true;
2508                 break;
2509         case READ_10:
2510                 ei_lba = 0;
2511                 lba = get_unaligned_be32(cmd + 2);
2512                 num = get_unaligned_be16(cmd + 7);
2513                 check_prot = true;
2514                 break;
2515         case READ_6:
2516                 ei_lba = 0;
2517                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2518                       (u32)(cmd[1] & 0x1f) << 16;
2519                 num = (0 == cmd[4]) ? 256 : cmd[4];
2520                 check_prot = true;
2521                 break;
2522         case READ_12:
2523                 ei_lba = 0;
2524                 lba = get_unaligned_be32(cmd + 2);
2525                 num = get_unaligned_be32(cmd + 6);
2526                 check_prot = true;
2527                 break;
2528         case XDWRITEREAD_10:
2529                 ei_lba = 0;
2530                 lba = get_unaligned_be32(cmd + 2);
2531                 num = get_unaligned_be16(cmd + 7);
2532                 check_prot = false;
2533                 break;
2534         default:        /* assume READ(32) */
2535                 lba = get_unaligned_be64(cmd + 12);
2536                 ei_lba = get_unaligned_be32(cmd + 20);
2537                 num = get_unaligned_be32(cmd + 28);
2538                 check_prot = false;
2539                 break;
2540         }
2541         if (check_prot) {
2542                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2543                     (cmd[1] & 0xe0)) {
2544                         mk_sense_invalid_opcode(scp);
2545                         return check_condition_result;
2546                 }
2547                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2548                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2549                     (cmd[1] & 0xe0) == 0)
2550                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2551                                     "to DIF device\n");
2552         }
2553         if (sdebug_any_injecting_opt) {
2554                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2555
2556                 if (ep->inj_short)
2557                         num /= 2;
2558         }
2559
2560         /* inline check_device_access_params() */
2561         if (lba + num > sdebug_capacity) {
2562                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2563                 return check_condition_result;
2564         }
2565         /* transfer length excessive (tie in to block limits VPD page) */
2566         if (num > sdebug_store_sectors) {
2567                 /* needs work to find which cdb byte 'num' comes from */
2568                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2569                 return check_condition_result;
2570         }
2571
2572         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2573             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2574             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2575                 /* claim unrecoverable read error */
2576                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2577                 /* set info field and valid bit for fixed descriptor */
2578                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2579                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2580                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2581                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2582                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2583                 }
2584                 scsi_set_resid(scp, scsi_bufflen(scp));
2585                 return check_condition_result;
2586         }
2587
2588         read_lock_irqsave(&atomic_rw, iflags);
2589
2590         /* DIX + T10 DIF */
2591         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2592                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2593
2594                 if (prot_ret) {
2595                         read_unlock_irqrestore(&atomic_rw, iflags);
2596                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2597                         return illegal_condition_result;
2598                 }
2599         }
2600
2601         ret = do_device_access(scp, lba, num, false);
2602         read_unlock_irqrestore(&atomic_rw, iflags);
2603         if (ret == -1)
2604                 return DID_ERROR << 16;
2605
2606         scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2607
2608         if (sdebug_any_injecting_opt) {
2609                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2610
2611                 if (ep->inj_recovered) {
2612                         mk_sense_buffer(scp, RECOVERED_ERROR,
2613                                         THRESHOLD_EXCEEDED, 0);
2614                         return check_condition_result;
2615                 } else if (ep->inj_transport) {
2616                         mk_sense_buffer(scp, ABORTED_COMMAND,
2617                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2618                         return check_condition_result;
2619                 } else if (ep->inj_dif) {
2620                         /* Logical block guard check failed */
2621                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2622                         return illegal_condition_result;
2623                 } else if (ep->inj_dix) {
2624                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2625                         return illegal_condition_result;
2626                 }
2627         }
2628         return 0;
2629 }
2630
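/*
 * Debug helper: log a hex/ASCII dump of a buffer, 16 bytes per line.  Used
 * below when DIF verification of a sector fails during a protected write.
 */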
2631 static void dump_sector(unsigned char *buf, int len)
2632 {
2633         int i, j, n;
2634
2635         pr_err(">>> Sector Dump <<<\n");
2636         for (i = 0 ; i < len ; i += 16) {
2637                 char b[128];
2638
2639                 for (j = 0, n = 0; j < 16; j++) {
2640                         unsigned char c = buf[i+j];
2641
2642                         if (c >= 0x20 && c < 0x7e)
2643                                 n += scnprintf(b + n, sizeof(b) - n,
2644                                                " %c ", buf[i+j]);
2645                         else
2646                                 n += scnprintf(b + n, sizeof(b) - n,
2647                                                "%02x ", buf[i+j]);
2648                 }
2649                 pr_err("%04d: %s\n", i, b);
2650         }
2651 }
2652
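/*
 * Walk the protection and data scatter-gather lists in lockstep, verifying
 * each DIF tuple against its data sector.  On success the protection data is
 * copied into dif_storep; on failure the offending sector is dumped and a
 * non-zero value (used by the caller as the sense ASCQ) is returned.
 */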
2653 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2654                              unsigned int sectors, u32 ei_lba)
2655 {
2656         int ret;
2657         struct sd_dif_tuple *sdt;
2658         void *daddr;
2659         sector_t sector = start_sec;
2660         int ppage_offset;
2661         int dpage_offset;
2662         struct sg_mapping_iter diter;
2663         struct sg_mapping_iter piter;
2664
2665         BUG_ON(scsi_sg_count(SCpnt) == 0);
2666         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2667
2668         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2669                         scsi_prot_sg_count(SCpnt),
2670                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2671         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2672                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2673
2674         /* For each protection page */
2675         while (sg_miter_next(&piter)) {
2676                 dpage_offset = 0;
2677                 if (WARN_ON(!sg_miter_next(&diter))) {
2678                         ret = 0x01;
2679                         goto out;
2680                 }
2681
2682                 for (ppage_offset = 0; ppage_offset < piter.length;
2683                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2684                         /* If we're at the end of the current
2685                          * data page, advance to the next one.
2686                          */
2687                         if (dpage_offset >= diter.length) {
2688                                 if (WARN_ON(!sg_miter_next(&diter))) {
2689                                         ret = 0x01;
2690                                         goto out;
2691                                 }
2692                                 dpage_offset = 0;
2693                         }
2694
2695                         sdt = piter.addr + ppage_offset;
2696                         daddr = diter.addr + dpage_offset;
2697
2698                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2699                         if (ret) {
2700                                 dump_sector(daddr, scsi_debug_sector_size);
2701                                 goto out;
2702                         }
2703
2704                         sector++;
2705                         ei_lba++;
2706                         dpage_offset += scsi_debug_sector_size;
2707                 }
2708                 diter.consumed = dpage_offset;
2709                 sg_miter_stop(&diter);
2710         }
2711         sg_miter_stop(&piter);
2712
2713         dif_copy_prot(SCpnt, start_sec, sectors, false);
2714         dix_writes++;
2715
2716         return 0;
2717
2718 out:
2719         dif_errors++;
2720         sg_miter_stop(&diter);
2721         sg_miter_stop(&piter);
2722         return ret;
2723 }
2724
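/*
 * The next two helpers translate between LBAs and bit indexes in the thin
 * provisioning map, honouring the configured unmap granularity and alignment.
 * For example, with scsi_debug_unmap_granularity=8 and unmap_alignment=0,
 * LBAs 0-7 share map bit 0, LBAs 8-15 share bit 1, and map_index_to_lba()
 * returns the first LBA of a granule.
 */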
2725 static unsigned long lba_to_map_index(sector_t lba)
2726 {
2727         if (scsi_debug_unmap_alignment) {
2728                 lba += scsi_debug_unmap_granularity -
2729                         scsi_debug_unmap_alignment;
2730         }
2731         do_div(lba, scsi_debug_unmap_granularity);
2732
2733         return lba;
2734 }
2735
2736 static sector_t map_index_to_lba(unsigned long index)
2737 {
2738         sector_t lba = index * scsi_debug_unmap_granularity;
2739
2740         if (scsi_debug_unmap_alignment) {
2741                 lba -= scsi_debug_unmap_granularity -
2742                         scsi_debug_unmap_alignment;
2743         }
2744
2745         return lba;
2746 }
2747
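/*
 * Report whether the block at lba is mapped; *num returns how many blocks,
 * starting at lba, share that state (bounded by the size of the store).
 */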
2748 static unsigned int map_state(sector_t lba, unsigned int *num)
2749 {
2750         sector_t end;
2751         unsigned int mapped;
2752         unsigned long index;
2753         unsigned long next;
2754
2755         index = lba_to_map_index(lba);
2756         mapped = test_bit(index, map_storep);
2757
2758         if (mapped)
2759                 next = find_next_zero_bit(map_storep, map_size, index);
2760         else
2761                 next = find_next_bit(map_storep, map_size, index);
2762
2763         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2764         *num = end - lba;
2765
2766         return mapped;
2767 }
2768
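/* Mark as mapped each provisioning-map granule touched by [lba, lba+len). */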
2769 static void map_region(sector_t lba, unsigned int len)
2770 {
2771         sector_t end = lba + len;
2772
2773         while (lba < end) {
2774                 unsigned long index = lba_to_map_index(lba);
2775
2776                 if (index < map_size)
2777                         set_bit(index, map_storep);
2778
2779                 lba = map_index_to_lba(index + 1);
2780         }
2781 }
2782
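/*
 * Clear the map bit only for granules fully covered by [lba, lba + len).
 * When LBPRZ is set the corresponding data is zeroed, and any protection
 * information is reset to the 0xff escape pattern.
 */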
2783 static void unmap_region(sector_t lba, unsigned int len)
2784 {
2785         sector_t end = lba + len;
2786
2787         while (lba < end) {
2788                 unsigned long index = lba_to_map_index(lba);
2789
2790                 if (lba == map_index_to_lba(index) &&
2791                     lba + scsi_debug_unmap_granularity <= end &&
2792                     index < map_size) {
2793                         clear_bit(index, map_storep);
2794                         if (scsi_debug_lbprz) {
2795                                 memset(fake_storep +
2796                                        lba * scsi_debug_sector_size, 0,
2797                                        scsi_debug_sector_size *
2798                                        scsi_debug_unmap_granularity);
2799                         }
2800                         if (dif_storep) {
2801                                 memset(dif_storep + lba, 0xff,
2802                                        sizeof(*dif_storep) *
2803                                        scsi_debug_unmap_granularity);
2804                         }
2805                 }
2806                 lba = map_index_to_lba(index + 1);
2807         }
2808 }
2809
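/*
 * Service WRITE(6/10/12/16/32) and the write phase of XDWRITEREAD(10):
 * decode and validate the CDB, optionally verify the incoming protection
 * information, copy the data-out buffer into the fake store and update the
 * provisioning map when logical block provisioning is enabled.
 */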
2810 static int
2811 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2812 {
2813         u8 *cmd = scp->cmnd;
2814         u64 lba;
2815         u32 num;
2816         u32 ei_lba;
2817         unsigned long iflags;
2818         int ret;
2819         bool check_prot;
2820
2821         switch (cmd[0]) {
2822         case WRITE_16:
2823                 ei_lba = 0;
2824                 lba = get_unaligned_be64(cmd + 2);
2825                 num = get_unaligned_be32(cmd + 10);
2826                 check_prot = true;
2827                 break;
2828         case WRITE_10:
2829                 ei_lba = 0;
2830                 lba = get_unaligned_be32(cmd + 2);
2831                 num = get_unaligned_be16(cmd + 7);
2832                 check_prot = true;
2833                 break;
2834         case WRITE_6:
2835                 ei_lba = 0;
2836                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2837                       (u32)(cmd[1] & 0x1f) << 16;
2838                 num = (0 == cmd[4]) ? 256 : cmd[4];
2839                 check_prot = true;
2840                 break;
2841         case WRITE_12:
2842                 ei_lba = 0;
2843                 lba = get_unaligned_be32(cmd + 2);
2844                 num = get_unaligned_be32(cmd + 6);
2845                 check_prot = true;
2846                 break;
2847         case 0x53:      /* XDWRITEREAD(10) */
2848                 ei_lba = 0;
2849                 lba = get_unaligned_be32(cmd + 2);
2850                 num = get_unaligned_be16(cmd + 7);
2851                 check_prot = false;
2852                 break;
2853         default:        /* assume WRITE(32) */
2854                 lba = get_unaligned_be64(cmd + 12);
2855                 ei_lba = get_unaligned_be32(cmd + 20);
2856                 num = get_unaligned_be32(cmd + 28);
2857                 check_prot = false;
2858                 break;
2859         }
2860         if (check_prot) {
2861                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2862                     (cmd[1] & 0xe0)) {
2863                         mk_sense_invalid_opcode(scp);
2864                         return check_condition_result;
2865                 }
2866                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2867                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2868                     (cmd[1] & 0xe0) == 0)
2869                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2870                                     "to DIF device\n");
2871         }
2872
2873         /* inline check_device_access_params() */
2874         if (lba + num > sdebug_capacity) {
2875                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2876                 return check_condition_result;
2877         }
2878         /* transfer length excessive (tie in to block limits VPD page) */
2879         if (num > sdebug_store_sectors) {
2880                 /* needs work to find which cdb byte 'num' comes from */
2881                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2882                 return check_condition_result;
2883         }
2884
2885         write_lock_irqsave(&atomic_rw, iflags);
2886
2887         /* DIX + T10 DIF */
2888         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2889                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2890
2891                 if (prot_ret) {
2892                         write_unlock_irqrestore(&atomic_rw, iflags);
2893                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2894                         return illegal_condition_result;
2895                 }
2896         }
2897
2898         ret = do_device_access(scp, lba, num, true);
2899         if (scsi_debug_lbp())
2900                 map_region(lba, num);
2901         write_unlock_irqrestore(&atomic_rw, iflags);
2902         if (-1 == ret)
2903                 return (DID_ERROR << 16);
2904         else if ((ret < (num * scsi_debug_sector_size)) &&
2905                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2906                 sdev_printk(KERN_INFO, scp->device,
2907                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2908                             my_name, num * scsi_debug_sector_size, ret);
2909
2910         if (sdebug_any_injecting_opt) {
2911                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2912
2913                 if (ep->inj_recovered) {
2914                         mk_sense_buffer(scp, RECOVERED_ERROR,
2915                                         THRESHOLD_EXCEEDED, 0);
2916                         return check_condition_result;
2917                 } else if (ep->inj_dif) {
2918                         /* Logical block guard check failed */
2919                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2920                         return illegal_condition_result;
2921                 } else if (ep->inj_dix) {
2922                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2923                         return illegal_condition_result;
2924                 }
2925         }
2926         return 0;
2927 }
2928
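/*
 * Common back end for WRITE SAME(10/16).  Either unmap the range (UNMAP bit
 * with LBP enabled), or obtain one logical block (zeros when NDOB is set,
 * otherwise fetched from the data-out buffer), replicate it across the range
 * and update the provisioning map.
 */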
2929 static int
2930 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2931                 bool unmap, bool ndob)
2932 {
2933         unsigned long iflags;
2934         unsigned long long i;
2935         int ret;
2936
2937         ret = check_device_access_params(scp, lba, num);
2938         if (ret)
2939                 return ret;
2940
2941         write_lock_irqsave(&atomic_rw, iflags);
2942
2943         if (unmap && scsi_debug_lbp()) {
2944                 unmap_region(lba, num);
2945                 goto out;
2946         }
2947
2948         /* if ndob then zero 1 logical block, else fetch 1 logical block */
2949         if (ndob) {
2950                 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2951                        scsi_debug_sector_size);
2952                 ret = 0;
2953         } else
2954                 ret = fetch_to_dev_buffer(scp, fake_storep +
2955                                                (lba * scsi_debug_sector_size),
2956                                           scsi_debug_sector_size);
2957
2958         if (-1 == ret) {
2959                 write_unlock_irqrestore(&atomic_rw, iflags);
2960                 return (DID_ERROR << 16);
2961         } else if ((ret < (num * scsi_debug_sector_size)) &&
2962                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2963                 sdev_printk(KERN_INFO, scp->device,
2964                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2965                             my_name, "write same",
2966                             num * scsi_debug_sector_size, ret);
2967
2968         /* Copy first sector to remaining blocks */
2969         for (i = 1 ; i < num ; i++)
2970                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2971                        fake_storep + (lba * scsi_debug_sector_size),
2972                        scsi_debug_sector_size);
2973
2974         if (scsi_debug_lbp())
2975                 map_region(lba, num);
2976 out:
2977         write_unlock_irqrestore(&atomic_rw, iflags);
2978
2979         return 0;
2980 }
2981
2982 static int
2983 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2984 {
2985         u8 *cmd = scp->cmnd;
2986         u32 lba;
2987         u16 num;
2988         u32 ei_lba = 0;
2989         bool unmap = false;
2990
2991         if (cmd[1] & 0x8) {
2992                 if (scsi_debug_lbpws10 == 0) {
2993                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2994                         return check_condition_result;
2995                 } else
2996                         unmap = true;
2997         }
2998         lba = get_unaligned_be32(cmd + 2);
2999         num = get_unaligned_be16(cmd + 7);
3000         if (num > scsi_debug_write_same_length) {
3001                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3002                 return check_condition_result;
3003         }
3004         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3005 }
3006
3007 static int
3008 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3009 {
3010         u8 *cmd = scp->cmnd;
3011         u64 lba;
3012         u32 num;
3013         u32 ei_lba = 0;
3014         bool unmap = false;
3015         bool ndob = false;
3016
3017         if (cmd[1] & 0x8) {     /* UNMAP */
3018                 if (scsi_debug_lbpws == 0) {
3019                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3020                         return check_condition_result;
3021                 } else
3022                         unmap = true;
3023         }
3024         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3025                 ndob = true;
3026         lba = get_unaligned_be64(cmd + 2);
3027         num = get_unaligned_be32(cmd + 10);
3028         if (num > scsi_debug_write_same_length) {
3029                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3030                 return check_condition_result;
3031         }
3032         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3033 }
3034
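/*
 * COMPARE AND WRITE (SBC opcode 0x89).  The data-out buffer carries 2 * num
 * blocks: the verify data followed by the write data.  Both halves are
 * fetched into a scratch buffer and handed to comp_write_worker(), which
 * does the comparison (and, per SBC, the write on a match); a mismatch is
 * reported as MISCOMPARE sense data.
 */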
3035 static int
3036 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3037 {
3038         u8 *cmd = scp->cmnd;
3039         u8 *arr;
3040         u8 *fake_storep_hold;
3041         u64 lba;
3042         u32 dnum;
3043         u32 lb_size = scsi_debug_sector_size;
3044         u8 num;
3045         unsigned long iflags;
3046         int ret;
3047         int retval = 0;
3048
3049         lba = get_unaligned_be64(cmd + 2);
3050         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3051         if (0 == num)
3052                 return 0;       /* degenerate case, not an error */
3053         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3054             (cmd[1] & 0xe0)) {
3055                 mk_sense_invalid_opcode(scp);
3056                 return check_condition_result;
3057         }
3058         if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3059              scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3060             (cmd[1] & 0xe0) == 0)
3061                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3062                             "to DIF device\n");
3063
3064         /* inline check_device_access_params() */
3065         if (lba + num > sdebug_capacity) {
3066                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3067                 return check_condition_result;
3068         }
3069         /* transfer length excessive (tie in to block limits VPD page) */
3070         if (num > sdebug_store_sectors) {
3071                 /* needs work to find which cdb byte 'num' comes from */
3072                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3073                 return check_condition_result;
3074         }
3075         dnum = 2 * num;
3076         arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3077         if (NULL == arr) {
3078                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3079                                 INSUFF_RES_ASCQ);
3080                 return check_condition_result;
3081         }
3082
3083         write_lock_irqsave(&atomic_rw, iflags);
3084
3085         /* trick do_device_access() to fetch both compare and write buffers
3086          * from data-out into arr. Safe (atomic) since write_lock held. */
3087         fake_storep_hold = fake_storep;
3088         fake_storep = arr;
3089         ret = do_device_access(scp, 0, dnum, true);
3090         fake_storep = fake_storep_hold;
3091         if (ret == -1) {
3092                 retval = DID_ERROR << 16;
3093                 goto cleanup;
3094         } else if ((ret < (dnum * lb_size)) &&
3095                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3096                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3097                             "indicated=%u, IO sent=%d bytes\n", my_name,
3098                             dnum * lb_size, ret);
3099         if (!comp_write_worker(lba, num, arr)) {
3100                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3101                 retval = check_condition_result;
3102                 goto cleanup;
3103         }
3104         if (scsi_debug_lbp())
3105                 map_region(lba, num);
3106 cleanup:
3107         write_unlock_irqrestore(&atomic_rw, iflags);
3108         kfree(arr);
3109         return retval;
3110 }
3111
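/* One UNMAP block descriptor as laid out in the UNMAP parameter data. */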
3112 struct unmap_block_desc {
3113         __be64  lba;
3114         __be32  blocks;
3115         __be32  __reserved;
3116 };
3117
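/*
 * UNMAP: copy the parameter list into a local buffer, sanity check the
 * lengths, then deallocate each descriptor's LBA range under the write lock.
 * When logical block provisioning is disabled, just report success.
 */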
3118 static int
3119 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3120 {
3121         unsigned char *buf;
3122         struct unmap_block_desc *desc;
3123         unsigned int i, payload_len, descriptors;
3124         int ret;
3125         unsigned long iflags;
3126
3127
3128         if (!scsi_debug_lbp())
3129                 return 0;       /* fib and say it's done */
3130         payload_len = get_unaligned_be16(scp->cmnd + 7);
3131         BUG_ON(scsi_bufflen(scp) != payload_len);
3132
3133         descriptors = (payload_len - 8) / 16;
3134         if (descriptors > scsi_debug_unmap_max_desc) {
3135                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3136                 return check_condition_result;
3137         }
3138
3139         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3140         if (!buf) {
3141                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3142                                 INSUFF_RES_ASCQ);
3143                 return check_condition_result;
3144         }
3145
3146         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3147
3148         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3149         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3150
3151         desc = (void *)&buf[8];
3152
3153         write_lock_irqsave(&atomic_rw, iflags);
3154
3155         for (i = 0 ; i < descriptors ; i++) {
3156                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3157                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3158
3159                 ret = check_device_access_params(scp, lba, num);
3160                 if (ret)
3161                         goto out;
3162
3163                 unmap_region(lba, num);
3164         }
3165
3166         ret = 0;
3167
3168 out:
3169         write_unlock_irqrestore(&atomic_rw, iflags);
3170         kfree(buf);
3171
3172         return ret;
3173 }
3174
3175 #define SDEBUG_GET_LBA_STATUS_LEN 32
3176
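/*
 * GET LBA STATUS: return a single LBA status descriptor telling whether the
 * starting LBA is mapped (always reported mapped when LBP is off) and how
 * many contiguous blocks share that state.
 */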
3177 static int
3178 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3179 {
3180         u8 *cmd = scp->cmnd;
3181         u64 lba;
3182         u32 alloc_len, mapped, num;
3183         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3184         int ret;
3185
3186         lba = get_unaligned_be64(cmd + 2);
3187         alloc_len = get_unaligned_be32(cmd + 10);
3188
3189         if (alloc_len < 24)
3190                 return 0;
3191
3192         ret = check_device_access_params(scp, lba, 1);
3193         if (ret)
3194                 return ret;
3195
3196         if (scsi_debug_lbp())
3197                 mapped = map_state(lba, &num);
3198         else {
3199                 mapped = 1;
3200                 /* following just in case virtual_gb changed */
3201                 sdebug_capacity = get_sdebug_capacity();
3202                 if (sdebug_capacity - lba <= 0xffffffff)
3203                         num = sdebug_capacity - lba;
3204                 else
3205                         num = 0xffffffff;
3206         }
3207
3208         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3209         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3210         put_unaligned_be64(lba, arr + 8);       /* LBA */
3211         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3212         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3213
3214         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3215 }
3216
3217 #define SDEBUG_RLUN_ARR_SZ 256
3218
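/*
 * REPORT LUNS: build a LUN list covering up to scsi_debug_max_luns entries,
 * optionally skipping LUN 0 and appending the REPORT LUNS well known LUN,
 * truncated to fit the SDEBUG_RLUN_ARR_SZ response buffer.
 */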
3219 static int resp_report_luns(struct scsi_cmnd *scp,
3220                             struct sdebug_dev_info *devip)
3221 {
3222         unsigned int alloc_len;
3223         int lun_cnt, i, upper, num, n, want_wlun, shortish;
3224         u64 lun;
3225         unsigned char *cmd = scp->cmnd;
3226         int select_report = (int)cmd[2];
3227         struct scsi_lun *one_lun;
3228         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3229         unsigned char *max_addr;
3230
3231         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
3232         shortish = (alloc_len < 4);
3233         if (shortish || (select_report > 2)) {
3234                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3235                 return check_condition_result;
3236         }
3237         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3238         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3239         lun_cnt = scsi_debug_max_luns;
3240         if (1 == select_report)
3241                 lun_cnt = 0;
3242         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3243                 --lun_cnt;
3244         want_wlun = (select_report > 0) ? 1 : 0;
3245         num = lun_cnt + want_wlun;
3246         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3247         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3248         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3249                             sizeof(struct scsi_lun)), num);
3250         if (n < num) {
3251                 want_wlun = 0;
3252                 lun_cnt = n;
3253         }
3254         one_lun = (struct scsi_lun *) &arr[8];
3255         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3256         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3257              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3258              i++, lun++) {
3259                 upper = (lun >> 8) & 0x3f;
3260                 if (upper)
3261                         one_lun[i].scsi_lun[0] =
3262                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3263                 one_lun[i].scsi_lun[1] = lun & 0xff;
3264         }
3265         if (want_wlun) {
3266                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
3267                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
3268                 i++;
3269         }
3270         alloc_len = (unsigned char *)(one_lun + i) - arr;
3271         return fill_from_dev_buffer(scp, arr,
3272                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
3273 }
3274
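/*
 * XOR the data-out buffer into the bidirectional command's data-in buffer,
 * producing the XDWRITEREAD(10) read response.
 */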
3275 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3276                             unsigned int num, struct sdebug_dev_info *devip)
3277 {
3278         int j;
3279         unsigned char *kaddr, *buf;
3280         unsigned int offset;
3281         struct scsi_data_buffer *sdb = scsi_in(scp);
3282         struct sg_mapping_iter miter;
3283
3284         /* better would be to avoid this temporary buffer for the XOR */
3285         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3286         if (!buf) {
3287                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3288                                 INSUFF_RES_ASCQ);
3289                 return check_condition_result;
3290         }
3291
3292         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3293
3294         offset = 0;
3295         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3296                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
3297
3298         while (sg_miter_next(&miter)) {
3299                 kaddr = miter.addr;
3300                 for (j = 0; j < miter.length; j++)
3301                         *(kaddr + j) ^= *(buf + offset + j);
3302
3303                 offset += miter.length;
3304         }
3305         sg_miter_stop(&miter);
3306         kfree(buf);
3307
3308         return 0;
3309 }
3310
3311 static int
3312 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3313 {
3314         u8 *cmd = scp->cmnd;
3315         u64 lba;
3316         u32 num;
3317         int errsts;
3318
3319         if (!scsi_bidi_cmnd(scp)) {
3320                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3321                                 INSUFF_RES_ASCQ);
3322                 return check_condition_result;
3323         }
3324         errsts = resp_read_dt0(scp, devip);
3325         if (errsts)
3326                 return errsts;
3327         if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3328                 errsts = resp_write_dt0(scp, devip);
3329                 if (errsts)
3330                         return errsts;
3331         }
3332         lba = get_unaligned_be32(cmd + 2);
3333         num = get_unaligned_be16(cmd + 7);
3334         return resp_xdwriteread(scp, lba, num, devip);
3335 }
3336
3337 /* Called when a queued command's timer fires or its tasklet runs. */
3338 static void sdebug_q_cmd_complete(unsigned long indx)
3339 {
3340         int qa_indx;
3341         int retiring = 0;
3342         unsigned long iflags;
3343         struct sdebug_queued_cmd *sqcp;
3344         struct scsi_cmnd *scp;
3345         struct sdebug_dev_info *devip;
3346
3347         atomic_inc(&sdebug_completions);
3348         qa_indx = indx;
3349         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3350                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3351                 return;
3352         }
3353         spin_lock_irqsave(&queued_arr_lock, iflags);
3354         sqcp = &queued_arr[qa_indx];
3355         scp = sqcp->a_cmnd;
3356         if (NULL == scp) {
3357                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3358                 pr_err("%s: scp is NULL\n", __func__);
3359                 return;
3360         }
3361         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3362         if (devip)
3363                 atomic_dec(&devip->num_in_q);
3364         else
3365                 pr_err("%s: devip=NULL\n", __func__);
3366         if (atomic_read(&retired_max_queue) > 0)
3367                 retiring = 1;
3368
3369         sqcp->a_cmnd = NULL;
3370         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3371                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3372                 pr_err("%s: Unexpected completion\n", __func__);
3373                 return;
3374         }
3375
3376         if (unlikely(retiring)) {       /* user has reduced max_queue */
3377                 int k, retval;
3378
3379                 retval = atomic_read(&retired_max_queue);
3380                 if (qa_indx >= retval) {
3381                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3382                         pr_err("%s: index %d too large\n", __func__, retval);
3383                         return;
3384                 }
3385                 k = find_last_bit(queued_in_use_bm, retval);
3386                 if ((k < scsi_debug_max_queue) || (k == retval))
3387                         atomic_set(&retired_max_queue, 0);
3388                 else
3389                         atomic_set(&retired_max_queue, k + 1);
3390         }
3391         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3392         scp->scsi_done(scp); /* callback to mid level */
3393 }
3394
3395 /* Called when a queued command's high resolution timer fires. */
3396 static enum hrtimer_restart
3397 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3398 {
3399         int qa_indx;
3400         int retiring = 0;
3401         unsigned long iflags;
3402         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3403         struct sdebug_queued_cmd *sqcp;
3404         struct scsi_cmnd *scp;
3405         struct sdebug_dev_info *devip;
3406
3407         atomic_inc(&sdebug_completions);
3408         qa_indx = sd_hrtp->qa_indx;
3409         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3410                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3411                 goto the_end;
3412         }
3413         spin_lock_irqsave(&queued_arr_lock, iflags);
3414         sqcp = &queued_arr[qa_indx];
3415         scp = sqcp->a_cmnd;
3416         if (NULL == scp) {
3417                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3418                 pr_err("%s: scp is NULL\n", __func__);
3419                 goto the_end;
3420         }
3421         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3422         if (devip)
3423                 atomic_dec(&devip->num_in_q);
3424         else
3425                 pr_err("%s: devip=NULL\n", __func__);
3426         if (atomic_read(&retired_max_queue) > 0)
3427                 retiring = 1;
3428
3429         sqcp->a_cmnd = NULL;
3430         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3431                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3432                 pr_err("%s: Unexpected completion\n", __func__);
3433                 goto the_end;
3434         }
3435
3436         if (unlikely(retiring)) {       /* user has reduced max_queue */
3437                 int k, retval;
3438
3439                 retval = atomic_read(&retired_max_queue);
3440                 if (qa_indx >= retval) {
3441                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3442                         pr_err("%s: index %d too large\n", __func__, retval);
3443                         goto the_end;
3444                 }
3445                 k = find_last_bit(queued_in_use_bm, retval);
3446                 if ((k < scsi_debug_max_queue) || (k == retval))
3447                         atomic_set(&retired_max_queue, 0);
3448                 else
3449                         atomic_set(&retired_max_queue, k + 1);
3450         }
3451         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3452         scp->scsi_done(scp); /* callback to mid level */
3453 the_end:
3454         return HRTIMER_NORESTART;
3455 }
3456
3457 static struct sdebug_dev_info *
3458 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3459 {
3460         struct sdebug_dev_info *devip;
3461
3462         devip = kzalloc(sizeof(*devip), flags);
3463         if (devip) {
3464                 devip->sdbg_host = sdbg_host;
3465                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3466         }
3467         return devip;
3468 }
3469
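/*
 * Find the sdebug_dev_info matching sdev, reusing an unused entry or
 * allocating a new one if necessary; a newly claimed entry gets a power-on
 * reset unit attention queued.
 */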
3470 static struct sdebug_dev_info *devInfoReg(struct scsi_device *sdev)
3471 {
3472         struct sdebug_host_info *sdbg_host;
3473         struct sdebug_dev_info *open_devip = NULL;
3474         struct sdebug_dev_info *devip =
3475                         (struct sdebug_dev_info *)sdev->hostdata;
3476
3477         if (devip)
3478                 return devip;
3479         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3480         if (!sdbg_host) {
3481                 pr_err("%s: Host info NULL\n", __func__);
3482                 return NULL;
3483         }
3484         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3485                 if ((devip->used) && (devip->channel == sdev->channel) &&
3486                     (devip->target == sdev->id) &&
3487                     (devip->lun == sdev->lun))
3488                         return devip;
3489                 else {
3490                         if ((!devip->used) && (!open_devip))
3491                                 open_devip = devip;
3492                 }
3493         }
3494         if (!open_devip) { /* try and make a new one */
3495                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3496                 if (!open_devip) {
3497                         printk(KERN_ERR "%s: out of memory at line %d\n",
3498                                 __func__, __LINE__);
3499                         return NULL;
3500                 }
3501         }
3502
3503         open_devip->channel = sdev->channel;
3504         open_devip->target = sdev->id;
3505         open_devip->lun = sdev->lun;
3506         open_devip->sdbg_host = sdbg_host;
3507         atomic_set(&open_devip->num_in_q, 0);
3508         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3509         open_devip->used = true;
3510         return open_devip;
3511 }
3512
3513 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3514 {
3515         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3516                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3517                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3518         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3519         return 0;
3520 }
3521
3522 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3523 {
3524         struct sdebug_dev_info *devip;
3525
3526         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3527                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3528                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3529         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3530                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3531         devip = devInfoReg(sdp);
3532         if (NULL == devip)
3533                 return 1;       /* no resources, will be marked offline */
3534         sdp->hostdata = devip;
3535         blk_queue_max_segment_size(sdp->request_queue, -1U);
3536         if (scsi_debug_no_uld)
3537                 sdp->no_uld_attach = 1;
3538         return 0;
3539 }
3540
3541 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3542 {
3543         struct sdebug_dev_info *devip =
3544                 (struct sdebug_dev_info *)sdp->hostdata;
3545
3546         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3547                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3548                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3549         if (devip) {
3550                 /* make this slot available for re-use */
3551                 devip->used = false;
3552                 sdp->hostdata = NULL;
3553         }
3554 }
3555
3556 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3557 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3558 {
3559         unsigned long iflags;
3560         int k, qmax, r_qmax;
3561         struct sdebug_queued_cmd *sqcp;
3562         struct sdebug_dev_info *devip;
3563
3564         spin_lock_irqsave(&queued_arr_lock, iflags);
3565         qmax = scsi_debug_max_queue;
3566         r_qmax = atomic_read(&retired_max_queue);
3567         if (r_qmax > qmax)
3568                 qmax = r_qmax;
3569         for (k = 0; k < qmax; ++k) {
3570                 if (test_bit(k, queued_in_use_bm)) {
3571                         sqcp = &queued_arr[k];
3572                         if (cmnd == sqcp->a_cmnd) {
3573                                 devip = (struct sdebug_dev_info *)
3574                                         cmnd->device->hostdata;
3575                                 if (devip)
3576                                         atomic_dec(&devip->num_in_q);
3577                                 sqcp->a_cmnd = NULL;
3578                                 spin_unlock_irqrestore(&queued_arr_lock,
3579                                                        iflags);
3580                                 if (scsi_debug_ndelay > 0) {
3581                                         if (sqcp->sd_hrtp)
3582                                                 hrtimer_cancel(
3583                                                         &sqcp->sd_hrtp->hrt);
3584                                 } else if (scsi_debug_delay > 0) {
3585                                         if (sqcp->cmnd_timerp)
3586                                                 del_timer_sync(
3587                                                         sqcp->cmnd_timerp);
3588                                 } else if (scsi_debug_delay < 0) {
3589                                         if (sqcp->tletp)
3590                                                 tasklet_kill(sqcp->tletp);
3591                                 }
3592                                 clear_bit(k, queued_in_use_bm);
3593                                 return 1;
3594                         }
3595                 }
3596         }
3597         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3598         return 0;
3599 }
3600
3601 /* Deletes (stops) timers or tasklets of all queued commands */
3602 static void stop_all_queued(void)
3603 {
3604         unsigned long iflags;
3605         int k;
3606         struct sdebug_queued_cmd *sqcp;
3607         struct sdebug_dev_info *devip;
3608
3609         spin_lock_irqsave(&queued_arr_lock, iflags);
3610         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3611                 if (test_bit(k, queued_in_use_bm)) {
3612                         sqcp = &queued_arr[k];
3613                         if (sqcp->a_cmnd) {
3614                                 devip = (struct sdebug_dev_info *)
3615                                         sqcp->a_cmnd->device->hostdata;
3616                                 if (devip)
3617                                         atomic_dec(&devip->num_in_q);
3618                                 sqcp->a_cmnd = NULL;
3619                                 spin_unlock_irqrestore(&queued_arr_lock,
3620                                                        iflags);
3621                                 if (scsi_debug_ndelay > 0) {
3622                                         if (sqcp->sd_hrtp)
3623                                                 hrtimer_cancel(
3624                                                         &sqcp->sd_hrtp->hrt);
3625                                 } else if (scsi_debug_delay > 0) {
3626                                         if (sqcp->cmnd_timerp)
3627                                                 del_timer_sync(
3628                                                         sqcp->cmnd_timerp);
3629                                 } else if (scsi_debug_delay < 0) {
3630                                         if (sqcp->tletp)
3631                                                 tasklet_kill(sqcp->tletp);
3632                                 }
3633                                 clear_bit(k, queued_in_use_bm);
3634                                 spin_lock_irqsave(&queued_arr_lock, iflags);
3635                         }
3636                 }
3637         }
3638         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3639 }
3640
3641 /* Free heap memory (timers, tasklets, hrtimers) of all queued commands */
3642 static void free_all_queued(void)
3643 {
3644         unsigned long iflags;
3645         int k;
3646         struct sdebug_queued_cmd *sqcp;
3647
3648         spin_lock_irqsave(&queued_arr_lock, iflags);
3649         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3650                 sqcp = &queued_arr[k];
3651                 kfree(sqcp->cmnd_timerp);
3652                 sqcp->cmnd_timerp = NULL;
3653                 kfree(sqcp->tletp);
3654                 sqcp->tletp = NULL;
3655                 kfree(sqcp->sd_hrtp);
3656                 sqcp->sd_hrtp = NULL;
3657         }
3658         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3659 }
3660
3661 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3662 {
3663         ++num_aborts;
3664         if (SCpnt) {
3665                 if (SCpnt->device &&
3666                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3667                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3668                                     __func__);
3669                 stop_queued_cmnd(SCpnt);
3670         }
3671         return SUCCESS;
3672 }
3673
3674 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
3675 {
3676         struct sdebug_dev_info *devip;
3677
3678         ++num_dev_resets;
3679         if (SCpnt && SCpnt->device) {
3680                 struct scsi_device *sdp = SCpnt->device;
3681
3682                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3683                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3684                 devip = devInfoReg(sdp);
3685                 if (devip)
3686                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
3687         }
3688         return SUCCESS;
3689 }
3690
3691 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3692 {
3693         struct sdebug_host_info *sdbg_host;
3694         struct sdebug_dev_info *devip;
3695         struct scsi_device *sdp;
3696         struct Scsi_Host *hp;
3697         int k = 0;
3698
3699         ++num_target_resets;
3700         if (!SCpnt)
3701                 goto lie;
3702         sdp = SCpnt->device;
3703         if (!sdp)
3704                 goto lie;
3705         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3706                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3707         hp = sdp->host;
3708         if (!hp)
3709                 goto lie;
3710         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3711         if (sdbg_host) {
3712                 list_for_each_entry(devip,
3713                                     &sdbg_host->dev_info_list,
3714                                     dev_list)
3715                         if (devip->target == sdp->id) {
3716                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3717                                 ++k;
3718                         }
3719         }
3720         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3721                 sdev_printk(KERN_INFO, sdp,
3722                             "%s: %d device(s) found in target\n", __func__, k);
3723 lie:
3724         return SUCCESS;
3725 }
3726
3727 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
3728 {
3729         struct sdebug_host_info *sdbg_host;
3730         struct sdebug_dev_info *devip;
3731         struct scsi_device *sdp;
3732         struct Scsi_Host *hp;
3733         int k = 0;
3734
3735         ++num_bus_resets;
3736         if (!(SCpnt && SCpnt->device))
3737                 goto lie;
3738         sdp = SCpnt->device;
3739         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3740                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3741         hp = sdp->host;
3742         if (hp) {
3743                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3744                 if (sdbg_host) {
3745                         list_for_each_entry(devip,
3746                                             &sdbg_host->dev_info_list,
3747                                             dev_list) {
3748                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3749                                 ++k;
3750                         }
3751                 }
3752         }
3753         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3754                 sdev_printk(KERN_INFO, sdp,
3755                             "%s: %d device(s) found in host\n", __func__, k);
3756 lie:
3757         return SUCCESS;
3758 }
3759
3760 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
3761 {
3762         struct sdebug_host_info *sdbg_host;
3763         struct sdebug_dev_info *devip;
3764         int k = 0;
3765
3766         ++num_host_resets;
3767         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3768                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3769         spin_lock(&sdebug_host_list_lock);
3770         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3771                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3772                                     dev_list) {
3773                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3774                         ++k;
3775                 }
3776         }
3777         spin_unlock(&sdebug_host_list_lock);
3778         stop_all_queued();
3779         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3780                 sdev_printk(KERN_INFO, SCpnt->device,
3781                             "%s: %d device(s) found\n", __func__, k);
3782         return SUCCESS;
3783 }
3784
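/*
 * Write a minimal DOS/MBR partition table (0x55 0xAA signature plus up to
 * SDEBUG_MAX_PARTS primary entries of type 0x83) at the start of the RAM
 * store, with partitions aligned to simulated cylinder boundaries.
 */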
3785 static void __init sdebug_build_parts(unsigned char *ramp,
3786                                       unsigned long store_size)
3787 {
3788         struct partition *pp;
3789         int starts[SDEBUG_MAX_PARTS + 2];
3790         int sectors_per_part, num_sectors, k;
3791         int heads_by_sects, start_sec, end_sec;
3792
3793         /* assume partition table already zeroed */
3794         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3795                 return;
3796         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3797                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3798                 pr_warn("%s: reducing partitions to %d\n", __func__,
3799                         SDEBUG_MAX_PARTS);
3800         }
3801         num_sectors = (int)sdebug_store_sectors;
3802         sectors_per_part = (num_sectors - sdebug_sectors_per)
3803                            / scsi_debug_num_parts;
3804         heads_by_sects = sdebug_heads * sdebug_sectors_per;
3805         starts[0] = sdebug_sectors_per;
3806         for (k = 1; k < scsi_debug_num_parts; ++k)
3807                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3808                             * heads_by_sects;
3809         starts[scsi_debug_num_parts] = num_sectors;
3810         starts[scsi_debug_num_parts + 1] = 0;
3811
3812         ramp[510] = 0x55;       /* magic partition markings */
3813         ramp[511] = 0xAA;
3814         pp = (struct partition *)(ramp + 0x1be);
3815         for (k = 0; starts[k + 1]; ++k, ++pp) {
3816                 start_sec = starts[k];
3817                 end_sec = starts[k + 1] - 1;
3818                 pp->boot_ind = 0;
3819
3820                 pp->cyl = start_sec / heads_by_sects;
3821                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3822                            / sdebug_sectors_per;
3823                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3824
3825                 pp->end_cyl = end_sec / heads_by_sects;
3826                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3827                                / sdebug_sectors_per;
3828                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3829
3830                 pp->start_sect = cpu_to_le32(start_sec);
3831                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3832                 pp->sys_ind = 0x83;     /* plain Linux partition */
3833         }
3834 }
3835
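/*
 * Queue the command for deferred completion via a timer (delay in jiffies),
 * a high resolution timer (ndelay in nanoseconds) or a tasklet (negative
 * delay), or complete it directly in the caller's thread when delta_jiff is
 * zero.  Simulates TASK SET FULL and host-busy conditions when the queue is
 * saturated.
 */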
3836 static int
3837 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3838               int scsi_result, int delta_jiff)
3839 {
3840         unsigned long iflags;
3841         int k, num_in_q, qdepth, inject;
3842         struct sdebug_queued_cmd *sqcp = NULL;
3843         struct scsi_device *sdp;
3844
3845         if (NULL == cmnd || NULL == devip) {
3846                 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3847                         __func__);
3848                 /* no particularly good error to report back */
3849                 return SCSI_MLQUEUE_HOST_BUSY;
3850         }
             sdp = cmnd->device;     /* dereference cmnd only after the NULL check */
3851         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3852                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3853                             __func__, scsi_result);
3854         if (delta_jiff == 0)
3855                 goto respond_in_thread;
3856
3857         /* schedule the response at a later time if resources permit */
3858         spin_lock_irqsave(&queued_arr_lock, iflags);
3859         num_in_q = atomic_read(&devip->num_in_q);
3860         qdepth = cmnd->device->queue_depth;
3861         inject = 0;
3862         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3863                 if (scsi_result) {
3864                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3865                         goto respond_in_thread;
3866                 } else
3867                         scsi_result = device_qfull_result;
3868         } else if ((scsi_debug_every_nth != 0) &&
3869                    (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3870                    (scsi_result == 0)) {
3871                 if ((num_in_q == (qdepth - 1)) &&
3872                     (atomic_inc_return(&sdebug_a_tsf) >=
3873                      abs(scsi_debug_every_nth))) {
3874                         atomic_set(&sdebug_a_tsf, 0);
3875                         inject = 1;
3876                         scsi_result = device_qfull_result;
3877                 }
3878         }
3879
3880         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3881         if (k >= scsi_debug_max_queue) {
3882                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3883                 if (scsi_result)
3884                         goto respond_in_thread;
3885                 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3886                         scsi_result = device_qfull_result;
3887                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3888                         sdev_printk(KERN_INFO, sdp,
3889                                     "%s: max_queue=%d exceeded, %s\n",
3890                                     __func__, scsi_debug_max_queue,
3891                                     (scsi_result ?  "status: TASK SET FULL" :
3892                                                     "report: host busy"));
3893                 if (scsi_result)
3894                         goto respond_in_thread;
3895                 else
3896                         return SCSI_MLQUEUE_HOST_BUSY;
3897         }
3898         __set_bit(k, queued_in_use_bm);
3899         atomic_inc(&devip->num_in_q);
3900         sqcp = &queued_arr[k];
3901         sqcp->a_cmnd = cmnd;
3902         cmnd->result = scsi_result;
3903         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3904         if (delta_jiff > 0) {
3905                 if (NULL == sqcp->cmnd_timerp) {
3906                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3907                                                     GFP_ATOMIC);
3908                         if (NULL == sqcp->cmnd_timerp)
3909                                 return SCSI_MLQUEUE_HOST_BUSY;
3910                         init_timer(sqcp->cmnd_timerp);
3911                 }
3912                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3913                 sqcp->cmnd_timerp->data = k;
3914                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3915                 add_timer(sqcp->cmnd_timerp);
3916         } else if (scsi_debug_ndelay > 0) {
3917                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3918                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3919
3920                 if (NULL == sd_hp) {
3921                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3922                         if (NULL == sd_hp)
3923                                 return SCSI_MLQUEUE_HOST_BUSY;
3924                         sqcp->sd_hrtp = sd_hp;
3925                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3926                                      HRTIMER_MODE_REL);
3927                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3928                         sd_hp->qa_indx = k;
3929                 }
3930                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3931         } else {        /* delay < 0 */
3932                 if (NULL == sqcp->tletp) {
3933                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3934                                               GFP_ATOMIC);
3935                         if (NULL == sqcp->tletp)
3936                                 return SCSI_MLQUEUE_HOST_BUSY;
3937                         tasklet_init(sqcp->tletp,
3938                                      sdebug_q_cmd_complete, k);
3939                 }
3940                 if (-1 == delta_jiff)
3941                         tasklet_hi_schedule(sqcp->tletp);
3942                 else
3943                         tasklet_schedule(sqcp->tletp);
3944         }
3945         if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3946             (scsi_result == device_qfull_result))
3947                 sdev_printk(KERN_INFO, sdp,
3948                             "%s: num_in_q=%d +1, %s%s\n", __func__,
3949                             num_in_q, (inject ? "<inject> " : ""),
3950                             "status: TASK SET FULL");
3951         return 0;
3952
3953 respond_in_thread:      /* call back to mid-layer using invocation thread */
3954         cmnd->result = scsi_result;
3955         cmnd->scsi_done(cmnd);
3956         return 0;
3957 }
3958
3959 /* Note: The following macros create attribute files in the
3960    /sys/module/scsi_debug/parameters directory. Unfortunately this
3961    driver is unaware of a change and cannot trigger auxiliary actions
3962    as it can when the corresponding attribute in the
3963    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3964  */
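/* For example, 'echo 1 > /sys/module/scsi_debug/parameters/opts' enables the
 * "noise" option but, unlike writing the driver attribute of the same name,
 * does not reset the command counters. */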
3965 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3966 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3967 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3968 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3969 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3970 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3971 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3972 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3973 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3974 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3975 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3976 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3977 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3978 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3979 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3980 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3981 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3982 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3983 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3984 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3985 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3986 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3987 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3988 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3989 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3990 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3991 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3992 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3993 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3994 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3995 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3996 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3997 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3998 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3999 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4000 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4001 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4002 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4003                    S_IRUGO | S_IWUSR);
4004 module_param_named(write_same_length, scsi_debug_write_same_length, int,
4005                    S_IRUGO | S_IWUSR);
4006
4007 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4008 MODULE_DESCRIPTION("SCSI debug adapter driver");
4009 MODULE_LICENSE("GPL");
4010 MODULE_VERSION(SCSI_DEBUG_VERSION);
4011
4012 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4013 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4014 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4015 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4016 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4017 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4018 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4019 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4020 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4021 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4022 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4023 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4024 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4025 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4026 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4027 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4028 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4029 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4030 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4031 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4032 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4033 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4034 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4035 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4036 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
4037 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4038 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4039 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4040 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4041 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4042 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4043 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4044 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4045 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4046 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4047 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4048 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4049 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4050 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4051
4052 static char sdebug_info[256];
4053
4054 static const char * scsi_debug_info(struct Scsi_Host * shp)
4055 {
4056         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4057                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4058                 scsi_debug_version_date, scsi_debug_dev_size_mb,
4059                 scsi_debug_opts);
4060         return sdebug_info;
4061 }
4062
4063 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4064 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4065 {
4066         char arr[16];
4067         int opts;
4068         int minLen = length > 15 ? 15 : length;
4069
4070         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4071                 return -EACCES;
4072         memcpy(arr, buffer, minLen);
4073         arr[minLen] = '\0';
4074         if (1 != sscanf(arr, "%d", &opts))
4075                 return -EINVAL;
4076         scsi_debug_opts = opts;
4077         if (scsi_debug_every_nth != 0)
4078                 atomic_set(&sdebug_cmnd_count, 0);
4079         return length;
4080 }
4081
4082 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4083  * same for each scsi_debug host (if more than one). Some of the counters
4084  * output are not atomics so might be inaccurate in a busy system. */
4085 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4086 {
4087         int f, l;
4088         char b[32];
4089
4090         if (scsi_debug_every_nth > 0)
4091                 snprintf(b, sizeof(b), " (curr:%d)",
4092                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4093                                 atomic_read(&sdebug_a_tsf) :
4094                                 atomic_read(&sdebug_cmnd_count)));
4095         else
4096                 b[0] = '\0';
4097
4098         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4099                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4100                 "every_nth=%d%s\n"
4101                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4102                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4103                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4104                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4105                 "usec_in_jiffy=%lu\n",
4106                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
4107                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4108                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4109                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
4110                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4111                 sdebug_sectors_per, num_aborts, num_dev_resets,
4112                 num_target_resets, num_bus_resets, num_host_resets,
4113                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4114
4115         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4116         if (f != scsi_debug_max_queue) {
4117                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4118                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
4119                            "queued_in_use_bm", f, l);
4120         }
4121         return 0;
4122 }
4123
4124 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4125 {
4126         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4127 }
4128 /* Returns -EBUSY if delay is being changed and commands are queued */
4129 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4130                            size_t count)
4131 {
4132         int delay, res;
4133
4134         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4135                 res = count;
4136                 if (scsi_debug_delay != delay) {
4137                         unsigned long iflags;
4138                         int k;
4139
4140                         spin_lock_irqsave(&queued_arr_lock, iflags);
4141                         k = find_first_bit(queued_in_use_bm,
4142                                            scsi_debug_max_queue);
4143                         if (k != scsi_debug_max_queue)
4144                                 res = -EBUSY;   /* have queued commands */
4145                         else {
4146                                 scsi_debug_delay = delay;
4147                                 scsi_debug_ndelay = 0;
4148                         }
4149                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4150                 }
4151                 return res;
4152         }
4153         return -EINVAL;
4154 }
4155 static DRIVER_ATTR_RW(delay);
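/* e.g. 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' makes subsequent
 * commands complete in the submitting thread (see schedule_resp() above). */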
4156
4157 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4158 {
4159         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4160 }
4161 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4162 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4163 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4164                            size_t count)
4165 {
4166         unsigned long iflags;
4167         int ndelay, res, k;
4168
4169         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4170             (ndelay >= 0) && (ndelay < 1000000000)) {
4171                 res = count;
4172                 if (scsi_debug_ndelay != ndelay) {
4173                         spin_lock_irqsave(&queued_arr_lock, iflags);
4174                         k = find_first_bit(queued_in_use_bm,
4175                                            scsi_debug_max_queue);
4176                         if (k != scsi_debug_max_queue)
4177                                 res = -EBUSY;   /* have queued commands */
4178                         else {
4179                                 scsi_debug_ndelay = ndelay;
4180                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4181                                                           : DEF_DELAY;
4182                         }
4183                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4184                 }
4185                 return res;
4186         }
4187         return -EINVAL;
4188 }
4189 static DRIVER_ATTR_RW(ndelay);
4190
4191 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4192 {
4193         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4194 }
4195
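/* Accepts a hex value prefixed by "0x" or a plain decimal value. Writing
 * this attribute also resets the command and TASK SET FULL counters. */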
4196 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4197                           size_t count)
4198 {
4199         int opts;
4200         char work[20];
4201
4202         if (1 == sscanf(buf, "%10s", work)) {
4203                 if (0 == strncasecmp(work,"0x", 2)) {
4204                         if (1 == sscanf(&work[2], "%x", &opts))
4205                                 goto opts_done;
4206                 } else {
4207                         if (1 == sscanf(work, "%d", &opts))
4208                                 goto opts_done;
4209                 }
4210         }
4211         return -EINVAL;
4212 opts_done:
4213         scsi_debug_opts = opts;
4214         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4215                 sdebug_any_injecting_opt = true;
4216         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4217                 sdebug_any_injecting_opt = true;
4218         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4219                 sdebug_any_injecting_opt = true;
4220         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4221                 sdebug_any_injecting_opt = true;
4222         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4223                 sdebug_any_injecting_opt = true;
4224         atomic_set(&sdebug_cmnd_count, 0);
4225         atomic_set(&sdebug_a_tsf, 0);
4226         return count;
4227 }
4228 static DRIVER_ATTR_RW(opts);
4229
4230 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4231 {
4232         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4233 }
4234 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4235                            size_t count)
4236 {
4237         int n;
4238
4239         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4240                 scsi_debug_ptype = n;
4241                 return count;
4242         }
4243         return -EINVAL;
4244 }
4245 static DRIVER_ATTR_RW(ptype);
4246
4247 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4248 {
4249         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4250 }
4251 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4252                             size_t count)
4253 {
4254         int n;
4255
4256         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4257                 scsi_debug_dsense = n;
4258                 return count;
4259         }
4260         return -EINVAL;
4261 }
4262 static DRIVER_ATTR_RW(dsense);
4263
4264 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4265 {
4266         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4267 }
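/* Clearing fake_rw here allocates the shared ramdisk lazily when the module
 * was loaded with fake_rw set, since scsi_debug_init() skipped it then. */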
4268 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4269                              size_t count)
4270 {
4271         int n;
4272
4273         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4274                 n = (n > 0);
4275                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4276                 if (scsi_debug_fake_rw != n) {
4277                         if ((0 == n) && (NULL == fake_storep)) {
4278                                 unsigned long sz =
4279                                         (unsigned long)scsi_debug_dev_size_mb *
4280                                         1048576;
4281
4282                                 fake_storep = vmalloc(sz);
4283                                 if (NULL == fake_storep) {
4284                                         pr_err("%s: out of memory, 9\n",
4285                                                __func__);
4286                                         return -ENOMEM;
4287                                 }
4288                                 memset(fake_storep, 0, sz);
4289                         }
4290                         scsi_debug_fake_rw = n;
4291                 }
4292                 return count;
4293         }
4294         return -EINVAL;
4295 }
4296 static DRIVER_ATTR_RW(fake_rw);
4297
4298 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4299 {
4300         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4301 }
4302 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4303                               size_t count)
4304 {
4305         int n;
4306
4307         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4308                 scsi_debug_no_lun_0 = n;
4309                 return count;
4310         }
4311         return -EINVAL;
4312 }
4313 static DRIVER_ATTR_RW(no_lun_0);
4314
4315 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4316 {
4317         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4318 }
4319 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4320                               size_t count)
4321 {
4322         int n;
4323
4324         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4325                 scsi_debug_num_tgts = n;
4326                 sdebug_max_tgts_luns();
4327                 return count;
4328         }
4329         return -EINVAL;
4330 }
4331 static DRIVER_ATTR_RW(num_tgts);
4332
4333 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4334 {
4335         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4336 }
4337 static DRIVER_ATTR_RO(dev_size_mb);
4338
4339 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4340 {
4341         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4342 }
4343 static DRIVER_ATTR_RO(num_parts);
4344
4345 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4346 {
4347         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4348 }
4349 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4350                                size_t count)
4351 {
4352         int nth;
4353
4354         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4355                 scsi_debug_every_nth = nth;
4356                 atomic_set(&sdebug_cmnd_count, 0);
4357                 return count;
4358         }
4359         return -EINVAL;
4360 }
4361 static DRIVER_ATTR_RW(every_nth);
4362
4363 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4364 {
4365         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4366 }
4367 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4368                               size_t count)
4369 {
4370         int n;
4371
4372         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4373                 scsi_debug_max_luns = n;
4374                 sdebug_max_tgts_luns();
4375                 return count;
4376         }
4377         return -EINVAL;
4378 }
4379 static DRIVER_ATTR_RW(max_luns);
4380
4381 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4382 {
4383         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4384 }
4385 /* N.B. max_queue can be changed while there are queued commands. In flight
4386  * commands beyond the new max_queue will be completed. */
4387 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4388                                size_t count)
4389 {
4390         unsigned long iflags;
4391         int n, k;
4392
4393         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4394             (n <= SCSI_DEBUG_CANQUEUE)) {
4395                 spin_lock_irqsave(&queued_arr_lock, iflags);
4396                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4397                 scsi_debug_max_queue = n;
4398                 if (SCSI_DEBUG_CANQUEUE == k)
4399                         atomic_set(&retired_max_queue, 0);
4400                 else if (k >= n)
4401                         atomic_set(&retired_max_queue, k + 1);
4402                 else
4403                         atomic_set(&retired_max_queue, 0);
4404                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4405                 return count;
4406         }
4407         return -EINVAL;
4408 }
4409 static DRIVER_ATTR_RW(max_queue);
4410
4411 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4412 {
4413         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4414 }
4415 static DRIVER_ATTR_RO(no_uld);
4416
4417 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4418 {
4419         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4420 }
4421 static DRIVER_ATTR_RO(scsi_level);
4422
4423 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4424 {
4425         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4426 }
4427 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4428                                 size_t count)
4429 {
4430         int n;
4431         bool changed;
4432
4433         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4434                 changed = (scsi_debug_virtual_gb != n);
4435                 scsi_debug_virtual_gb = n;
4436                 sdebug_capacity = get_sdebug_capacity();
4437                 if (changed) {
4438                         struct sdebug_host_info *sdhp;
4439                         struct sdebug_dev_info *dp;
4440
4441                         list_for_each_entry(sdhp, &sdebug_host_list,
4442                                             host_list) {
4443                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4444                                                     dev_list) {
4445                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4446                                                 dp->uas_bm);
4447                                 }
4448                         }
4449                 }
4450                 return count;
4451         }
4452         return -EINVAL;
4453 }
4454 static DRIVER_ATTR_RW(virtual_gb);
4455
4456 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4457 {
4458         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4459 }
4460
4461 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4462                               size_t count)
4463 {
4464         int delta_hosts;
4465
4466         if (sscanf(buf, "%d", &delta_hosts) != 1)
4467                 return -EINVAL;
4468         if (delta_hosts > 0) {
4469                 do {
4470                         sdebug_add_adapter();
4471                 } while (--delta_hosts);
4472         } else if (delta_hosts < 0) {
4473                 do {
4474                         sdebug_remove_adapter();
4475                 } while (++delta_hosts);
4476         }
4477         return count;
4478 }
4479 static DRIVER_ATTR_RW(add_host);
4480
4481 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4482 {
4483         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4484 }
4485 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4486                                     size_t count)
4487 {
4488         int n;
4489
4490         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4491                 scsi_debug_vpd_use_hostno = n;
4492                 return count;
4493         }
4494         return -EINVAL;
4495 }
4496 static DRIVER_ATTR_RW(vpd_use_hostno);
4497
4498 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4499 {
4500         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4501 }
4502 static DRIVER_ATTR_RO(sector_size);
4503
4504 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4505 {
4506         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4507 }
4508 static DRIVER_ATTR_RO(dix);
4509
4510 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4511 {
4512         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4513 }
4514 static DRIVER_ATTR_RO(dif);
4515
4516 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4517 {
4518         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4519 }
4520 static DRIVER_ATTR_RO(guard);
4521
4522 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4523 {
4524         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4525 }
4526 static DRIVER_ATTR_RO(ato);
4527
4528 static ssize_t map_show(struct device_driver *ddp, char *buf)
4529 {
4530         ssize_t count;
4531
4532         if (!scsi_debug_lbp())
4533                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4534                                  sdebug_store_sectors);
4535
4536         count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
4537
4538         buf[count++] = '\n';
4539         buf[count++] = 0;
4540
4541         return count;
4542 }
4543 static DRIVER_ATTR_RO(map);
4544
4545 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4546 {
4547         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4548 }
4549 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4550                                size_t count)
4551 {
4552         int n;
4553
4554         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4555                 scsi_debug_removable = (n > 0);
4556                 return count;
4557         }
4558         return -EINVAL;
4559 }
4560 static DRIVER_ATTR_RW(removable);
4561
4562 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4563 {
4564         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4565 }
4566 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4567 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4568                                size_t count)
4569 {
4570         int n, res;
4571
4572         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4573                 bool new_host_lock = (n > 0);
4574
4575                 res = count;
4576                 if (new_host_lock != scsi_debug_host_lock) {
4577                         unsigned long iflags;
4578                         int k;
4579
4580                         spin_lock_irqsave(&queued_arr_lock, iflags);
4581                         k = find_first_bit(queued_in_use_bm,
4582                                            scsi_debug_max_queue);
4583                         if (k != scsi_debug_max_queue)
4584                                 res = -EBUSY;   /* have queued commands */
4585                         else
4586                                 scsi_debug_host_lock = new_host_lock;
4587                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4588                 }
4589                 return res;
4590         }
4591         return -EINVAL;
4592 }
4593 static DRIVER_ATTR_RW(host_lock);
4594
4595 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4596 {
4597         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4598 }
4599 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4600                             size_t count)
4601 {
4602         int n;
4603
4604         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4605                 scsi_debug_strict = (n > 0);
4606                 return count;
4607         }
4608         return -EINVAL;
4609 }
4610 static DRIVER_ATTR_RW(strict);
4611
4612
4613 /* Note: The following array creates attribute files in the
4614    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4615    files (over those found in the /sys/module/scsi_debug/parameters
4616    directory) is that auxiliary actions can be triggered when an attribute
4617    is changed. For example see: add_host_store() above.
4618  */
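/* e.g. 'echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host' adds one more
 * simulated adapter, while a negative count removes that many adapters. */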
4619
4620 static struct attribute *sdebug_drv_attrs[] = {
4621         &driver_attr_delay.attr,
4622         &driver_attr_opts.attr,
4623         &driver_attr_ptype.attr,
4624         &driver_attr_dsense.attr,
4625         &driver_attr_fake_rw.attr,
4626         &driver_attr_no_lun_0.attr,
4627         &driver_attr_num_tgts.attr,
4628         &driver_attr_dev_size_mb.attr,
4629         &driver_attr_num_parts.attr,
4630         &driver_attr_every_nth.attr,
4631         &driver_attr_max_luns.attr,
4632         &driver_attr_max_queue.attr,
4633         &driver_attr_no_uld.attr,
4634         &driver_attr_scsi_level.attr,
4635         &driver_attr_virtual_gb.attr,
4636         &driver_attr_add_host.attr,
4637         &driver_attr_vpd_use_hostno.attr,
4638         &driver_attr_sector_size.attr,
4639         &driver_attr_dix.attr,
4640         &driver_attr_dif.attr,
4641         &driver_attr_guard.attr,
4642         &driver_attr_ato.attr,
4643         &driver_attr_map.attr,
4644         &driver_attr_removable.attr,
4645         &driver_attr_host_lock.attr,
4646         &driver_attr_ndelay.attr,
4647         &driver_attr_strict.attr,
4648         NULL,
4649 };
4650 ATTRIBUTE_GROUPS(sdebug_drv);
4651
4652 static struct device *pseudo_primary;
4653
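/*
 * Module initialization: validate the trickier parameters (sector_size, dif,
 * guard, ato, physblk_exp, lowest_aligned), size and allocate the shared
 * ramdisk plus the optional DIF and provisioning maps, register the pseudo
 * root device, bus and driver, and finally add scsi_debug_add_host hosts.
 */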
4654 static int __init scsi_debug_init(void)
4655 {
4656         unsigned long sz;
4657         int host_to_add;
4658         int k;
4659         int ret;
4660
4661         atomic_set(&sdebug_cmnd_count, 0);
4662         atomic_set(&sdebug_completions, 0);
4663         atomic_set(&retired_max_queue, 0);
4664
4665         if (scsi_debug_ndelay >= 1000000000) {
4666                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4667                         __func__);
4668                 scsi_debug_ndelay = 0;
4669         } else if (scsi_debug_ndelay > 0)
4670                 scsi_debug_delay = DELAY_OVERRIDDEN;
4671
4672         switch (scsi_debug_sector_size) {
4673         case  512:
4674         case 1024:
4675         case 2048:
4676         case 4096:
4677                 break;
4678         default:
4679                 pr_err("%s: invalid sector_size %d\n", __func__,
4680                        scsi_debug_sector_size);
4681                 return -EINVAL;
4682         }
4683
4684         switch (scsi_debug_dif) {
4685
4686         case SD_DIF_TYPE0_PROTECTION:
4687         case SD_DIF_TYPE1_PROTECTION:
4688         case SD_DIF_TYPE2_PROTECTION:
4689         case SD_DIF_TYPE3_PROTECTION:
4690                 break;
4691
4692         default:
4693                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4694                 return -EINVAL;
4695         }
4696
4697         if (scsi_debug_guard > 1) {
4698                 pr_err("%s: guard must be 0 or 1\n", __func__);
4699                 return -EINVAL;
4700         }
4701
4702         if (scsi_debug_ato > 1) {
4703                 pr_err("%s: ato must be 0 or 1\n", __func__);
4704                 return -EINVAL;
4705         }
4706
4707         if (scsi_debug_physblk_exp > 15) {
4708                 pr_err("%s: invalid physblk_exp %u\n", __func__,
4709                        scsi_debug_physblk_exp);
4710                 return -EINVAL;
4711         }
4712
4713         if (scsi_debug_lowest_aligned > 0x3fff) {
4714                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4715                        scsi_debug_lowest_aligned);
4716                 return -EINVAL;
4717         }
4718
4719         if (scsi_debug_dev_size_mb < 1)
4720                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4721         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4722         sdebug_store_sectors = sz / scsi_debug_sector_size;
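        /* e.g. the default dev_size_mb=8 with sector_size=512 gives
         * sz = 8 * 1048576 = 8388608 bytes -> 16384 store sectors */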
4723         sdebug_capacity = get_sdebug_capacity();
4724
4725         /* play around with geometry, don't waste too much on track 0 */
4726         sdebug_heads = 8;
4727         sdebug_sectors_per = 32;
4728         if (scsi_debug_dev_size_mb >= 256)
4729                 sdebug_heads = 64;
4730         else if (scsi_debug_dev_size_mb >= 16)
4731                 sdebug_heads = 32;
4732         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4733                                (sdebug_sectors_per * sdebug_heads);
4734         if (sdebug_cylinders_per >= 1024) {
4735                 /* other LLDs do this; implies >= 1GB ram disk ... */
4736                 sdebug_heads = 255;
4737                 sdebug_sectors_per = 63;
4738                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4739                                (sdebug_sectors_per * sdebug_heads);
4740         }
4741
4742         if (0 == scsi_debug_fake_rw) {
4743                 fake_storep = vmalloc(sz);
4744                 if (NULL == fake_storep) {
4745                         pr_err("%s: out of memory, 1\n", __func__);
4746                         return -ENOMEM;
4747                 }
4748                 memset(fake_storep, 0, sz);
4749                 if (scsi_debug_num_parts > 0)
4750                         sdebug_build_parts(fake_storep, sz);
4751         }
4752
4753         if (scsi_debug_dix) {
4754                 int dif_size;
4755
4756                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4757                 dif_storep = vmalloc(dif_size);
4758
4759                 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4760                         dif_storep);
4761
4762                 if (dif_storep == NULL) {
4763                         pr_err("%s: out of mem. (DIX)\n", __func__);
4764                         ret = -ENOMEM;
4765                         goto free_vm;
4766                 }
4767
4768                 memset(dif_storep, 0xff, dif_size);
4769         }
4770
4771         /* Logical Block Provisioning */
4772         if (scsi_debug_lbp()) {
4773                 scsi_debug_unmap_max_blocks =
4774                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4775
4776                 scsi_debug_unmap_max_desc =
4777                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4778
4779                 scsi_debug_unmap_granularity =
4780                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4781
4782                 if (scsi_debug_unmap_alignment &&
4783                     scsi_debug_unmap_granularity <=
4784                     scsi_debug_unmap_alignment) {
4785                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
4786                                __func__);
4787                         return -EINVAL;
4788                 }
4789
4790                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4791                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4792
4793                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4794
4795                 if (map_storep == NULL) {
4796                         pr_err("%s: out of mem. (MAP)\n", __func__);
4797                         ret = -ENOMEM;
4798                         goto free_vm;
4799                 }
4800
4801                 bitmap_zero(map_storep, map_size);
4802
4803                 /* Map first 1KB for partition table */
4804                 if (scsi_debug_num_parts)
4805                         map_region(0, 2);
4806         }
4807
4808         pseudo_primary = root_device_register("pseudo_0");
4809         if (IS_ERR(pseudo_primary)) {
4810                 pr_warn("%s: root_device_register() error\n", __func__);
4811                 ret = PTR_ERR(pseudo_primary);
4812                 goto free_vm;
4813         }
4814         ret = bus_register(&pseudo_lld_bus);
4815         if (ret < 0) {
4816                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4817                 goto dev_unreg;
4818         }
4819         ret = driver_register(&sdebug_driverfs_driver);
4820         if (ret < 0) {
4821                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
4822                 goto bus_unreg;
4823         }
4824
4825         host_to_add = scsi_debug_add_host;
4826         scsi_debug_add_host = 0;
4827
4828         for (k = 0; k < host_to_add; k++) {
4829                 if (sdebug_add_adapter()) {
4830                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
4831                                 __func__, k);
4832                         break;
4833                 }
4834         }
4835
4836         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4837                 pr_info("%s: built %d host(s)\n", __func__,
4838                         scsi_debug_add_host);
4839         }
4840         return 0;
4841
4842 bus_unreg:
4843         bus_unregister(&pseudo_lld_bus);
4844 dev_unreg:
4845         root_device_unregister(pseudo_primary);
4846 free_vm:
4847         if (map_storep)
4848                 vfree(map_storep);
4849         if (dif_storep)
4850                 vfree(dif_storep);
4851         vfree(fake_storep);
4852
4853         return ret;
4854 }
4855
4856 static void __exit scsi_debug_exit(void)
4857 {
4858         int k = scsi_debug_add_host;
4859
4860         stop_all_queued();
4861         free_all_queued();
4862         for (; k; k--)
4863                 sdebug_remove_adapter();
4864         driver_unregister(&sdebug_driverfs_driver);
4865         bus_unregister(&pseudo_lld_bus);
4866         root_device_unregister(pseudo_primary);
4867
4868         vfree(map_storep);      /* vfree() handles NULL pointers */
4869         vfree(dif_storep);
4870
4871         vfree(fake_storep);
4872 }
4873
4874 device_initcall(scsi_debug_init);
4875 module_exit(scsi_debug_exit);
4876
4877 static void sdebug_release_adapter(struct device * dev)
4878 {
4879         struct sdebug_host_info *sdbg_host;
4880
4881         sdbg_host = to_sdebug_host(dev);
4882         kfree(sdbg_host);
4883 }
4884
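/* Allocate one simulated adapter together with its num_tgts * max_luns
 * device entries and register it on the pseudo bus, which invokes
 * sdebug_driver_probe() below to attach a Scsi_Host. */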
4885 static int sdebug_add_adapter(void)
4886 {
4887         int k, devs_per_host;
4888         int error = 0;
4889         struct sdebug_host_info *sdbg_host;
4890         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4891
4892         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4893         if (NULL == sdbg_host) {
4894                 printk(KERN_ERR "%s: out of memory at line %d\n",
4895                        __func__, __LINE__);
4896                 return -ENOMEM;
4897         }
4898
4899         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4900
4901         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4902         for (k = 0; k < devs_per_host; k++) {
4903                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4904                 if (!sdbg_devinfo) {
4905                         printk(KERN_ERR "%s: out of memory at line %d\n",
4906                                __func__, __LINE__);
4907                         error = -ENOMEM;
4908                         goto clean;
4909                 }
4910         }
4911
4912         spin_lock(&sdebug_host_list_lock);
4913         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4914         spin_unlock(&sdebug_host_list_lock);
4915
4916         sdbg_host->dev.bus = &pseudo_lld_bus;
4917         sdbg_host->dev.parent = pseudo_primary;
4918         sdbg_host->dev.release = &sdebug_release_adapter;
4919         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4920
4921         error = device_register(&sdbg_host->dev);
4922
4923         if (error)
4924                 goto clean;
4925
4926         ++scsi_debug_add_host;
4927         return error;
4928
4929 clean:
4930         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4931                                  dev_list) {
4932                 list_del(&sdbg_devinfo->dev_list);
4933                 kfree(sdbg_devinfo);
4934         }
4935
4936         kfree(sdbg_host);
4937         return error;
4938 }
4939
4940 static void sdebug_remove_adapter(void)
4941 {
4942         struct sdebug_host_info * sdbg_host = NULL;
4943
4944         spin_lock(&sdebug_host_list_lock);
4945         if (!list_empty(&sdebug_host_list)) {
4946                 sdbg_host = list_entry(sdebug_host_list.prev,
4947                                        struct sdebug_host_info, host_list);
4948                 list_del(&sdbg_host->host_list);
4949         }
4950         spin_unlock(&sdebug_host_list_lock);
4951
4952         if (!sdbg_host)
4953                 return;
4954
4955         device_unregister(&sdbg_host->dev);
4956         --scsi_debug_add_host;
4957 }
4958
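/* Midlayer callback to change a device's queue depth; the requested value
 * is clamped to the range [1, SCSI_DEBUG_CANQUEUE + 10]. */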
4959 static int
4960 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4961 {
4962         int num_in_q = 0;
4963         unsigned long iflags;
4964         struct sdebug_dev_info *devip;
4965
4966         spin_lock_irqsave(&queued_arr_lock, iflags);
4967         devip = (struct sdebug_dev_info *)sdev->hostdata;
4968         if (NULL == devip) {
4969                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4970                 return  -ENODEV;
4971         }
4972         num_in_q = atomic_read(&devip->num_in_q);
4973         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4974
4975         if (qdepth < 1)
4976                 qdepth = 1;
4977         /* allow exceeding the host's queued_arr size for testing */
4978         if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4979                 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4980         scsi_change_queue_depth(sdev, qdepth);
4981
4982         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4983                 sdev_printk(KERN_INFO, sdev,
4984                             "%s: qdepth=%d, num_in_q=%d\n",
4985                             __func__, qdepth, num_in_q);
4986         }
4987         return sdev->queue_depth;
4988 }
4989
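/*
 * Every abs(every_nth) commands, inject the misbehaviour selected by the
 * opts flags into the command's private data: a timeout (the command is
 * simply ignored) or a recovered/transport/DIF/DIX/short-transfer error
 * that the resp_* handlers act on later. Returns 1 when the command should
 * be dropped to provoke a timeout, otherwise 0.
 */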
4990 static int
4991 check_inject(struct scsi_cmnd *scp)
4992 {
4993         struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
4994
4995         memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
4996
4997         if (atomic_inc_return(&sdebug_cmnd_count) >=
4998             abs(scsi_debug_every_nth)) {
4999                 atomic_set(&sdebug_cmnd_count, 0);
5000                 if (scsi_debug_every_nth < -1)
5001                         scsi_debug_every_nth = -1;
5002                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5003                         return 1; /* ignore command causing timeout */
5004                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5005                          scsi_medium_access_command(scp))
5006                         return 1; /* time out reads and writes */
5007                 if (sdebug_any_injecting_opt) {
5008                         int opts = scsi_debug_opts;
5009
5010                         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5011                                 ep->inj_recovered = true;
5012                         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5013                                 ep->inj_transport = true;
5014                         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5015                                 ep->inj_dif = true;
5016                         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5017                                 ep->inj_dix = true;
5018                         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5019                                 ep->inj_short = true;
5020                 }
5021         }
5022         return 0;
5023 }
5024
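/*
 * Central command dispatcher: map the opcode (and service action, where one
 * applies) to an entry in opcode_info_arr, optionally apply the "strict"
 * CDB mask check, report pending unit attentions, honour the fake_rw and
 * every_nth settings, then call the entry's resp_* handler and pass its
 * result to schedule_resp() for (possibly delayed) completion.
 */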
5025 static int
5026 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5027 {
5028         u8 sdeb_i;
5029         struct scsi_device *sdp = scp->device;
5030         const struct opcode_info_t *oip;
5031         const struct opcode_info_t *r_oip;
5032         struct sdebug_dev_info *devip;
5033         u8 *cmd = scp->cmnd;
5034         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5035         int k, na;
5036         int errsts = 0;
5037         int errsts_no_connect = DID_NO_CONNECT << 16;
5038         u32 flags;
5039         u16 sa;
5040         u8 opcode = cmd[0];
5041         bool has_wlun_rl;
5042         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5043
5044         scsi_set_resid(scp, 0);
5045         if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5046                 char b[120];
5047                 int n, len, sb;
5048
5049                 len = scp->cmd_len;
5050                 sb = (int)sizeof(b);
5051                 if (len > 32)
5052                         strcpy(b, "too long, over 32 bytes");
5053                 else {
5054                         for (k = 0, n = 0; k < len && n < sb; ++k)
5055                                 n += scnprintf(b + n, sb - n, "%02x ",
5056                                                (u32)cmd[k]);
5057                 }
5058                 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5059         }
5060         has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5061         if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5062                 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5063
5064         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5065         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5066         devip = (struct sdebug_dev_info *)sdp->hostdata;
5067         if (!devip) {
5068                 devip = devInfoReg(sdp);
5069                 if (NULL == devip)
5070                         return schedule_resp(scp, NULL, errsts_no_connect, 0);
5071         }
5072         na = oip->num_attached;
5073         r_pfp = oip->pfp;
5074         if (na) {       /* multiple commands with this opcode */
5075                 r_oip = oip;
5076                 if (FF_SA & r_oip->flags) {
5077                         if (F_SA_LOW & oip->flags)
5078                                 sa = 0x1f & cmd[1];
5079                         else
5080                                 sa = get_unaligned_be16(cmd + 8);
5081                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5082                                 if (opcode == oip->opcode && sa == oip->sa)
5083                                         break;
5084                         }
5085                 } else {   /* since no service action only check opcode */
5086                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5087                                 if (opcode == oip->opcode)
5088                                         break;
5089                         }
5090                 }
5091                 if (k > na) {
5092                         if (F_SA_LOW & r_oip->flags)
5093                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5094                         else if (F_SA_HIGH & r_oip->flags)
5095                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5096                         else
5097                                 mk_sense_invalid_opcode(scp);
5098                         goto check_cond;
5099                 }
5100         }       /* else (when na==0) we assume the oip is a match */
5101         flags = oip->flags;
5102         if (F_INV_OP & flags) {
5103                 mk_sense_invalid_opcode(scp);
5104                 goto check_cond;
5105         }
5106         if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5107                 if (debug)
5108                         sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5109                                     "0x%x not supported for wlun\n", opcode);
5110                 mk_sense_invalid_opcode(scp);
5111                 goto check_cond;
5112         }
5113         if (scsi_debug_strict) {        /* check cdb against mask */
5114                 u8 rem;
5115                 int j;
5116
5117                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5118                         rem = ~oip->len_mask[k] & cmd[k];
5119                         if (rem) {
5120                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5121                                         if (0x80 & rem)
5122                                                 break;
5123                                 }
5124                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5125                                 goto check_cond;
5126                         }
5127                 }
5128         }
5129         if (!(F_SKIP_UA & flags) &&
5130             SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5131                 errsts = check_readiness(scp, UAS_ONLY, devip);
5132                 if (errsts)
5133                         goto check_cond;
5134         }
5135         if ((F_M_ACCESS & flags) && devip->stopped) {
5136                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5137                 if (debug)
5138                         sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5139                                     "%s\n", my_name, "initializing command "
5140                                     "required");
5141                 errsts = check_condition_result;
5142                 goto fini;
5143         }
5144         if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5145                 goto fini;
5146         if (scsi_debug_every_nth) {
5147                 if (check_inject(scp))
5148                         return 0;       /* ignore command: make trouble */
5149         }
5150         if (oip->pfp)   /* if this command has a resp_* function, call it */
5151                 errsts = oip->pfp(scp, devip);
5152         else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5153                 errsts = r_pfp(scp, devip);
5154
5155 fini:
5156         return schedule_resp(scp, devip, errsts,
5157                              ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5158 check_cond:
5159         return schedule_resp(scp, devip, check_condition_result, 0);
5160 }
5161
5162 static int
5163 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5164 {
5165         if (scsi_debug_host_lock) {
5166                 unsigned long iflags;
5167                 int rc;
5168
5169                 spin_lock_irqsave(shost->host_lock, iflags);
5170                 rc = scsi_debug_queuecommand(cmd);
5171                 spin_unlock_irqrestore(shost->host_lock, iflags);
5172                 return rc;
5173         } else
5174                 return scsi_debug_queuecommand(cmd);
5175 }
5176
5177 static struct scsi_host_template sdebug_driver_template = {
5178         .show_info =            scsi_debug_show_info,
5179         .write_info =           scsi_debug_write_info,
5180         .proc_name =            sdebug_proc_name,
5181         .name =                 "SCSI DEBUG",
5182         .info =                 scsi_debug_info,
5183         .slave_alloc =          scsi_debug_slave_alloc,
5184         .slave_configure =      scsi_debug_slave_configure,
5185         .slave_destroy =        scsi_debug_slave_destroy,
5186         .ioctl =                scsi_debug_ioctl,
5187         .queuecommand =         sdebug_queuecommand_lock_or_not,
5188         .change_queue_depth =   sdebug_change_qdepth,
5189         .eh_abort_handler =     scsi_debug_abort,
5190         .eh_device_reset_handler = scsi_debug_device_reset,
5191         .eh_target_reset_handler = scsi_debug_target_reset,
5192         .eh_bus_reset_handler = scsi_debug_bus_reset,
5193         .eh_host_reset_handler = scsi_debug_host_reset,
5194         .can_queue =            SCSI_DEBUG_CANQUEUE,
5195         .this_id =              7,
5196         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
5197         .cmd_per_lun =          DEF_CMD_PER_LUN,
5198         .max_sectors =          -1U,
5199         .use_clustering =       DISABLE_CLUSTERING,
5200         .module =               THIS_MODULE,
5201         .track_queue_depth =    1,
5202         .cmd_size =             sizeof(struct sdebug_scmd_extra_t),
5203 };
5204
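/* Bus probe routine: allocate and configure a Scsi_Host for the simulated
 * adapter, advertise the DIF/DIX protection capabilities and guard type
 * selected by the module parameters, then add and scan the host. */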
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	int opts;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int host_prot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = scsi_debug_max_queue;
	if (scsi_debug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("%s: scsi_host_alloc failed\n", __func__);
		error = -ENODEV;
		return error;
	}

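	/*
	 * The host private data is only a pointer wide (note the
	 * sizeof(sdbg_host) above); stash a back-pointer to our
	 * sdebug_host_info there.
	 */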
	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
		hpnt->max_id = scsi_debug_num_tgts + 1;
	else
		hpnt->max_id = scsi_debug_num_tgts;
	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */

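	/*
	 * Map the scsi_debug_dif setting to the corresponding SHOST_DIF_*
	 * capability and, when scsi_debug_dix is also set, the matching
	 * SHOST_DIX_* bit.
	 */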
	host_prot = 0;

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		host_prot = SHOST_DIF_TYPE1_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		host_prot = SHOST_DIF_TYPE2_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		host_prot = SHOST_DIF_TYPE3_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, host_prot);

	pr_info("scsi_debug: host protection%s%s%s%s%s%s%s\n",
		(host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
		(host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
		(host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
		(host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
		(host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
		(host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
		(host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (scsi_debug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

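	/* remember whether any error-injection option is currently set */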
	opts = scsi_debug_opts;
	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
		sdebug_any_injecting_opt = true;
	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
		sdebug_any_injecting_opt = true;
	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
		sdebug_any_injecting_opt = true;
	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
		sdebug_any_injecting_opt = true;
	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
		sdebug_any_injecting_opt = true;

	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}

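/*
 * Remove callback for the pseudo bus: unregister the Scsi_Host, free the
 * per-device state attached to this adapter and drop the host reference.
 */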
static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		pr_err("%s: Unable to locate host info\n", __func__);
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}

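/* every device on the pseudo bus matches this driver */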
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

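/*
 * Pseudo bus type: every device matches, and device probe/remove are
 * routed to sdebug_driver_probe()/sdebug_driver_remove() above.
 */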
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};