1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97
98
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
106 #define DEF_ATO 1
107 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW     0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
132 #define DEF_UNMAP_ALIGNMENT 0
133 #define DEF_UNMAP_GRANULARITY 1
134 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
135 #define DEF_UNMAP_MAX_DESC 256
136 #define DEF_VIRTUAL_GB   0
137 #define DEF_VPD_USE_HOSTNO 1
138 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DEF_STRICT 0
140 #define DELAY_OVERRIDDEN -9999
141
142 /* bit mask values for scsi_debug_opts */
143 #define SCSI_DEBUG_OPT_NOISE   1
144 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
145 #define SCSI_DEBUG_OPT_TIMEOUT   4
146 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
147 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
148 #define SCSI_DEBUG_OPT_DIF_ERR   32
149 #define SCSI_DEBUG_OPT_DIX_ERR   64
150 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
151 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
152 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
153 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
154 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
155 #define SCSI_DEBUG_OPT_N_WCE    0x1000
156 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
157 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
158 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
159 /* When "every_nth" > 0 then modulo "every_nth" commands:
160  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
161  *   - a RECOVERED_ERROR is simulated on successful read and write
162  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
163  *   - a TRANSPORT_ERROR is simulated on successful read and write
164  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
165  *
166  * When "every_nth" < 0 then after "- every_nth" commands:
167  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
168  *   - a RECOVERED_ERROR is simulated on successful read and write
169  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
170  *   - a TRANSPORT_ERROR is simulated on successful read and write
171  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
172  * This will continue until some other action occurs (e.g. the user
173  * writing a new value (other than -1 or 1) to every_nth via sysfs).
174  */
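
/*
 * Editorial sketch (not part of the driver): the option bits above are
 * tested against scsi_debug_opts when the command counter reaches the
 * every_nth modulus.  Roughly:
 *
 *	if (scsi_debug_every_nth &&
 *	    (atomic_read(&sdebug_cmnd_count) % abs(scsi_debug_every_nth)) == 0 &&
 *	    (scsi_debug_opts & SCSI_DEBUG_OPT_RECOVERED_ERR))
 *		inject_recovered_error();	// hypothetical helper
 */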
175
176 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
177  * priority order. In the subset implemented here lower numbers have higher
178  * priority. The UA numbers should be a sequence starting from 0 with
179  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
180 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
181 #define SDEBUG_UA_BUS_RESET 1
182 #define SDEBUG_UA_MODE_CHANGED 2
183 #define SDEBUG_UA_CAPACITY_CHANGED 3
184 #define SDEBUG_NUM_UAS 4
185
186 /* for check_readiness() */
187 #define UAS_ONLY 1      /* check for UAs only */
188 #define UAS_TUR 0       /* if no UAs then check if media access possible */
189
190 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit of scsi_debug_opts is set, a
191  * medium error is simulated at this sector on read commands: */
192 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
193 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
194
195 /* If REPORT LUNS reports luns >= 256 it can use "flat space" addressing
196  * (value 1) or "peripheral device" addressing (value 0) */
197 #define SAM2_LUN_ADDRESS_METHOD 0
198 #define SAM2_WLUN_REPORT_LUNS 0xc101
199
200 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
201  * (for response) at one time. Can be reduced by max_queue option. Command
202  * responses are not queued when delay=0 and ndelay=0. The per-device
203  * DEF_CMD_PER_LUN can be changed via sysfs:
204  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
205  * SCSI_DEBUG_CANQUEUE. */
206 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a WORD here is BITS_PER_LONG bits */
207 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
208 #define DEF_CMD_PER_LUN  255
209
210 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
211 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
212 #endif
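
/*
 * Worked example: SCSI_DEBUG_CANQUEUE_WORDS is 9, so on a 64-bit build
 * SCSI_DEBUG_CANQUEUE = 9 * 64 = 576 queued commands, and on a 32-bit
 * build 9 * 32 = 288.  DEF_CMD_PER_LUN (255) fits either way, so the
 * #warning above never fires with these defaults.
 */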
213
214 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
215 enum sdeb_opcode_index {
216         SDEB_I_INVALID_OPCODE = 0,
217         SDEB_I_INQUIRY = 1,
218         SDEB_I_REPORT_LUNS = 2,
219         SDEB_I_REQUEST_SENSE = 3,
220         SDEB_I_TEST_UNIT_READY = 4,
221         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
222         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
223         SDEB_I_LOG_SENSE = 7,
224         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
225         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
226         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
227         SDEB_I_START_STOP = 11,
228         SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
229         SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
230         SDEB_I_MAINT_IN = 14,
231         SDEB_I_MAINT_OUT = 15,
232         SDEB_I_VERIFY = 16,             /* 10 only */
233         SDEB_I_VARIABLE_LEN = 17,
234         SDEB_I_RESERVE = 18,            /* 6, 10 */
235         SDEB_I_RELEASE = 19,            /* 6, 10 */
236         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
237         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
238         SDEB_I_ATA_PT = 22,             /* 12, 16 */
239         SDEB_I_SEND_DIAG = 23,
240         SDEB_I_UNMAP = 24,
241         SDEB_I_XDWRITEREAD = 25,        /* 10 only */
242         SDEB_I_WRITE_BUFFER = 26,
243         SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
244         SDEB_I_SYNC_CACHE = 28,         /* 10 only */
245         SDEB_I_COMP_WRITE = 29,
246         SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
247 };
248
249 static const unsigned char opcode_ind_arr[256] = {
250 /* 0x0; 0x0->0x1f: 6 byte cdbs */
251         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
252             0, 0, 0, 0,
253         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
254         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
255             SDEB_I_RELEASE,
256         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
257             SDEB_I_ALLOW_REMOVAL, 0,
258 /* 0x20; 0x20->0x3f: 10 byte cdbs */
259         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
260         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
261         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
262         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
263 /* 0x40; 0x40->0x5f: 10 byte cdbs */
264         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
265         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
266         0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
267             SDEB_I_RELEASE,
268         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
269 /* 0x60; 0x60->0x7d are reserved */
270         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
272         0, SDEB_I_VARIABLE_LEN,
273 /* 0x80; 0x80->0x9f: 16 byte cdbs */
274         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
275         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
276         0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
277         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
278 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
279         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
280              SDEB_I_MAINT_OUT, 0, 0, 0,
281         SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
282              0, 0, 0, 0,
283         0, 0, 0, 0, 0, 0, 0, 0,
284         0, 0, 0, 0, 0, 0, 0, 0,
285 /* 0xc0; 0xc0->0xff: vendor specific */
286         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
289         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
290 };
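
/*
 * Illustrative sketch (not part of the driver): the first cdb byte indexes
 * opcode_ind_arr to pick an SDEB_I_* slot, which then indexes
 * opcode_info_arr further below.  For READ(10), opcode 0x28:
 *
 *	u8 opcode = cmd[0];                        (0x28)
 *	int idx = opcode_ind_arr[opcode];          (SDEB_I_READ == 9)
 *	const struct opcode_info_t *oip = &opcode_info_arr[idx];
 */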
291
292 #define F_D_IN                  1
293 #define F_D_OUT                 2
294 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
295 #define F_D_UNKN                8
296 #define F_RL_WLUN_OK            0x10
297 #define F_SKIP_UA               0x20
298 #define F_DELAY_OVERR           0x40
299 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
300 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
301 #define F_INV_OP                0x200
302 #define F_FAKE_RW               0x400
303 #define F_M_ACCESS              0x800   /* media access */
304
305 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
306 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
307 #define FF_SA (F_SA_HIGH | F_SA_LOW)
308
309 struct sdebug_dev_info;
310 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
311 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
312 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
313 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
314 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
315 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
316 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
329 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
330
331 struct opcode_info_t {
332         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff
333                                  * for terminating element */
334         u8 opcode;              /* if num_attached > 0, preferred */
335         u16 sa;                 /* service action */
336         u32 flags;              /* OR-ed set of SDEB_F_* */
337         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
338         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
339         u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
340                                 /* ignore cdb bytes after position 15 */
341 };
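
/*
 * Illustrative sketch (an assumption about intended use, not a quote of the
 * driver): len_mask[0] is the cdb length and the remaining entries mark the
 * cdb bits that carry meaning, so a strict check can reject reserved bits:
 *
 *	for (k = 1; k < oip->len_mask[0] && k < 16; ++k)
 *		if (cmd[k] & ~oip->len_mask[k])
 *			return reject_reserved_bit(k);	// hypothetical helper
 */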
342
343 static const struct opcode_info_t msense_iarr[1] = {
344         {0, 0x1a, 0, F_D_IN, NULL, NULL,
345             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
346 };
347
348 static const struct opcode_info_t mselect_iarr[1] = {
349         {0, 0x15, 0, F_D_OUT, NULL, NULL,
350             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
351 };
352
353 static const struct opcode_info_t read_iarr[3] = {
354         {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
355             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
356              0, 0, 0, 0} },
357         {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
358             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
359         {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
360             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
361              0xc7, 0, 0, 0, 0} },
362 };
363
364 static const struct opcode_info_t write_iarr[3] = {
365         {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
366             {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
367              0, 0, 0, 0} },
368         {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
369             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
370         {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
371             {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
372              0xc7, 0, 0, 0, 0} },
373 };
374
375 static const struct opcode_info_t sa_in_iarr[1] = {
376         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
377             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
378              0xff, 0xff, 0xff, 0, 0xc7} },
379 };
380
381 static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
382         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
383             NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
384                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
385 };
386
387 static const struct opcode_info_t maint_in_iarr[2] = {
388         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
389             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
390              0xc7, 0, 0, 0, 0} },
391         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
392             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
393              0, 0} },
394 };
395
396 static const struct opcode_info_t write_same_iarr[1] = {
397         {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
398             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
399              0xff, 0xff, 0xff, 0x1f, 0xc7} },
400 };
401
402 static const struct opcode_info_t reserve_iarr[1] = {
403         {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
404             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
405 };
406
407 static const struct opcode_info_t release_iarr[1] = {
408         {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
409             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
410 };
411
412
413 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
414  * plus the terminating elements for logic that scans this table such as
415  * REPORT SUPPORTED OPERATION CODES. */
416 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
417 /* 0 */
418         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
419             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
420         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
421             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
422         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
423             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
424              0, 0} },
425         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
426             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
427         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
428             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
429         {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
430             {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
431              0} },
432         {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
433             {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
434         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
435             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
436              0, 0, 0} },
437         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
438             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
439              0, 0} },
440         {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
441             {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
442              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
443 /* 10 */
444         {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
445             {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
446              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
447         {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
448             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
449         {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
450             {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
451              0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
452         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
453             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
454         {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
455             {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
456              0} },
457         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
458             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
459         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
460             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
461         {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
462             vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
463                       0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
464         {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
465             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
466              0} },
467         {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
468             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
469              0} },
470 /* 20 */
471         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
472             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
473         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
474             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
475         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
476             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
477         {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
478             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
479         {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
480             {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
481         {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
482             NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
483                    0, 0, 0, 0, 0, 0} },
484         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
485             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486         {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
487             write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
488                               0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
489         {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
490             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
491              0, 0, 0, 0} },
492         {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
493             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
494              0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
495
496 /* 30 */
497         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
498             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
499 };
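
/*
 * Note: entries with num_attached > 0 chain to a sibling array via arrp.
 * For example the SDEB_I_READ slot above describes READ(16) directly and
 * points at read_iarr[] for the READ(10)/READ(6)/READ(12) variants, which
 * lets logic such as REPORT SUPPORTED OPERATION CODES enumerate them all.
 */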
500
501 struct sdebug_scmd_extra_t {
502         bool inj_recovered;
503         bool inj_transport;
504         bool inj_dif;
505         bool inj_dix;
506         bool inj_short;
507 };
508
509 static int scsi_debug_add_host = DEF_NUM_HOST;
510 static int scsi_debug_ato = DEF_ATO;
511 static int scsi_debug_delay = DEF_DELAY;
512 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
513 static int scsi_debug_dif = DEF_DIF;
514 static int scsi_debug_dix = DEF_DIX;
515 static int scsi_debug_dsense = DEF_D_SENSE;
516 static int scsi_debug_every_nth = DEF_EVERY_NTH;
517 static int scsi_debug_fake_rw = DEF_FAKE_RW;
518 static unsigned int scsi_debug_guard = DEF_GUARD;
519 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
520 static int scsi_debug_max_luns = DEF_MAX_LUNS;
521 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
522 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
523 static int scsi_debug_ndelay = DEF_NDELAY;
524 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
525 static int scsi_debug_no_uld = 0;
526 static int scsi_debug_num_parts = DEF_NUM_PARTS;
527 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
528 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
529 static int scsi_debug_opts = DEF_OPTS;
530 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
531 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
532 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
533 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
534 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
535 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
536 static unsigned int scsi_debug_lbpu = DEF_LBPU;
537 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
538 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
539 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
540 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
541 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
542 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
543 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
544 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
545 static bool scsi_debug_removable = DEF_REMOVABLE;
546 static bool scsi_debug_clustering;
547 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
548 static bool scsi_debug_strict = DEF_STRICT;
549 static bool sdebug_any_injecting_opt;
550
551 static atomic_t sdebug_cmnd_count;
552 static atomic_t sdebug_completions;
553 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
554
555 #define DEV_READONLY(TGT)      (0)
556
557 static unsigned int sdebug_store_sectors;
558 static sector_t sdebug_capacity;        /* in sectors */
559
560 /* legacy BIOS-style geometry; the kernel may drop these but some mode
561    sense pages may still need them */
562 static int sdebug_heads;                /* heads per disk */
563 static int sdebug_cylinders_per;        /* cylinders per surface */
564 static int sdebug_sectors_per;          /* sectors per cylinder */
565
566 #define SDEBUG_MAX_PARTS 4
567
568 #define SCSI_DEBUG_MAX_CMD_LEN 32
569
570 static unsigned int scsi_debug_lbp(void)
571 {
572         return ((0 == scsi_debug_fake_rw) &&
573                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
574 }
575
576 struct sdebug_dev_info {
577         struct list_head dev_list;
578         unsigned int channel;
579         unsigned int target;
580         u64 lun;
581         struct sdebug_host_info *sdbg_host;
582         unsigned long uas_bm[1];
583         atomic_t num_in_q;
584         char stopped;           /* TODO: should be atomic */
585         bool used;
586 };
587
588 struct sdebug_host_info {
589         struct list_head host_list;
590         struct Scsi_Host *shost;
591         struct device dev;
592         struct list_head dev_info_list;
593 };
594
595 #define to_sdebug_host(d)       \
596         container_of(d, struct sdebug_host_info, dev)
597
598 static LIST_HEAD(sdebug_host_list);
599 static DEFINE_SPINLOCK(sdebug_host_list_lock);
600
601
602 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
603         struct hrtimer hrt;     /* must be first element */
604         int qa_indx;
605 };
606
607 struct sdebug_queued_cmd {
608         /* in_use flagged by a bit in queued_in_use_bm[] */
609         struct timer_list *cmnd_timerp;
610         struct tasklet_struct *tletp;
611         struct sdebug_hrtimer *sd_hrtp;
612         struct scsi_cmnd * a_cmnd;
613 };
614 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
615 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
616
617
618 static unsigned char * fake_storep;     /* ramdisk storage */
619 static struct sd_dif_tuple *dif_storep; /* protection info */
620 static void *map_storep;                /* provisioning map */
621
622 static unsigned long map_size;
623 static int num_aborts;
624 static int num_dev_resets;
625 static int num_target_resets;
626 static int num_bus_resets;
627 static int num_host_resets;
628 static int dix_writes;
629 static int dix_reads;
630 static int dif_errors;
631
632 static DEFINE_SPINLOCK(queued_arr_lock);
633 static DEFINE_RWLOCK(atomic_rw);
634
635 static char sdebug_proc_name[] = MY_NAME;
636 static const char *my_name = MY_NAME;
637
638 static struct bus_type pseudo_lld_bus;
639
640 static struct device_driver sdebug_driverfs_driver = {
641         .name           = sdebug_proc_name,
642         .bus            = &pseudo_lld_bus,
643 };
644
645 static const int check_condition_result =
646                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
647
648 static const int illegal_condition_result =
649         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
650
651 static const int device_qfull_result =
652         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
653
654 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
655                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
656                                      0, 0, 0, 0};
657 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
658                                     0, 0, 0x2, 0x4b};
659 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
660                                    0, 0, 0x0, 0x0};
661
662 static void *fake_store(unsigned long long lba)
663 {
664         lba = do_div(lba, sdebug_store_sectors);
665
666         return fake_storep + lba * scsi_debug_sector_size;
667 }
668
669 static struct sd_dif_tuple *dif_store(sector_t sector)
670 {
671         sector = do_div(sector, sdebug_store_sectors);
672
673         return dif_storep + sector;
674 }
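
/*
 * Note: both helpers wrap the requested address modulo sdebug_store_sectors,
 * so when virtual_gb makes the reported capacity larger than the RAM store
 * the same backing bytes are reused.  Worked example with the default 8 MiB
 * store and 512-byte sectors (sdebug_store_sectors == 16384): LBA 20000 maps
 * to byte offset (20000 % 16384) * 512 = 3616 * 512 in fake_storep.
 */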
675
676 static int sdebug_add_adapter(void);
677 static void sdebug_remove_adapter(void);
678
679 static void sdebug_max_tgts_luns(void)
680 {
681         struct sdebug_host_info *sdbg_host;
682         struct Scsi_Host *hpnt;
683
684         spin_lock(&sdebug_host_list_lock);
685         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
686                 hpnt = sdbg_host->shost;
687                 if ((hpnt->this_id >= 0) &&
688                     (scsi_debug_num_tgts > hpnt->this_id))
689                         hpnt->max_id = scsi_debug_num_tgts + 1;
690                 else
691                         hpnt->max_id = scsi_debug_num_tgts;
692                 /* scsi_debug_max_luns; */
693                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
694         }
695         spin_unlock(&sdebug_host_list_lock);
696 }
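
/*
 * Note: max_lun is set to SAM2_WLUN_REPORT_LUNS (0xc101) rather than
 * scsi_debug_max_luns, evidently so the REPORT LUNS well known logical
 * unit stays addressable during scanning.
 */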
697
698 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
699
700 /* Set in_bit to -1 to indicate no bit position of invalid field */
701 static void
702 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
703                      int in_byte, int in_bit)
704 {
705         unsigned char *sbuff;
706         u8 sks[4];
707         int sl, asc;
708
709         sbuff = scp->sense_buffer;
710         if (!sbuff) {
711                 sdev_printk(KERN_ERR, scp->device,
712                             "%s: sense_buffer is NULL\n", __func__);
713                 return;
714         }
715         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
716         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
717         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
718                                 asc, 0);
719         memset(sks, 0, sizeof(sks));
720         sks[0] = 0x80;
721         if (c_d)
722                 sks[0] |= 0x40;
723         if (in_bit >= 0) {
724                 sks[0] |= 0x8;
725                 sks[0] |= 0x7 & in_bit;
726         }
727         put_unaligned_be16(in_byte, sks + 1);
728         if (scsi_debug_dsense) {
729                 sl = sbuff[7] + 8;
730                 sbuff[7] = sl;
731                 sbuff[sl] = 0x2;
732                 sbuff[sl + 1] = 0x6;
733                 memcpy(sbuff + sl + 4, sks, 3);
734         } else
735                 memcpy(sbuff + 15, sks, 3);
736         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
737                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
738                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
739                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
740 }
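
/*
 * Worked example: mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1) with
 * fixed-format sense (scsi_debug_dsense == 0) yields ILLEGAL REQUEST with
 * asc 0x24 (INVALID FIELD IN CDB) and sense-key-specific bytes 15..17 of
 * 0xc0 0x00 0x02, i.e. SKSV=1, C/D=1 (error is in the cdb) and a field
 * pointer of 2 (cdb byte 2).
 */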
741
742 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
743 {
744         unsigned char *sbuff;
745
746         sbuff = scp->sense_buffer;
747         if (!sbuff) {
748                 sdev_printk(KERN_ERR, scp->device,
749                             "%s: sense_buffer is NULL\n", __func__);
750                 return;
751         }
752         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
753
754         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
755
756         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
757                 sdev_printk(KERN_INFO, scp->device,
758                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
759                             my_name, key, asc, asq);
760 }
761
762 static void
763 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
764 {
765         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
766 }
767
768 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
769 {
770         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
771                 if (0x1261 == cmd)
772                         sdev_printk(KERN_INFO, dev,
773                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
774                 else if (0x5331 == cmd)
775                         sdev_printk(KERN_INFO, dev,
776                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
777                                     __func__);
778                 else
779                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
780                                     __func__, cmd);
781         }
782         return -EINVAL;
783         /* return -ENOTTY; // correct return but upsets fdisk */
784 }
785
786 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
787                            struct sdebug_dev_info * devip)
788 {
789         int k;
790         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
791
792         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
793         if (k != SDEBUG_NUM_UAS) {
794                 const char *cp = NULL;
795
796                 switch (k) {
797                 case SDEBUG_UA_POR:
798                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
799                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
800                         if (debug)
801                                 cp = "power on reset";
802                         break;
803                 case SDEBUG_UA_BUS_RESET:
804                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
805                                         UA_RESET_ASC, BUS_RESET_ASCQ);
806                         if (debug)
807                                 cp = "bus reset";
808                         break;
809                 case SDEBUG_UA_MODE_CHANGED:
810                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
811                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
812                         if (debug)
813                                 cp = "mode parameters changed";
814                         break;
815                 case SDEBUG_UA_CAPACITY_CHANGED:
816                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
817                                         UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
818                         if (debug)
819                                 cp = "capacity data changed";
                            break;
820                 default:
821                         pr_warn("%s: unexpected unit attention code=%d\n",
822                                 __func__, k);
823                         if (debug)
824                                 cp = "unknown";
825                         break;
826                 }
827                 clear_bit(k, devip->uas_bm);
828                 if (debug)
829                         sdev_printk(KERN_INFO, SCpnt->device,
830                                    "%s reports: Unit attention: %s\n",
831                                    my_name, cp);
832                 return check_condition_result;
833         }
834         if ((UAS_TUR == uas_only) && devip->stopped) {
835                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
836                                 0x2);
837                 if (debug)
838                         sdev_printk(KERN_INFO, SCpnt->device,
839                                     "%s reports: Not ready: %s\n", my_name,
840                                     "initializing command required");
841                 return check_condition_result;
842         }
843         return 0;
844 }
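
/*
 * Note: find_first_bit() above returns the lowest set bit, so with the
 * SDEBUG_UA_* numbering (0 == highest priority) pending unit attentions are
 * reported in priority order, one per command, each cleared once reported.
 */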
845
846 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
847 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
848                                 int arr_len)
849 {
850         int act_len;
851         struct scsi_data_buffer *sdb = scsi_in(scp);
852
853         if (!sdb->length)
854                 return 0;
855         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
856                 return (DID_ERROR << 16);
857
858         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
859                                       arr, arr_len);
860         sdb->resid = scsi_bufflen(scp) - act_len;
861
862         return 0;
863 }
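
/*
 * Worked example: with a 96-byte data-in buffer and a 36-byte response
 * array, sg_copy_from_buffer() copies 36 bytes and resid is set to
 * 96 - 36 = 60, telling the midlayer how much of the buffer went unused.
 */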
864
865 /* Returns number of bytes fetched into 'arr' or -1 if error. */
866 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
867                                int arr_len)
868 {
869         if (!scsi_bufflen(scp))
870                 return 0;
871         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
872                 return -1;
873
874         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
875 }
876
877
878 static const char * inq_vendor_id = "Linux   ";
879 static const char * inq_product_id = "scsi_debug      ";
880 static const char *inq_product_rev = "0184";    /* version less '.' */
881
882 /* Device identification VPD page. Returns number of bytes placed in arr */
883 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
884                            int target_dev_id, int dev_id_num,
885                            const char * dev_id_str,
886                            int dev_id_str_len)
887 {
888         int num, port_a;
889         char b[32];
890
891         port_a = target_dev_id + 1;
892         /* T10 vendor identifier field format (faked) */
893         arr[0] = 0x2;   /* ASCII */
894         arr[1] = 0x1;
895         arr[2] = 0x0;
896         memcpy(&arr[4], inq_vendor_id, 8);
897         memcpy(&arr[12], inq_product_id, 16);
898         memcpy(&arr[28], dev_id_str, dev_id_str_len);
899         num = 8 + 16 + dev_id_str_len;
900         arr[3] = num;
901         num += 4;
902         if (dev_id_num >= 0) {
903                 /* NAA-5, Logical unit identifier (binary) */
904                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
905                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
906                 arr[num++] = 0x0;
907                 arr[num++] = 0x8;
908                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
909                 arr[num++] = 0x33;
910                 arr[num++] = 0x33;
911                 arr[num++] = 0x30;
912                 arr[num++] = (dev_id_num >> 24);
913                 arr[num++] = (dev_id_num >> 16) & 0xff;
914                 arr[num++] = (dev_id_num >> 8) & 0xff;
915                 arr[num++] = dev_id_num & 0xff;
916                 /* Target relative port number */
917                 arr[num++] = 0x61;      /* proto=sas, binary */
918                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
919                 arr[num++] = 0x0;       /* reserved */
920                 arr[num++] = 0x4;       /* length */
921                 arr[num++] = 0x0;       /* reserved */
922                 arr[num++] = 0x0;       /* reserved */
923                 arr[num++] = 0x0;
924                 arr[num++] = 0x1;       /* relative port A */
925         }
926         /* NAA-5, Target port identifier */
927         arr[num++] = 0x61;      /* proto=sas, binary */
928         arr[num++] = 0x93;      /* piv=1, target port, naa */
929         arr[num++] = 0x0;
930         arr[num++] = 0x8;
931         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
932         arr[num++] = 0x22;
933         arr[num++] = 0x22;
934         arr[num++] = 0x20;
935         arr[num++] = (port_a >> 24);
936         arr[num++] = (port_a >> 16) & 0xff;
937         arr[num++] = (port_a >> 8) & 0xff;
938         arr[num++] = port_a & 0xff;
939         /* NAA-5, Target port group identifier */
940         arr[num++] = 0x61;      /* proto=sas, binary */
941         arr[num++] = 0x95;      /* piv=1, target port group id */
942         arr[num++] = 0x0;
943         arr[num++] = 0x4;
944         arr[num++] = 0;
945         arr[num++] = 0;
946         arr[num++] = (port_group_id >> 8) & 0xff;
947         arr[num++] = port_group_id & 0xff;
948         /* NAA-5, Target device identifier */
949         arr[num++] = 0x61;      /* proto=sas, binary */
950         arr[num++] = 0xa3;      /* piv=1, target device, naa */
951         arr[num++] = 0x0;
952         arr[num++] = 0x8;
953         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
954         arr[num++] = 0x22;
955         arr[num++] = 0x22;
956         arr[num++] = 0x20;
957         arr[num++] = (target_dev_id >> 24);
958         arr[num++] = (target_dev_id >> 16) & 0xff;
959         arr[num++] = (target_dev_id >> 8) & 0xff;
960         arr[num++] = target_dev_id & 0xff;
961         /* SCSI name string: Target device identifier */
962         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
963         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
964         arr[num++] = 0x0;
965         arr[num++] = 24;
966         memcpy(arr + num, "naa.52222220", 12);
967         num += 12;
968         snprintf(b, sizeof(b), "%08X", target_dev_id);
969         memcpy(arr + num, b, 8);
970         num += 8;
971         memset(arr + num, 0, 4);
972         num += 4;
973         return num;
974 }
975
976
977 static unsigned char vpd84_data[] = {
978 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
979     0x22,0x22,0x22,0x0,0xbb,0x1,
980     0x22,0x22,0x22,0x0,0xbb,0x2,
981 };
982
983 /*  Software interface identification VPD page */
984 static int inquiry_evpd_84(unsigned char * arr)
985 {
986         memcpy(arr, vpd84_data, sizeof(vpd84_data));
987         return sizeof(vpd84_data);
988 }
989
990 /* Management network addresses VPD page */
991 static int inquiry_evpd_85(unsigned char * arr)
992 {
993         int num = 0;
994         const char * na1 = "https://www.kernel.org/config";
995         const char * na2 = "http://www.kernel.org/log";
996         int plen, olen;
997
998         arr[num++] = 0x1;       /* lu, storage config */
999         arr[num++] = 0x0;       /* reserved */
1000         arr[num++] = 0x0;
1001         olen = strlen(na1);
1002         plen = olen + 1;
1003         if (plen % 4)
1004                 plen = ((plen / 4) + 1) * 4;
1005         arr[num++] = plen;      /* length, null terminated, padded */
1006         memcpy(arr + num, na1, olen);
1007         memset(arr + num + olen, 0, plen - olen);
1008         num += plen;
1009
1010         arr[num++] = 0x4;       /* lu, logging */
1011         arr[num++] = 0x0;       /* reserved */
1012         arr[num++] = 0x0;
1013         olen = strlen(na2);
1014         plen = olen + 1;
1015         if (plen % 4)
1016                 plen = ((plen / 4) + 1) * 4;
1017         arr[num++] = plen;      /* length, null terminated, padded */
1018         memcpy(arr + num, na2, olen);
1019         memset(arr + num + olen, 0, plen - olen);
1020         num += plen;
1021
1022         return num;
1023 }
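
/*
 * Worked example: na1 ("https://www.kernel.org/config") is 29 characters,
 * so plen starts at 30 (including the NUL) and is rounded up to the next
 * multiple of four (32); the last 3 bytes of the descriptor are zeroed.
 */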
1024
1025 /* SCSI ports VPD page */
1026 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1027 {
1028         int num = 0;
1029         int port_a, port_b;
1030
1031         port_a = target_dev_id + 1;
1032         port_b = port_a + 1;
1033         arr[num++] = 0x0;       /* reserved */
1034         arr[num++] = 0x0;       /* reserved */
1035         arr[num++] = 0x0;
1036         arr[num++] = 0x1;       /* relative port 1 (primary) */
1037         memset(arr + num, 0, 6);
1038         num += 6;
1039         arr[num++] = 0x0;
1040         arr[num++] = 12;        /* length tp descriptor */
1041         /* naa-5 target port identifier (A) */
1042         arr[num++] = 0x61;      /* proto=sas, binary */
1043         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1044         arr[num++] = 0x0;       /* reserved */
1045         arr[num++] = 0x8;       /* length */
1046         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1047         arr[num++] = 0x22;
1048         arr[num++] = 0x22;
1049         arr[num++] = 0x20;
1050         arr[num++] = (port_a >> 24);
1051         arr[num++] = (port_a >> 16) & 0xff;
1052         arr[num++] = (port_a >> 8) & 0xff;
1053         arr[num++] = port_a & 0xff;
1054
1055         arr[num++] = 0x0;       /* reserved */
1056         arr[num++] = 0x0;       /* reserved */
1057         arr[num++] = 0x0;
1058         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1059         memset(arr + num, 0, 6);
1060         num += 6;
1061         arr[num++] = 0x0;
1062         arr[num++] = 12;        /* length tp descriptor */
1063         /* naa-5 target port identifier (B) */
1064         arr[num++] = 0x61;      /* proto=sas, binary */
1065         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1066         arr[num++] = 0x0;       /* reserved */
1067         arr[num++] = 0x8;       /* length */
1068         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1069         arr[num++] = 0x22;
1070         arr[num++] = 0x22;
1071         arr[num++] = 0x20;
1072         arr[num++] = (port_b >> 24);
1073         arr[num++] = (port_b >> 16) & 0xff;
1074         arr[num++] = (port_b >> 8) & 0xff;
1075         arr[num++] = port_b & 0xff;
1076
1077         return num;
1078 }
1079
1080
1081 static unsigned char vpd89_data[] = {
1082 /* from 4th byte */ 0,0,0,0,
1083 'l','i','n','u','x',' ',' ',' ',
1084 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1085 '1','2','3','4',
1086 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1087 0xec,0,0,0,
1088 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1089 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1090 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1091 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1092 0x53,0x41,
1093 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1094 0x20,0x20,
1095 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1096 0x10,0x80,
1097 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1098 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1099 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1100 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1101 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1102 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1103 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1104 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1105 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1106 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1107 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1108 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1109 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1110 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1111 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1112 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1113 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1114 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1115 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1116 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1117 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1118 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1119 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1120 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1121 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1122 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1123 };
1124
1125 /* ATA Information VPD page */
1126 static int inquiry_evpd_89(unsigned char * arr)
1127 {
1128         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1129         return sizeof(vpd89_data);
1130 }
1131
1132
1133 static unsigned char vpdb0_data[] = {
1134         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1135         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1137         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1138 };
1139
1140 /* Block limits VPD page (SBC-3) */
1141 static int inquiry_evpd_b0(unsigned char * arr)
1142 {
1143         unsigned int gran;
1144
1145         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1146
1147         /* Optimal transfer length granularity */
1148         gran = 1 << scsi_debug_physblk_exp;
1149         arr[2] = (gran >> 8) & 0xff;
1150         arr[3] = gran & 0xff;
1151
1152         /* Maximum Transfer Length */
1153         if (sdebug_store_sectors > 0x400) {
1154                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1155                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1156                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1157                 arr[7] = sdebug_store_sectors & 0xff;
1158         }
1159
1160         /* Optimal Transfer Length */
1161         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1162
1163         if (scsi_debug_lbpu) {
1164                 /* Maximum Unmap LBA Count */
1165                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1166
1167                 /* Maximum Unmap Block Descriptor Count */
1168                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1169         }
1170
1171         /* Unmap Granularity Alignment */
1172         if (scsi_debug_unmap_alignment) {
1173                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1174                 arr[28] |= 0x80; /* UGAVALID */
1175         }
1176
1177         /* Optimal Unmap Granularity */
1178         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1179
1180         /* Maximum WRITE SAME Length */
1181         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1182
1183         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1186 }
1187
1188 /* Block device characteristics VPD page (SBC-3) */
1189 static int inquiry_evpd_b1(unsigned char *arr)
1190 {
1191         memset(arr, 0, 0x3c);
1192         arr[0] = 0;
1193         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1194         arr[2] = 0;
1195         arr[3] = 5;     /* less than 1.8" */
1196
1197         return 0x3c;
1198 }
1199
1200 /* Logical block provisioning VPD page (SBC-3) */
1201 static int inquiry_evpd_b2(unsigned char *arr)
1202 {
1203         memset(arr, 0, 0x4);
1204         arr[0] = 0;                     /* threshold exponent */
1205
1206         if (scsi_debug_lbpu)
1207                 arr[1] = 1 << 7;
1208
1209         if (scsi_debug_lbpws)
1210                 arr[1] |= 1 << 6;
1211
1212         if (scsi_debug_lbpws10)
1213                 arr[1] |= 1 << 5;
1214
1215         if (scsi_debug_lbprz)
1216                 arr[1] |= 1 << 2;
1217
1218         return 0x4;
1219 }
1220
1221 #define SDEBUG_LONG_INQ_SZ 96
1222 #define SDEBUG_MAX_INQ_ARR_SZ 584
1223
1224 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1225 {
1226         unsigned char pq_pdt;
1227         unsigned char * arr;
1228         unsigned char *cmd = scp->cmnd;
1229         int alloc_len, n, ret;
1230         bool have_wlun;
1231
1232         alloc_len = (cmd[3] << 8) + cmd[4];
1233         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1234         if (! arr)
1235                 return DID_REQUEUE << 16;
1236         have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1237         if (have_wlun)
1238                 pq_pdt = 0x1e;  /* present, wlun */
1239         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1240                 pq_pdt = 0x7f;  /* not present, no device type */
1241         else
1242                 pq_pdt = (scsi_debug_ptype & 0x1f);
1243         arr[0] = pq_pdt;
1244         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1245                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1246                 kfree(arr);
1247                 return check_condition_result;
1248         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1249                 int lu_id_num, port_group_id, target_dev_id, len;
1250                 char lu_id_str[6];
1251                 int host_no = devip->sdbg_host->shost->host_no;
1252                 
1253                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1254                     (devip->channel & 0x7f);
1255                 if (0 == scsi_debug_vpd_use_hostno)
1256                         host_no = 0;
1257                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1258                             (devip->target * 1000) + devip->lun);
1259                 target_dev_id = ((host_no + 1) * 2000) +
1260                                  (devip->target * 1000) - 3;
1261                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1262                 if (0 == cmd[2]) { /* supported vital product data pages */
1263                         arr[1] = cmd[2];        /*sanity */
1264                         n = 4;
1265                         arr[n++] = 0x0;   /* this page */
1266                         arr[n++] = 0x80;  /* unit serial number */
1267                         arr[n++] = 0x83;  /* device identification */
1268                         arr[n++] = 0x84;  /* software interface ident. */
1269                         arr[n++] = 0x85;  /* management network addresses */
1270                         arr[n++] = 0x86;  /* extended inquiry */
1271                         arr[n++] = 0x87;  /* mode page policy */
1272                         arr[n++] = 0x88;  /* SCSI ports */
1273                         arr[n++] = 0x89;  /* ATA information */
1274                         arr[n++] = 0xb0;  /* Block limits (SBC) */
1275                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1276                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1277                                 arr[n++] = 0xb2;
1278                         arr[3] = n - 4;   /* number of supported VPD pages */
1279                 } else if (0x80 == cmd[2]) { /* unit serial number */
1280                         arr[1] = cmd[2];        /*sanity */
1281                         arr[3] = len;
1282                         memcpy(&arr[4], lu_id_str, len);
1283                 } else if (0x83 == cmd[2]) { /* device identification */
1284                         arr[1] = cmd[2];        /*sanity */
1285                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1286                                                  target_dev_id, lu_id_num,
1287                                                  lu_id_str, len);
1288                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1289                         arr[1] = cmd[2];        /*sanity */
1290                         arr[3] = inquiry_evpd_84(&arr[4]);
1291                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1292                         arr[1] = cmd[2];        /*sanity */
1293                         arr[3] = inquiry_evpd_85(&arr[4]);
1294                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1295                         arr[1] = cmd[2];        /*sanity */
1296                         arr[3] = 0x3c;  /* number of following entries */
1297                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1298                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1299                         else if (scsi_debug_dif)
1300                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1301                         else
1302                                 arr[4] = 0x0;   /* no protection stuff */
1303                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1304                 } else if (0x87 == cmd[2]) { /* mode page policy */
1305                         arr[1] = cmd[2];        /*sanity */
1306                         arr[3] = 0x8;   /* number of following entries */
1307                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1308                         arr[6] = 0x80;  /* mlus, shared */
1309                         arr[8] = 0x18;   /* protocol specific lu */
1310                         arr[10] = 0x82;  /* mlus, per initiator port */
1311                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1312                         arr[1] = cmd[2];        /*sanity */
1313                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1314                 } else if (0x89 == cmd[2]) { /* ATA information */
1315                         arr[1] = cmd[2];        /*sanity */
1316                         n = inquiry_evpd_89(&arr[4]);
1317                         arr[2] = (n >> 8);
1318                         arr[3] = (n & 0xff);
1319                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1320                         arr[1] = cmd[2];        /*sanity */
1321                         arr[3] = inquiry_evpd_b0(&arr[4]);
1322                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1323                         arr[1] = cmd[2];        /*sanity */
1324                         arr[3] = inquiry_evpd_b1(&arr[4]);
1325                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1326                         arr[1] = cmd[2];        /*sanity */
1327                         arr[3] = inquiry_evpd_b2(&arr[4]);
1328                 } else {
1329                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1330                         kfree(arr);
1331                         return check_condition_result;
1332                 }
1333                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1334                 ret = fill_from_dev_buffer(scp, arr,
1335                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1336                 kfree(arr);
1337                 return ret;
1338         }
1339         /* drops through here for a standard inquiry */
1340         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1341         arr[2] = scsi_debug_scsi_level;
1342         arr[3] = 2;    /* response_data_format==2 */
1343         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1344         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1345         if (0 == scsi_debug_vpd_use_hostno)
1346                 arr[5] |= 0x10; /* claim: implicit TPGS */
1347         arr[6] = 0x10; /* claim: MultiP */
1348         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1349         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1350         memcpy(&arr[8], inq_vendor_id, 8);
1351         memcpy(&arr[16], inq_product_id, 16);
1352         memcpy(&arr[32], inq_product_rev, 4);
1353         /* version descriptors (2 bytes each) follow */
1354         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1355         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1356         n = 62;
1357         if (scsi_debug_ptype == 0) {
1358                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1359         } else if (scsi_debug_ptype == 1) {
1360                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1361         }
1362         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1363         ret = fill_from_dev_buffer(scp, arr,
1364                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1365         kfree(arr);
1366         return ret;
1367 }
1368
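/*
 * REQUEST SENSE response. Normally reports the contents of the command's
 * sense buffer, converting between fixed and descriptor formats as needed;
 * when the IE mode page TEST bit is set with MRIE==6, a THRESHOLD EXCEEDED
 * report is synthesized instead.
 */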
1369 static int resp_requests(struct scsi_cmnd * scp,
1370                          struct sdebug_dev_info * devip)
1371 {
1372         unsigned char * sbuff;
1373         unsigned char *cmd = scp->cmnd;
1374         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1375         bool dsense, want_dsense;
1376         int len = 18;
1377
1378         memset(arr, 0, sizeof(arr));
1379         dsense = !!(cmd[1] & 1);
1380         want_dsense = dsense || scsi_debug_dsense;
1381         sbuff = scp->sense_buffer;
1382         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1383                 if (dsense) {
1384                         arr[0] = 0x72;
1385                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1386                         arr[2] = THRESHOLD_EXCEEDED;
1387                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1388                         len = 8;
1389                 } else {
1390                         arr[0] = 0x70;
1391                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1392                         arr[7] = 0xa;           /* 18 byte sense buffer */
1393                         arr[12] = THRESHOLD_EXCEEDED;
1394                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1395                 }
1396         } else {
1397                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1398                 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1399                         ;       /* have sense and formats match */
1400                 else if (arr[0] <= 0x70) {
1401                         if (dsense) {
1402                                 memset(arr, 0, 8);
1403                                 arr[0] = 0x72;
1404                                 len = 8;
1405                         } else {
1406                                 memset(arr, 0, 18);
1407                                 arr[0] = 0x70;
1408                                 arr[7] = 0xa;
1409                         }
1410                 } else if (dsense) {
1411                         memset(arr, 0, 8);
1412                         arr[0] = 0x72;
1413                         arr[1] = sbuff[2];     /* sense key */
1414                         arr[2] = sbuff[12];    /* asc */
1415                         arr[3] = sbuff[13];    /* ascq */
1416                         len = 8;
1417                 } else {
1418                         memset(arr, 0, 18);
1419                         arr[0] = 0x70;
1420                         arr[2] = sbuff[1];     /* sense key */
1421                         arr[7] = 0xa;
1422                         arr[12] = sbuff[2];    /* asc */
1423                         arr[13] = sbuff[3];    /* ascq */
1424                 }
1425
1426         }
1427         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1428         return fill_from_dev_buffer(scp, arr, len);
1429 }
1430
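/*
 * START STOP UNIT: only the START bit is acted upon; a non-zero
 * POWER CONDITION field is rejected.
 */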
1431 static int resp_start_stop(struct scsi_cmnd * scp,
1432                            struct sdebug_dev_info * devip)
1433 {
1434         unsigned char *cmd = scp->cmnd;
1435         int power_cond, start;
1436
1437         power_cond = (cmd[4] & 0xf0) >> 4;
1438         if (power_cond) {
1439                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1440                 return check_condition_result;
1441         }
1442         start = cmd[4] & 1;
1443         if (start == devip->stopped)
1444                 devip->stopped = !start;
1445         return 0;
1446 }
1447
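/*
 * Capacity in sectors: derived from the virtual_gb parameter when set,
 * otherwise the size of the backing store.
 */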
1448 static sector_t get_sdebug_capacity(void)
1449 {
1450         if (scsi_debug_virtual_gb > 0)
1451                 return (sector_t)scsi_debug_virtual_gb *
1452                         (1073741824 / scsi_debug_sector_size);
1453         else
1454                 return sdebug_store_sectors;
1455 }
1456
1457 #define SDEBUG_READCAP_ARR_SZ 8
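/* READ CAPACITY(10): last LBA (0xffffffff if it does not fit) plus block size. */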
1458 static int resp_readcap(struct scsi_cmnd * scp,
1459                         struct sdebug_dev_info * devip)
1460 {
1461         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1462         unsigned int capac;
1463
1464         /* following just in case virtual_gb changed */
1465         sdebug_capacity = get_sdebug_capacity();
1466         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1467         if (sdebug_capacity < 0xffffffff) {
1468                 capac = (unsigned int)sdebug_capacity - 1;
1469                 arr[0] = (capac >> 24);
1470                 arr[1] = (capac >> 16) & 0xff;
1471                 arr[2] = (capac >> 8) & 0xff;
1472                 arr[3] = capac & 0xff;
1473         } else {
1474                 arr[0] = 0xff;
1475                 arr[1] = 0xff;
1476                 arr[2] = 0xff;
1477                 arr[3] = 0xff;
1478         }
1479         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1480         arr[7] = scsi_debug_sector_size & 0xff;
1481         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1482 }
1483
1484 #define SDEBUG_READCAP16_ARR_SZ 32
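/*
 * READ CAPACITY(16): also reports physical block exponent, lowest aligned
 * LBA, protection type and logical block provisioning bits.
 */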
1485 static int resp_readcap16(struct scsi_cmnd * scp,
1486                           struct sdebug_dev_info * devip)
1487 {
1488         unsigned char *cmd = scp->cmnd;
1489         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1490         unsigned long long capac;
1491         int k, alloc_len;
1492
1493         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1494                      + cmd[13]);
1495         /* following just in case virtual_gb changed */
1496         sdebug_capacity = get_sdebug_capacity();
1497         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1498         capac = sdebug_capacity - 1;
1499         for (k = 0; k < 8; ++k, capac >>= 8)
1500                 arr[7 - k] = capac & 0xff;
1501         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1502         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1503         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1504         arr[11] = scsi_debug_sector_size & 0xff;
1505         arr[13] = scsi_debug_physblk_exp & 0xf;
1506         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1507
1508         if (scsi_debug_lbp()) {
1509                 arr[14] |= 0x80; /* LBPME */
1510                 if (scsi_debug_lbprz)
1511                         arr[14] |= 0x40; /* LBPRZ */
1512         }
1513
1514         arr[15] = scsi_debug_lowest_aligned & 0xff;
1515
1516         if (scsi_debug_dif) {
1517                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1518                 arr[12] |= 1; /* PROT_EN */
1519         }
1520
1521         return fill_from_dev_buffer(scp, arr,
1522                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1523 }
1524
1525 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1526
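/*
 * REPORT TARGET PORT GROUPS (ALUA). Builds two port groups of one port
 * each; the group holding the fake port B is reported as unavailable.
 */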
1527 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1528                               struct sdebug_dev_info * devip)
1529 {
1530         unsigned char *cmd = scp->cmnd;
1531         unsigned char * arr;
1532         int host_no = devip->sdbg_host->shost->host_no;
1533         int n, ret, alen, rlen;
1534         int port_group_a, port_group_b, port_a, port_b;
1535
1536         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1537                 + cmd[9]);
1538
1539         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1540         if (! arr)
1541                 return DID_REQUEUE << 16;
1542         /*
1543          * EVPD page 0x88 states we have two ports, one
1544          * real and a fake port with no device connected.
1545          * So we create two port groups with one port each
1546          * and set the group with port B to unavailable.
1547          */
1548         port_a = 0x1; /* relative port A */
1549         port_b = 0x2; /* relative port B */
1550         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1551             (devip->channel & 0x7f);
1552         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1553             (devip->channel & 0x7f) + 0x80;
1554
1555         /*
1556          * The asymmetric access state is cycled according to the host_id.
1557          */
1558         n = 4;
1559         if (0 == scsi_debug_vpd_use_hostno) {
1560             arr[n++] = host_no % 3; /* Asymm access state */
1561             arr[n++] = 0x0F; /* claim: all states are supported */
1562         } else {
1563             arr[n++] = 0x0; /* Active/Optimized path */
1564             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1565         }
1566         arr[n++] = (port_group_a >> 8) & 0xff;
1567         arr[n++] = port_group_a & 0xff;
1568         arr[n++] = 0;    /* Reserved */
1569         arr[n++] = 0;    /* Status code */
1570         arr[n++] = 0;    /* Vendor unique */
1571         arr[n++] = 0x1;  /* One port per group */
1572         arr[n++] = 0;    /* Reserved */
1573         arr[n++] = 0;    /* Reserved */
1574         arr[n++] = (port_a >> 8) & 0xff;
1575         arr[n++] = port_a & 0xff;
1576         arr[n++] = 3;    /* Port unavailable */
1577                 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1578         arr[n++] = (port_group_b >> 8) & 0xff;
1579         arr[n++] = port_group_b & 0xff;
1580         arr[n++] = 0;    /* Reserved */
1581         arr[n++] = 0;    /* Status code */
1582         arr[n++] = 0;    /* Vendor unique */
1583         arr[n++] = 0x1;  /* One port per group */
1584         arr[n++] = 0;    /* Reserved */
1585         arr[n++] = 0;    /* Reserved */
1586         arr[n++] = (port_b >> 8) & 0xff;
1587         arr[n++] = port_b & 0xff;
1588
1589         rlen = n - 4;
1590         arr[0] = (rlen >> 24) & 0xff;
1591         arr[1] = (rlen >> 16) & 0xff;
1592         arr[2] = (rlen >> 8) & 0xff;
1593         arr[3] = rlen & 0xff;
1594
1595         /*
1596          * Return the smallest of:
1597          * - the allocation length from the CDB
1598          * - the constructed response length
1599          * - the maximum array size
1600          */
1601         rlen = min(alen, n);
1602         ret = fill_from_dev_buffer(scp, arr,
1603                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1604         kfree(arr);
1605         return ret;
1606 }
1607
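/*
 * REPORT SUPPORTED OPERATION CODES. Lists all supported opcodes (optionally
 * with command timeout descriptors when RCTD is set) or reports the CDB
 * usage data for a single requested opcode/service action.
 */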
1608 static int
1609 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1610 {
1611         bool rctd;
1612         u8 reporting_opts, req_opcode, sdeb_i, supp;
1613         u16 req_sa, u;
1614         u32 alloc_len, a_len;
1615         int k, offset, len, errsts, count, bump, na;
1616         const struct opcode_info_t *oip;
1617         const struct opcode_info_t *r_oip;
1618         u8 *arr;
1619         u8 *cmd = scp->cmnd;
1620
1621         rctd = !!(cmd[2] & 0x80);
1622         reporting_opts = cmd[2] & 0x7;
1623         req_opcode = cmd[3];
1624         req_sa = get_unaligned_be16(cmd + 4);
1625         alloc_len = get_unaligned_be32(cmd + 6);
1626         if (alloc_len < 4 || alloc_len > 0xffff) {
1627                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1628                 return check_condition_result;
1629         }
1630         if (alloc_len > 8192)
1631                 a_len = 8192;
1632         else
1633                 a_len = alloc_len;
1634         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
1635         if (NULL == arr) {
1636                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1637                                 INSUFF_RES_ASCQ);
1638                 return check_condition_result;
1639         }
1640         switch (reporting_opts) {
1641         case 0: /* all commands */
1642                 /* count number of commands */
1643                 for (count = 0, oip = opcode_info_arr;
1644                      oip->num_attached != 0xff; ++oip) {
1645                         if (F_INV_OP & oip->flags)
1646                                 continue;
1647                         count += (oip->num_attached + 1);
1648                 }
1649                 bump = rctd ? 20 : 8;
1650                 put_unaligned_be32(count * bump, arr);
1651                 for (offset = 4, oip = opcode_info_arr;
1652                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1653                         if (F_INV_OP & oip->flags)
1654                                 continue;
1655                         na = oip->num_attached;
1656                         arr[offset] = oip->opcode;
1657                         put_unaligned_be16(oip->sa, arr + offset + 2);
1658                         if (rctd)
1659                                 arr[offset + 5] |= 0x2;
1660                         if (FF_SA & oip->flags)
1661                                 arr[offset + 5] |= 0x1;
1662                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1663                         if (rctd)
1664                                 put_unaligned_be16(0xa, arr + offset + 8);
1665                         r_oip = oip;
1666                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1667                                 if (F_INV_OP & oip->flags)
1668                                         continue;
1669                                 offset += bump;
1670                                 arr[offset] = oip->opcode;
1671                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1672                                 if (rctd)
1673                                         arr[offset + 5] |= 0x2;
1674                                 if (FF_SA & oip->flags)
1675                                         arr[offset + 5] |= 0x1;
1676                                 put_unaligned_be16(oip->len_mask[0],
1677                                                    arr + offset + 6);
1678                                 if (rctd)
1679                                         put_unaligned_be16(0xa,
1680                                                            arr + offset + 8);
1681                         }
1682                         oip = r_oip;
1683                         offset += bump;
1684                 }
1685                 break;
1686         case 1: /* one command: opcode only */
1687         case 2: /* one command: opcode plus service action */
1688         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1689                 sdeb_i = opcode_ind_arr[req_opcode];
1690                 oip = &opcode_info_arr[sdeb_i];
1691                 if (F_INV_OP & oip->flags) {
1692                         supp = 1;
1693                         offset = 4;
1694                 } else {
1695                         if (1 == reporting_opts) {
1696                                 if (FF_SA & oip->flags) {
1697                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1698                                                              2, 2);
1699                                         kfree(arr);
1700                                         return check_condition_result;
1701                                 }
1702                                 req_sa = 0;
1703                         } else if (2 == reporting_opts &&
1704                                    0 == (FF_SA & oip->flags)) {
1705                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1706                                 kfree(arr);     /* point at requested sa */
1707                                 return check_condition_result;
1708                         }
1709                         if (0 == (FF_SA & oip->flags) &&
1710                             req_opcode == oip->opcode)
1711                                 supp = 3;
1712                         else if (0 == (FF_SA & oip->flags)) {
1713                                 na = oip->num_attached;
1714                                 for (k = 0, oip = oip->arrp; k < na;
1715                                      ++k, ++oip) {
1716                                         if (req_opcode == oip->opcode)
1717                                                 break;
1718                                 }
1719                                 supp = (k >= na) ? 1 : 3;
1720                         } else if (req_sa != oip->sa) {
1721                                 na = oip->num_attached;
1722                                 for (k = 0, oip = oip->arrp; k < na;
1723                                      ++k, ++oip) {
1724                                         if (req_sa == oip->sa)
1725                                                 break;
1726                                 }
1727                                 supp = (k >= na) ? 1 : 3;
1728                         } else
1729                                 supp = 3;
1730                         if (3 == supp) {
1731                                 u = oip->len_mask[0];
1732                                 put_unaligned_be16(u, arr + 2);
1733                                 arr[4] = oip->opcode;
1734                                 for (k = 1; k < u; ++k)
1735                                         arr[4 + k] = (k < 16) ?
1736                                                  oip->len_mask[k] : 0xff;
1737                                 offset = 4 + u;
1738                         } else
1739                                 offset = 4;
1740                 }
1741                 arr[1] = (rctd ? 0x80 : 0) | supp;
1742                 if (rctd) {
1743                         put_unaligned_be16(0xa, arr + offset);
1744                         offset += 12;
1745                 }
1746                 break;
1747         default:
1748                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1749                 kfree(arr);
1750                 return check_condition_result;
1751         }
1752         offset = (offset < a_len) ? offset : a_len;
1753         len = (offset < alloc_len) ? offset : alloc_len;
1754         errsts = fill_from_dev_buffer(scp, arr, len);
1755         kfree(arr);
1756         return errsts;
1757 }
1758
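/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS: claims ATS, ATSS, LURS and ITNRS. */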
1759 static int
1760 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1761 {
1762         bool repd;
1763         u32 alloc_len, len;
1764         u8 arr[16];
1765         u8 *cmd = scp->cmnd;
1766
1767         memset(arr, 0, sizeof(arr));
1768         repd = !!(cmd[2] & 0x80);
1769         alloc_len = get_unaligned_be32(cmd + 6);
1770         if (alloc_len < 4) {
1771                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1772                 return check_condition_result;
1773         }
1774         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1775         arr[1] = 0x1;           /* ITNRS */
1776         if (repd) {
1777                 arr[3] = 0xc;
1778                 len = 16;
1779         } else
1780                 len = 4;
1781
1782         len = (len < alloc_len) ? len : alloc_len;
1783         return fill_from_dev_buffer(scp, arr, len);
1784 }
1785
1786 /* <<Following mode page info copied from ST318451LW>> */
1787
1788 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1789 {       /* Read-Write Error Recovery page for mode_sense */
1790         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1791                                         5, 0, 0xff, 0xff};
1792
1793         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1794         if (1 == pcontrol)
1795                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1796         return sizeof(err_recov_pg);
1797 }
1798
1799 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1800 {       /* Disconnect-Reconnect page for mode_sense */
1801         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1802                                          0, 0, 0, 0, 0, 0, 0, 0};
1803
1804         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1805         if (1 == pcontrol)
1806                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1807         return sizeof(disconnect_pg);
1808 }
1809
1810 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1811 {       /* Format device page for mode_sense */
1812         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1813                                      0, 0, 0, 0, 0, 0, 0, 0,
1814                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1815
1816         memcpy(p, format_pg, sizeof(format_pg));
1817         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1818         p[11] = sdebug_sectors_per & 0xff;
1819         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1820         p[13] = scsi_debug_sector_size & 0xff;
1821         if (scsi_debug_removable)
1822                 p[20] |= 0x20; /* should agree with INQUIRY */
1823         if (1 == pcontrol)
1824                 memset(p + 2, 0, sizeof(format_pg) - 2);
1825         return sizeof(format_pg);
1826 }
1827
1828 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1829 {       /* Caching page for mode_sense */
1830         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1831                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1832         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1833                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1834
1835         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1836                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1837         memcpy(p, caching_pg, sizeof(caching_pg));
1838         if (1 == pcontrol)
1839                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1840         else if (2 == pcontrol)
1841                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1842         return sizeof(caching_pg);
1843 }
1844
1845 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1846 {       /* Control mode page for mode_sense */
1847         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1848                                         0, 0, 0, 0};
1849         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1850                                      0, 0, 0x2, 0x4b};
1851
1852         if (scsi_debug_dsense)
1853                 ctrl_m_pg[2] |= 0x4;
1854         else
1855                 ctrl_m_pg[2] &= ~0x4;
1856
1857         if (scsi_debug_ato)
1858                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1859
1860         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1861         if (1 == pcontrol)
1862                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1863         else if (2 == pcontrol)
1864                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1865         return sizeof(ctrl_m_pg);
1866 }
1867
1868
1869 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1870 {       /* Informational Exceptions control mode page for mode_sense */
1871         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1872                                        0, 0, 0x0, 0x0};
1873         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1874                                       0, 0, 0x0, 0x0};
1875
1876         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1877         if (1 == pcontrol)
1878                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1879         else if (2 == pcontrol)
1880                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1881         return sizeof(iec_m_pg);
1882 }
1883
1884 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1885 {       /* SAS SSP mode page - short format for mode_sense */
1886         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1887                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1888
1889         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1890         if (1 == pcontrol)
1891                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1892         return sizeof(sas_sf_m_pg);
1893 }
1894
1895
1896 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1897                               int target_dev_id)
1898 {       /* SAS phy control and discover mode page for mode_sense */
1899         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1900                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1901                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1902                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1903                     0x2, 0, 0, 0, 0, 0, 0, 0,
1904                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1905                     0, 0, 0, 0, 0, 0, 0, 0,
1906                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1907                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1908                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1909                     0x3, 0, 0, 0, 0, 0, 0, 0,
1910                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1911                     0, 0, 0, 0, 0, 0, 0, 0,
1912                 };
1913         int port_a, port_b;
1914
1915         port_a = target_dev_id + 1;
1916         port_b = port_a + 1;
1917         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1918         p[20] = (port_a >> 24);
1919         p[21] = (port_a >> 16) & 0xff;
1920         p[22] = (port_a >> 8) & 0xff;
1921         p[23] = port_a & 0xff;
1922         p[48 + 20] = (port_b >> 24);
1923         p[48 + 21] = (port_b >> 16) & 0xff;
1924         p[48 + 22] = (port_b >> 8) & 0xff;
1925         p[48 + 23] = port_b & 0xff;
1926         if (1 == pcontrol)
1927                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1928         return sizeof(sas_pcd_m_pg);
1929 }
1930
1931 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1932 {       /* SAS SSP shared protocol specific port mode subpage */
1933         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1934                     0, 0, 0, 0, 0, 0, 0, 0,
1935                 };
1936
1937         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1938         if (1 == pcontrol)
1939                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1940         return sizeof(sas_sha_m_pg);
1941 }
1942
1943 #define SDEBUG_MAX_MSENSE_SZ 256
1944
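/*
 * MODE SENSE(6)/MODE SENSE(10). Assembles the requested mode page(s),
 * optionally preceded by a short or long block descriptor.
 */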
1945 static int
1946 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1947 {
1948         unsigned char dbd, llbaa;
1949         int pcontrol, pcode, subpcode, bd_len;
1950         unsigned char dev_spec;
1951         int k, alloc_len, msense_6, offset, len, target_dev_id;
1952         int target = scp->device->id;
1953         unsigned char * ap;
1954         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1955         unsigned char *cmd = scp->cmnd;
1956
1957         dbd = !!(cmd[1] & 0x8);
1958         pcontrol = (cmd[2] & 0xc0) >> 6;
1959         pcode = cmd[2] & 0x3f;
1960         subpcode = cmd[3];
1961         msense_6 = (MODE_SENSE == cmd[0]);
1962         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1963         if ((0 == scsi_debug_ptype) && (0 == dbd))
1964                 bd_len = llbaa ? 16 : 8;
1965         else
1966                 bd_len = 0;
1967         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1968         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1969         if (0x3 == pcontrol) {  /* Saving values not supported */
1970                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1971                 return check_condition_result;
1972         }
1973         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1974                         (devip->target * 1000) - 3;
1975         /* set DPOFUA bit for disks */
1976         if (0 == scsi_debug_ptype)
1977                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1978         else
1979                 dev_spec = 0x0;
1980         if (msense_6) {
1981                 arr[2] = dev_spec;
1982                 arr[3] = bd_len;
1983                 offset = 4;
1984         } else {
1985                 arr[3] = dev_spec;
1986                 if (16 == bd_len)
1987                         arr[4] = 0x1;   /* set LONGLBA bit */
1988                 arr[7] = bd_len;        /* assume 255 or less */
1989                 offset = 8;
1990         }
1991         ap = arr + offset;
1992         if ((bd_len > 0) && (!sdebug_capacity))
1993                 sdebug_capacity = get_sdebug_capacity();
1994
1995         if (8 == bd_len) {
1996                 if (sdebug_capacity > 0xfffffffe) {
1997                         ap[0] = 0xff;
1998                         ap[1] = 0xff;
1999                         ap[2] = 0xff;
2000                         ap[3] = 0xff;
2001                 } else {
2002                         ap[0] = (sdebug_capacity >> 24) & 0xff;
2003                         ap[1] = (sdebug_capacity >> 16) & 0xff;
2004                         ap[2] = (sdebug_capacity >> 8) & 0xff;
2005                         ap[3] = sdebug_capacity & 0xff;
2006                 }
2007                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2008                 ap[7] = scsi_debug_sector_size & 0xff;
2009                 offset += bd_len;
2010                 ap = arr + offset;
2011         } else if (16 == bd_len) {
2012                 unsigned long long capac = sdebug_capacity;
2013
2014                 for (k = 0; k < 8; ++k, capac >>= 8)
2015                         ap[7 - k] = capac & 0xff;
2016                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2017                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2018                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2019                 ap[15] = scsi_debug_sector_size & 0xff;
2020                 offset += bd_len;
2021                 ap = arr + offset;
2022         }
2023
2024         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2025                 /* TODO: Control Extension page */
2026                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2027                 return check_condition_result;
2028         }
2029         switch (pcode) {
2030         case 0x1:       /* Read-Write error recovery page, direct access */
2031                 len = resp_err_recov_pg(ap, pcontrol, target);
2032                 offset += len;
2033                 break;
2034         case 0x2:       /* Disconnect-Reconnect page, all devices */
2035                 len = resp_disconnect_pg(ap, pcontrol, target);
2036                 offset += len;
2037                 break;
2038         case 0x3:       /* Format device page, direct access */
2039                 len = resp_format_pg(ap, pcontrol, target);
2040                 offset += len;
2041                 break;
2042         case 0x8:       /* Caching page, direct access */
2043                 len = resp_caching_pg(ap, pcontrol, target);
2044                 offset += len;
2045                 break;
2046         case 0xa:       /* Control Mode page, all devices */
2047                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2048                 offset += len;
2049                 break;
2050         case 0x19:      /* if spc==1 then sas phy, control+discover */
2051                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2052                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2053                         return check_condition_result;
2054                 }
2055                 len = 0;
2056                 if ((0x0 == subpcode) || (0xff == subpcode))
2057                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2058                 if ((0x1 == subpcode) || (0xff == subpcode))
2059                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2060                                                   target_dev_id);
2061                 if ((0x2 == subpcode) || (0xff == subpcode))
2062                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2063                 offset += len;
2064                 break;
2065         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2066                 len = resp_iec_m_pg(ap, pcontrol, target);
2067                 offset += len;
2068                 break;
2069         case 0x3f:      /* Read all Mode pages */
2070                 if ((0 == subpcode) || (0xff == subpcode)) {
2071                         len = resp_err_recov_pg(ap, pcontrol, target);
2072                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2073                         len += resp_format_pg(ap + len, pcontrol, target);
2074                         len += resp_caching_pg(ap + len, pcontrol, target);
2075                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2076                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2077                         if (0xff == subpcode) {
2078                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2079                                                   target, target_dev_id);
2080                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2081                         }
2082                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2083                 } else {
2084                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2085                         return check_condition_result;
2086                 }
2087                 offset += len;
2088                 break;
2089         default:
2090                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2091                 return check_condition_result;
2092         }
2093         if (msense_6)
2094                 arr[0] = offset - 1;
2095         else {
2096                 arr[0] = ((offset - 2) >> 8) & 0xff;
2097                 arr[1] = (offset - 2) & 0xff;
2098         }
2099         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2100 }
2101
2102 #define SDEBUG_MAX_MSELECT_SZ 512
2103
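/*
 * MODE SELECT(6)/MODE SELECT(10). Accepts changes to the Caching, Control
 * and Informational Exceptions pages and flags a MODE PARAMETERS CHANGED
 * unit attention on success.
 */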
2104 static int
2105 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2106 {
2107         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2108         int param_len, res, mpage;
2109         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2110         unsigned char *cmd = scp->cmnd;
2111         int mselect6 = (MODE_SELECT == cmd[0]);
2112
2113         memset(arr, 0, sizeof(arr));
2114         pf = cmd[1] & 0x10;
2115         sp = cmd[1] & 0x1;
2116         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
2117         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2118                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2119                 return check_condition_result;
2120         }
2121         res = fetch_to_dev_buffer(scp, arr, param_len);
2122         if (-1 == res)
2123                 return (DID_ERROR << 16);
2124         else if ((res < param_len) &&
2125                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2126                 sdev_printk(KERN_INFO, scp->device,
2127                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2128                             __func__, param_len, res);
2129         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2130         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2131         if (md_len > 2) {
2132                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2133                 return check_condition_result;
2134         }
2135         off = bd_len + (mselect6 ? 4 : 8);
2136         mpage = arr[off] & 0x3f;
2137         ps = !!(arr[off] & 0x80);
2138         if (ps) {
2139                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2140                 return check_condition_result;
2141         }
2142         spf = !!(arr[off] & 0x40);
2143         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2144                        (arr[off + 1] + 2);
2145         if ((pg_len + off) > param_len) {
2146                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2147                                 PARAMETER_LIST_LENGTH_ERR, 0);
2148                 return check_condition_result;
2149         }
2150         switch (mpage) {
2151         case 0x8:      /* Caching Mode page */
2152                 if (caching_pg[1] == arr[off + 1]) {
2153                         memcpy(caching_pg + 2, arr + off + 2,
2154                                sizeof(caching_pg) - 2);
2155                         goto set_mode_changed_ua;
2156                 }
2157                 break;
2158         case 0xa:      /* Control Mode page */
2159                 if (ctrl_m_pg[1] == arr[off + 1]) {
2160                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2161                                sizeof(ctrl_m_pg) - 2);
2162                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2163                         goto set_mode_changed_ua;
2164                 }
2165                 break;
2166         case 0x1c:      /* Informational Exceptions Mode page */
2167                 if (iec_m_pg[1] == arr[off + 1]) {
2168                         memcpy(iec_m_pg + 2, arr + off + 2,
2169                                sizeof(iec_m_pg) - 2);
2170                         goto set_mode_changed_ua;
2171                 }
2172                 break;
2173         default:
2174                 break;
2175         }
2176         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2177         return check_condition_result;
2178 set_mode_changed_ua:
2179         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2180         return 0;
2181 }
2182
2183 static int resp_temp_l_pg(unsigned char * arr)
2184 {
2185         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2186                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2187                 };
2188
2189         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2190         return sizeof(temp_l_pg);
2191 }
2192
2193 static int resp_ie_l_pg(unsigned char * arr)
2194 {
2195         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2196                 };
2197
2198         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2199         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2200                 arr[4] = THRESHOLD_EXCEEDED;
2201                 arr[5] = 0xff;
2202         }
2203         return sizeof(ie_l_pg);
2204 }
2205
2206 #define SDEBUG_MAX_LSENSE_SZ 512
2207
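/* LOG SENSE: supports the Temperature and Informational Exceptions log pages. */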
2208 static int resp_log_sense(struct scsi_cmnd * scp,
2209                           struct sdebug_dev_info * devip)
2210 {
2211         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2212         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2213         unsigned char *cmd = scp->cmnd;
2214
2215         memset(arr, 0, sizeof(arr));
2216         ppc = cmd[1] & 0x2;
2217         sp = cmd[1] & 0x1;
2218         if (ppc || sp) {
2219                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2220                 return check_condition_result;
2221         }
2222         pcontrol = (cmd[2] & 0xc0) >> 6;
2223         pcode = cmd[2] & 0x3f;
2224         subpcode = cmd[3] & 0xff;
2225         alloc_len = (cmd[7] << 8) + cmd[8];
2226         arr[0] = pcode;
2227         if (0 == subpcode) {
2228                 switch (pcode) {
2229                 case 0x0:       /* Supported log pages log page */
2230                         n = 4;
2231                         arr[n++] = 0x0;         /* this page */
2232                         arr[n++] = 0xd;         /* Temperature */
2233                         arr[n++] = 0x2f;        /* Informational exceptions */
2234                         arr[3] = n - 4;
2235                         break;
2236                 case 0xd:       /* Temperature log page */
2237                         arr[3] = resp_temp_l_pg(arr + 4);
2238                         break;
2239                 case 0x2f:      /* Informational exceptions log page */
2240                         arr[3] = resp_ie_l_pg(arr + 4);
2241                         break;
2242                 default:
2243                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2244                         return check_condition_result;
2245                 }
2246         } else if (0xff == subpcode) {
2247                 arr[0] |= 0x40;
2248                 arr[1] = subpcode;
2249                 switch (pcode) {
2250                 case 0x0:       /* Supported log pages and subpages log page */
2251                         n = 4;
2252                         arr[n++] = 0x0;
2253                         arr[n++] = 0x0;         /* 0,0 page */
2254                         arr[n++] = 0x0;
2255                         arr[n++] = 0xff;        /* this page */
2256                         arr[n++] = 0xd;
2257                         arr[n++] = 0x0;         /* Temperature */
2258                         arr[n++] = 0x2f;
2259                         arr[n++] = 0x0; /* Informational exceptions */
2260                         arr[3] = n - 4;
2261                         break;
2262                 case 0xd:       /* Temperature subpages */
2263                         n = 4;
2264                         arr[n++] = 0xd;
2265                         arr[n++] = 0x0;         /* Temperature */
2266                         arr[3] = n - 4;
2267                         break;
2268                 case 0x2f:      /* Informational exceptions subpages */
2269                         n = 4;
2270                         arr[n++] = 0x2f;
2271                         arr[n++] = 0x0;         /* Informational exceptions */
2272                         arr[3] = n - 4;
2273                         break;
2274                 default:
2275                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2276                         return check_condition_result;
2277                 }
2278         } else {
2279                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2280                 return check_condition_result;
2281         }
2282         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2283         return fill_from_dev_buffer(scp, arr,
2284                     min(len, SDEBUG_MAX_LSENSE_SZ));
2285 }
2286
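/* Check that the LBA range fits within the capacity and the backing store. */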
2287 static int check_device_access_params(struct scsi_cmnd *scp,
2288                                       unsigned long long lba, unsigned int num)
2289 {
2290         if (lba + num > sdebug_capacity) {
2291                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2292                 return check_condition_result;
2293         }
2294         /* transfer length excessive (tie in to block limits VPD page) */
2295         if (num > sdebug_store_sectors) {
2296                 /* needs work to find which cdb byte 'num' comes from */
2297                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2298                 return check_condition_result;
2299         }
2300         return 0;
2301 }
2302
2303 /* Returns number of bytes copied or -1 if error. */
2304 static int
2305 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2306 {
2307         int ret;
2308         u64 block, rest = 0;
2309         struct scsi_data_buffer *sdb;
2310         enum dma_data_direction dir;
2311         size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
2312                        off_t);
2313
2314         if (do_write) {
2315                 sdb = scsi_out(scmd);
2316                 dir = DMA_TO_DEVICE;
2317                 func = sg_pcopy_to_buffer;
2318         } else {
2319                 sdb = scsi_in(scmd);
2320                 dir = DMA_FROM_DEVICE;
2321                 func = sg_pcopy_from_buffer;
2322         }
2323
2324         if (!sdb->length)
2325                 return 0;
2326         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2327                 return -1;
2328
2329         block = do_div(lba, sdebug_store_sectors);
2330         if (block + num > sdebug_store_sectors)
2331                 rest = block + num - sdebug_store_sectors;
2332
2333         ret = func(sdb->table.sgl, sdb->table.nents,
2334                    fake_storep + (block * scsi_debug_sector_size),
2335                    (num - rest) * scsi_debug_sector_size, 0);
2336         if (ret != (num - rest) * scsi_debug_sector_size)
2337                 return ret;
2338
2339         if (rest) {
2340                 ret += func(sdb->table.sgl, sdb->table.nents,
2341                             fake_storep, rest * scsi_debug_sector_size,
2342                             (num - rest) * scsi_debug_sector_size);
2343         }
2344
2345         return ret;
2346 }
2347
2348 /* If the first num blocks of arr compare equal to fake_store(lba,num), then
2349  * copy the second num blocks of arr (the write data) into fake_store(lba,num)
2350  * and return true. If the comparison fails then return false. */
2351 static bool
2352 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2353 {
2354         bool res;
2355         u64 block, rest = 0;
2356         u32 store_blks = sdebug_store_sectors;
2357         u32 lb_size = scsi_debug_sector_size;
2358
2359         block = do_div(lba, store_blks);
2360         if (block + num > store_blks)
2361                 rest = block + num - store_blks;
2362
2363         res = !memcmp(fake_storep + (block * lb_size), arr,
2364                       (num - rest) * lb_size);
2365         if (!res)
2366                 return res;
2367         if (rest)
2368                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2369                               rest * lb_size);
2370         if (!res)
2371                 return res;
2372         arr += num * lb_size;
2373         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2374         if (rest)
2375                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2376                        rest * lb_size);
2377         return res;
2378 }
2379
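/* Guard tag for one block: IP checksum if scsi_debug_guard is set, else CRC-T10DIF. */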
2380 static __be16 dif_compute_csum(const void *buf, int len)
2381 {
2382         __be16 csum;
2383
2384         if (scsi_debug_guard)
2385                 csum = (__force __be16)ip_compute_csum(buf, len);
2386         else
2387                 csum = cpu_to_be16(crc_t10dif(buf, len));
2388
2389         return csum;
2390 }
2391
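/*
 * Verify one block's protection tuple: guard tag always, reference tag for
 * DIF types 1 and 2. Returns 0 on success, else a code used as the ASCQ in
 * the resulting sense data.
 */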
2392 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2393                       sector_t sector, u32 ei_lba)
2394 {
2395         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2396
2397         if (sdt->guard_tag != csum) {
2398                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2399                         __func__,
2400                         (unsigned long)sector,
2401                         be16_to_cpu(sdt->guard_tag),
2402                         be16_to_cpu(csum));
2403                 return 0x01;
2404         }
2405         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2406             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2407                 pr_err("%s: REF check failed on sector %lu\n",
2408                         __func__, (unsigned long)sector);
2409                 return 0x03;
2410         }
2411         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2412             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2413                 pr_err("%s: REF check failed on sector %lu\n",
2414                         __func__, (unsigned long)sector);
2415                 return 0x03;
2416         }
2417         return 0;
2418 }
2419
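/*
 * Copy protection tuples between dif_storep and the command's protection
 * scatterlist, handling wrap-around at the end of the store.
 */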
2420 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2421                           unsigned int sectors, bool read)
2422 {
2423         size_t resid;
2424         void *paddr;
2425         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2426         struct sg_mapping_iter miter;
2427
2428         /* Bytes of protection data to copy into sgl */
2429         resid = sectors * sizeof(*dif_storep);
2430
2431         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2432                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2433                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2434
2435         while (sg_miter_next(&miter) && resid > 0) {
2436                 size_t len = min(miter.length, resid);
2437                 void *start = dif_store(sector);
2438                 size_t rest = 0;
2439
2440                 if (dif_store_end < start + len)
2441                         rest = start + len - dif_store_end;
2442
2443                 paddr = miter.addr;
2444
2445                 if (read)
2446                         memcpy(paddr, start, len - rest);
2447                 else
2448                         memcpy(start, paddr, len - rest);
2449
2450                 if (rest) {
2451                         if (read)
2452                                 memcpy(paddr + len - rest, dif_storep, rest);
2453                         else
2454                                 memcpy(dif_storep, paddr + len - rest, rest);
2455                 }
2456
2457                 sector += len / sizeof(*dif_storep);
2458                 resid -= len;
2459         }
2460         sg_miter_stop(&miter);
2461 }
2462
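/*
 * Verify the stored protection information for a read and, if it checks
 * out, copy it into the command's protection scatterlist.
 */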
2463 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2464                             unsigned int sectors, u32 ei_lba)
2465 {
2466         unsigned int i;
2467         struct sd_dif_tuple *sdt;
2468         sector_t sector;
2469
2470         for (i = 0; i < sectors; i++, ei_lba++) {
2471                 int ret;
2472
2473                 sector = start_sec + i;
2474                 sdt = dif_store(sector);
2475
2476                 if (sdt->app_tag == cpu_to_be16(0xffff))
2477                         continue;
2478
2479                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2480                 if (ret) {
2481                         dif_errors++;
2482                         return ret;
2483                 }
2484         }
2485
2486         dif_copy_prot(SCpnt, start_sec, sectors, true);
2487         dix_reads++;
2488
2489         return 0;
2490 }
2491
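/*
 * Handles READ(6/10/12/16/32) and the read half of XDWRITEREAD(10):
 * decodes the CDB, performs range/protection checks and optional error
 * injection, then copies data from the fake store.
 */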
2492 static int
2493 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2494 {
2495         u8 *cmd = scp->cmnd;
2496         u64 lba;
2497         u32 num;
2498         u32 ei_lba;
2499         unsigned long iflags;
2500         int ret;
2501         bool check_prot;
2502
2503         switch (cmd[0]) {
2504         case READ_16:
2505                 ei_lba = 0;
2506                 lba = get_unaligned_be64(cmd + 2);
2507                 num = get_unaligned_be32(cmd + 10);
2508                 check_prot = true;
2509                 break;
2510         case READ_10:
2511                 ei_lba = 0;
2512                 lba = get_unaligned_be32(cmd + 2);
2513                 num = get_unaligned_be16(cmd + 7);
2514                 check_prot = true;
2515                 break;
2516         case READ_6:
2517                 ei_lba = 0;
2518                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2519                       (u32)(cmd[1] & 0x1f) << 16;
2520                 num = (0 == cmd[4]) ? 256 : cmd[4];
2521                 check_prot = true;
2522                 break;
2523         case READ_12:
2524                 ei_lba = 0;
2525                 lba = get_unaligned_be32(cmd + 2);
2526                 num = get_unaligned_be32(cmd + 6);
2527                 check_prot = true;
2528                 break;
2529         case XDWRITEREAD_10:
2530                 ei_lba = 0;
2531                 lba = get_unaligned_be32(cmd + 2);
2532                 num = get_unaligned_be16(cmd + 7);
2533                 check_prot = false;
2534                 break;
2535         default:        /* assume READ(32) */
2536                 lba = get_unaligned_be64(cmd + 12);
2537                 ei_lba = get_unaligned_be32(cmd + 20);
2538                 num = get_unaligned_be32(cmd + 28);
2539                 check_prot = false;
2540                 break;
2541         }
2542         if (check_prot) {
2543                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2544                     (cmd[1] & 0xe0)) {
2545                         mk_sense_invalid_opcode(scp);
2546                         return check_condition_result;
2547                 }
2548                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2549                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2550                     (cmd[1] & 0xe0) == 0)
2551                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2552                                     "to DIF device\n");
2553         }
2554         if (sdebug_any_injecting_opt) {
2555                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2556
2557                 if (ep->inj_short)
2558                         num /= 2;
2559         }
2560
2561         /* inline check_device_access_params() */
2562         if (lba + num > sdebug_capacity) {
2563                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2564                 return check_condition_result;
2565         }
2566         /* transfer length excessive (tie in to block limits VPD page) */
2567         if (num > sdebug_store_sectors) {
2568                 /* needs work to find which cdb byte 'num' comes from */
2569                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2570                 return check_condition_result;
2571         }
2572
2573         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2574             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2575             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2576                 /* claim unrecoverable read error */
2577                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2578                 /* set info field and valid bit for fixed descriptor */
2579                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2580                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2581                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2582                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2583                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2584                 }
2585                 scsi_set_resid(scp, scsi_bufflen(scp));
2586                 return check_condition_result;
2587         }
2588
2589         read_lock_irqsave(&atomic_rw, iflags);
2590
2591         /* DIX + T10 DIF */
2592         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2593                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2594
2595                 if (prot_ret) {
2596                         read_unlock_irqrestore(&atomic_rw, iflags);
2597                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2598                         return illegal_condition_result;
2599                 }
2600         }
2601
2602         ret = do_device_access(scp, lba, num, false);
2603         read_unlock_irqrestore(&atomic_rw, iflags);
2604         if (ret == -1)
2605                 return DID_ERROR << 16;
2606
2607         scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2608
2609         if (sdebug_any_injecting_opt) {
2610                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2611
2612                 if (ep->inj_recovered) {
2613                         mk_sense_buffer(scp, RECOVERED_ERROR,
2614                                         THRESHOLD_EXCEEDED, 0);
2615                         return check_condition_result;
2616                 } else if (ep->inj_transport) {
2617                         mk_sense_buffer(scp, ABORTED_COMMAND,
2618                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2619                         return check_condition_result;
2620                 } else if (ep->inj_dif) {
2621                         /* Logical block guard check failed */
2622                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2623                         return illegal_condition_result;
2624                 } else if (ep->inj_dix) {
2625                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2626                         return illegal_condition_result;
2627                 }
2628         }
2629         return 0;
2630 }
2631
2632 static void dump_sector(unsigned char *buf, int len)
2633 {
2634         int i, j, n;
2635
2636         pr_err(">>> Sector Dump <<<\n");
2637         for (i = 0 ; i < len ; i += 16) {
2638                 char b[128];
2639
2640                 for (j = 0, n = 0; j < 16; j++) {
2641                         unsigned char c = buf[i+j];
2642
2643                         if (c >= 0x20 && c < 0x7e)
2644                                 n += scnprintf(b + n, sizeof(b) - n,
2645                                                " %c ", buf[i+j]);
2646                         else
2647                                 n += scnprintf(b + n, sizeof(b) - n,
2648                                                "%02x ", buf[i+j]);
2649                 }
2650                 pr_err("%04d: %s\n", i, b);
2651         }
2652 }
2653
2654 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2655                              unsigned int sectors, u32 ei_lba)
2656 {
2657         int ret;
2658         struct sd_dif_tuple *sdt;
2659         void *daddr;
2660         sector_t sector = start_sec;
2661         int ppage_offset;
2662         int dpage_offset;
2663         struct sg_mapping_iter diter;
2664         struct sg_mapping_iter piter;
2665
2666         BUG_ON(scsi_sg_count(SCpnt) == 0);
2667         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2668
2669         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2670                         scsi_prot_sg_count(SCpnt),
2671                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2672         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2673                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2674
2675         /* For each protection page */
2676         while (sg_miter_next(&piter)) {
2677                 dpage_offset = 0;
2678                 if (WARN_ON(!sg_miter_next(&diter))) {
2679                         ret = 0x01;
2680                         goto out;
2681                 }
2682
2683                 for (ppage_offset = 0; ppage_offset < piter.length;
2684                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2685                         /* If we're at the end of the current
2686                          * data page, advance to the next one
2687                          */
2688                         if (dpage_offset >= diter.length) {
2689                                 if (WARN_ON(!sg_miter_next(&diter))) {
2690                                         ret = 0x01;
2691                                         goto out;
2692                                 }
2693                                 dpage_offset = 0;
2694                         }
2695
2696                         sdt = piter.addr + ppage_offset;
2697                         daddr = diter.addr + dpage_offset;
2698
2699                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2700                         if (ret) {
2701                                 dump_sector(daddr, scsi_debug_sector_size);
2702                                 goto out;
2703                         }
2704
2705                         sector++;
2706                         ei_lba++;
2707                         dpage_offset += scsi_debug_sector_size;
2708                 }
2709                 diter.consumed = dpage_offset;
2710                 sg_miter_stop(&diter);
2711         }
2712         sg_miter_stop(&piter);
2713
2714         dif_copy_prot(SCpnt, start_sec, sectors, false);
2715         dix_writes++;
2716
2717         return 0;
2718
2719 out:
2720         dif_errors++;
2721         sg_miter_stop(&diter);
2722         sg_miter_stop(&piter);
2723         return ret;
2724 }
2725
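     /* Map an LBA to its bit index in the logical block provisioning map: each
      * bit covers scsi_debug_unmap_granularity blocks, shifted by
      * scsi_debug_unmap_alignment. With alignment 0 this is lba / granularity,
      * e.g. granularity 8 puts LBAs 0..7 in bit 0 and 8..15 in bit 1. */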
2726 static unsigned long lba_to_map_index(sector_t lba)
2727 {
2728         if (scsi_debug_unmap_alignment) {
2729                 lba += scsi_debug_unmap_granularity -
2730                         scsi_debug_unmap_alignment;
2731         }
2732         do_div(lba, scsi_debug_unmap_granularity);
2733
2734         return lba;
2735 }
2736
2737 static sector_t map_index_to_lba(unsigned long index)
2738 {
2739         sector_t lba = index * scsi_debug_unmap_granularity;
2740
2741         if (scsi_debug_unmap_alignment) {
2742                 lba -= scsi_debug_unmap_granularity -
2743                         scsi_debug_unmap_alignment;
2744         }
2745
2746         return lba;
2747 }
2748
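     /* Return whether 'lba' is currently mapped and set *num to the count of
      * following blocks (capped at the end of the store) in the same state. */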
2749 static unsigned int map_state(sector_t lba, unsigned int *num)
2750 {
2751         sector_t end;
2752         unsigned int mapped;
2753         unsigned long index;
2754         unsigned long next;
2755
2756         index = lba_to_map_index(lba);
2757         mapped = test_bit(index, map_storep);
2758
2759         if (mapped)
2760                 next = find_next_zero_bit(map_storep, map_size, index);
2761         else
2762                 next = find_next_bit(map_storep, map_size, index);
2763
2764         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2765         *num = end - lba;
2766
2767         return mapped;
2768 }
2769
2770 static void map_region(sector_t lba, unsigned int len)
2771 {
2772         sector_t end = lba + len;
2773
2774         while (lba < end) {
2775                 unsigned long index = lba_to_map_index(lba);
2776
2777                 if (index < map_size)
2778                         set_bit(index, map_storep);
2779
2780                 lba = map_index_to_lba(index + 1);
2781         }
2782 }
2783
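     /* Deallocate only provisioning units that the range covers completely.
      * When lbprz is set the backing store for the unit is zeroed, and any
      * protection tuples are filled with 0xff so later verification skips them. */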
2784 static void unmap_region(sector_t lba, unsigned int len)
2785 {
2786         sector_t end = lba + len;
2787
2788         while (lba < end) {
2789                 unsigned long index = lba_to_map_index(lba);
2790
2791                 if (lba == map_index_to_lba(index) &&
2792                     lba + scsi_debug_unmap_granularity <= end &&
2793                     index < map_size) {
2794                         clear_bit(index, map_storep);
2795                         if (scsi_debug_lbprz) {
2796                                 memset(fake_storep +
2797                                        lba * scsi_debug_sector_size, 0,
2798                                        scsi_debug_sector_size *
2799                                        scsi_debug_unmap_granularity);
2800                         }
2801                         if (dif_storep) {
2802                                 memset(dif_storep + lba, 0xff,
2803                                        sizeof(*dif_storep) *
2804                                        scsi_debug_unmap_granularity);
2805                         }
2806                 }
2807                 lba = map_index_to_lba(index + 1);
2808         }
2809 }
2810
2811 static int
2812 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2813 {
2814         u8 *cmd = scp->cmnd;
2815         u64 lba;
2816         u32 num;
2817         u32 ei_lba;
2818         unsigned long iflags;
2819         int ret;
2820         bool check_prot;
2821
2822         switch (cmd[0]) {
2823         case WRITE_16:
2824                 ei_lba = 0;
2825                 lba = get_unaligned_be64(cmd + 2);
2826                 num = get_unaligned_be32(cmd + 10);
2827                 check_prot = true;
2828                 break;
2829         case WRITE_10:
2830                 ei_lba = 0;
2831                 lba = get_unaligned_be32(cmd + 2);
2832                 num = get_unaligned_be16(cmd + 7);
2833                 check_prot = true;
2834                 break;
2835         case WRITE_6:
2836                 ei_lba = 0;
2837                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2838                       (u32)(cmd[1] & 0x1f) << 16;
2839                 num = (0 == cmd[4]) ? 256 : cmd[4];
2840                 check_prot = true;
2841                 break;
2842         case WRITE_12:
2843                 ei_lba = 0;
2844                 lba = get_unaligned_be32(cmd + 2);
2845                 num = get_unaligned_be32(cmd + 6);
2846                 check_prot = true;
2847                 break;
2848         case 0x53:      /* XDWRITEREAD(10) */
2849                 ei_lba = 0;
2850                 lba = get_unaligned_be32(cmd + 2);
2851                 num = get_unaligned_be16(cmd + 7);
2852                 check_prot = false;
2853                 break;
2854         default:        /* assume WRITE(32) */
2855                 lba = get_unaligned_be64(cmd + 12);
2856                 ei_lba = get_unaligned_be32(cmd + 20);
2857                 num = get_unaligned_be32(cmd + 28);
2858                 check_prot = false;
2859                 break;
2860         }
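             /* cmd[1] bits 5..7 carry the WRPROTECT field; checked the same way
              * as RDPROTECT in resp_read_dt0(). */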
2861         if (check_prot) {
2862                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2863                     (cmd[1] & 0xe0)) {
2864                         mk_sense_invalid_opcode(scp);
2865                         return check_condition_result;
2866                 }
2867                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2868                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2869                     (cmd[1] & 0xe0) == 0)
2870                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2871                                     "to DIF device\n");
2872         }
2873
2874         /* inline check_device_access_params() */
2875         if (lba + num > sdebug_capacity) {
2876                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2877                 return check_condition_result;
2878         }
2879         /* transfer length excessive (tie in to block limits VPD page) */
2880         if (num > sdebug_store_sectors) {
2881                 /* needs work to find which cdb byte 'num' comes from */
2882                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2883                 return check_condition_result;
2884         }
2885
2886         write_lock_irqsave(&atomic_rw, iflags);
2887
2888         /* DIX + T10 DIF */
2889         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2890                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2891
2892                 if (prot_ret) {
2893                         write_unlock_irqrestore(&atomic_rw, iflags);
2894                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2895                         return illegal_condition_result;
2896                 }
2897         }
2898
2899         ret = do_device_access(scp, lba, num, true);
2900         if (scsi_debug_lbp())
2901                 map_region(lba, num);
2902         write_unlock_irqrestore(&atomic_rw, iflags);
2903         if (-1 == ret)
2904                 return (DID_ERROR << 16);
2905         else if ((ret < (num * scsi_debug_sector_size)) &&
2906                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2907                 sdev_printk(KERN_INFO, scp->device,
2908                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2909                             my_name, num * scsi_debug_sector_size, ret);
2910
2911         if (sdebug_any_injecting_opt) {
2912                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2913
2914                 if (ep->inj_recovered) {
2915                         mk_sense_buffer(scp, RECOVERED_ERROR,
2916                                         THRESHOLD_EXCEEDED, 0);
2917                         return check_condition_result;
2918                 } else if (ep->inj_dif) {
2919                         /* Logical block guard check failed */
2920                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2921                         return illegal_condition_result;
2922                 } else if (ep->inj_dix) {
2923                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2924                         return illegal_condition_result;
2925                 }
2926         }
2927         return 0;
2928 }
2929
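     /* WRITE SAME worker: with UNMAP set and logical block provisioning enabled
      * the range is simply deallocated; otherwise one logical block is obtained
      * (all zeroes when NDOB is set) and replicated across the range. */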
2930 static int
2931 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2932                 bool unmap, bool ndob)
2933 {
2934         unsigned long iflags;
2935         unsigned long long i;
2936         int ret;
2937
2938         ret = check_device_access_params(scp, lba, num);
2939         if (ret)
2940                 return ret;
2941
2942         write_lock_irqsave(&atomic_rw, iflags);
2943
2944         if (unmap && scsi_debug_lbp()) {
2945                 unmap_region(lba, num);
2946                 goto out;
2947         }
2948
2949         /* if ndob then zero 1 logical block, else fetch 1 logical block */
2950         if (ndob) {
2951                 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2952                        scsi_debug_sector_size);
2953                 ret = 0;
2954         } else
2955                 ret = fetch_to_dev_buffer(scp, fake_storep +
2956                                                (lba * scsi_debug_sector_size),
2957                                           scsi_debug_sector_size);
2958
2959         if (-1 == ret) {
2960                 write_unlock_irqrestore(&atomic_rw, iflags);
2961                 return (DID_ERROR << 16);
2962         } else if ((ret < (num * scsi_debug_sector_size)) &&
2963                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2964                 sdev_printk(KERN_INFO, scp->device,
2965                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2966                             my_name, "write same",
2967                             num * scsi_debug_sector_size, ret);
2968
2969         /* Copy first sector to remaining blocks */
2970         for (i = 1 ; i < num ; i++)
2971                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2972                        fake_storep + (lba * scsi_debug_sector_size),
2973                        scsi_debug_sector_size);
2974
2975         if (scsi_debug_lbp())
2976                 map_region(lba, num);
2977 out:
2978         write_unlock_irqrestore(&atomic_rw, iflags);
2979
2980         return 0;
2981 }
2982
2983 static int
2984 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2985 {
2986         u8 *cmd = scp->cmnd;
2987         u32 lba;
2988         u16 num;
2989         u32 ei_lba = 0;
2990         bool unmap = false;
2991
2992         if (cmd[1] & 0x8) {
2993                 if (scsi_debug_lbpws10 == 0) {
2994                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2995                         return check_condition_result;
2996                 } else
2997                         unmap = true;
2998         }
2999         lba = get_unaligned_be32(cmd + 2);
3000         num = get_unaligned_be16(cmd + 7);
3001         if (num > scsi_debug_write_same_length) {
3002                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3003                 return check_condition_result;
3004         }
3005         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3006 }
3007
3008 static int
3009 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3010 {
3011         u8 *cmd = scp->cmnd;
3012         u64 lba;
3013         u32 num;
3014         u32 ei_lba = 0;
3015         bool unmap = false;
3016         bool ndob = false;
3017
3018         if (cmd[1] & 0x8) {     /* UNMAP */
3019                 if (scsi_debug_lbpws == 0) {
3020                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3021                         return check_condition_result;
3022                 } else
3023                         unmap = true;
3024         }
3025         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3026                 ndob = true;
3027         lba = get_unaligned_be64(cmd + 2);
3028         num = get_unaligned_be32(cmd + 10);
3029         if (num > scsi_debug_write_same_length) {
3030                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3031                 return check_condition_result;
3032         }
3033         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3034 }
3035
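     /* COMPARE AND WRITE (0x89): fetch 2 * num blocks of data-out (the compare
      * data followed by the write data), compare the first half with the store
      * and, only if every block matches, write the second half; otherwise a
      * MISCOMPARE is returned. */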
3036 static int
3037 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3038 {
3039         u8 *cmd = scp->cmnd;
3040         u8 *arr;
3041         u8 *fake_storep_hold;
3042         u64 lba;
3043         u32 dnum;
3044         u32 lb_size = scsi_debug_sector_size;
3045         u8 num;
3046         unsigned long iflags;
3047         int ret;
3048
3049         lba = get_unaligned_be64(cmd + 2);      /* 64 bit LBA per SBC-3 */
3050         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3051         if (0 == num)
3052                 return 0;       /* degenerate case, not an error */
3053         dnum = 2 * num;
3054         arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3055         if (NULL == arr) {
3056                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3057                                 INSUFF_RES_ASCQ);
3058                 return check_condition_result;
3059         }
3060         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3061             (cmd[1] & 0xe0)) {
3062                 mk_sense_invalid_opcode(scp);
                     kfree(arr);     /* don't leak the compare+write buffer */
3063                 return check_condition_result;
3064         }
3065         if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3066              scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3067             (cmd[1] & 0xe0) == 0)
3068                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3069                             "to DIF device\n");
3070
3071         /* inline check_device_access_params() */
3072         if (lba + num > sdebug_capacity) {
3073                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
                     kfree(arr);
3074                 return check_condition_result;
3075         }
3076         /* transfer length excessive (tie in to block limits VPD page) */
3077         if (num > sdebug_store_sectors) {
3078                 /* needs work to find which cdb byte 'num' comes from */
3079                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                     kfree(arr);
3080                 return check_condition_result;
3081         }
3082
3083         write_lock_irqsave(&atomic_rw, iflags);
3084
3085         /* trick do_device_access() into fetching both the compare and write
3086          * buffers from the data-out buffer into arr; safe since write_lock held. */
3087         fake_storep_hold = fake_storep;
3088         fake_storep = arr;
3089         ret = do_device_access(scp, 0, dnum, true);
3090         fake_storep = fake_storep_hold;
3091         if (ret == -1) {
3092                 write_unlock_irqrestore(&atomic_rw, iflags);
3093                 kfree(arr);
3094                 return DID_ERROR << 16;
3095         } else if ((ret < (dnum * lb_size)) &&
3096                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3097                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3098                             "indicated=%u, IO sent=%d bytes\n", my_name,
3099                             dnum * lb_size, ret);
3100         if (!comp_write_worker(lba, num, arr)) {
3101                 write_unlock_irqrestore(&atomic_rw, iflags);
3102                 kfree(arr);
3103                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3104                 return check_condition_result;
3105         }
3106         if (scsi_debug_lbp())
3107                 map_region(lba, num);
3108         write_unlock_irqrestore(&atomic_rw, iflags);
3109         return 0;
3110 }
3111
3112 struct unmap_block_desc {
3113         __be64  lba;
3114         __be32  blocks;
3115         __be32  __reserved;
3116 };
3117
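     /* UNMAP: the parameter list is an 8 byte header followed by 16 byte block
      * descriptors; each descriptor is range checked, then its blocks are
      * deallocated under the store lock. */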
3118 static int
3119 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3120 {
3121         unsigned char *buf;
3122         struct unmap_block_desc *desc;
3123         unsigned int i, payload_len, descriptors;
3124         int ret;
3125         unsigned long iflags;
3126
3127
3128         if (!scsi_debug_lbp())
3129                 return 0;       /* fib and say it's done */
3130         payload_len = get_unaligned_be16(scp->cmnd + 7);
3131         BUG_ON(scsi_bufflen(scp) != payload_len);
3132
3133         descriptors = (payload_len - 8) / 16;
3134         if (descriptors > scsi_debug_unmap_max_desc) {
3135                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3136                 return check_condition_result;
3137         }
3138
3139         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3140         if (!buf) {
3141                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3142                                 INSUFF_RES_ASCQ);
3143                 return check_condition_result;
3144         }
3145
3146         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3147
3148         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3149         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3150
3151         desc = (void *)&buf[8];
3152
3153         write_lock_irqsave(&atomic_rw, iflags);
3154
3155         for (i = 0 ; i < descriptors ; i++) {
3156                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3157                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3158
3159                 ret = check_device_access_params(scp, lba, num);
3160                 if (ret)
3161                         goto out;
3162
3163                 unmap_region(lba, num);
3164         }
3165
3166         ret = 0;
3167
3168 out:
3169         write_unlock_irqrestore(&atomic_rw, iflags);
3170         kfree(buf);
3171
3172         return ret;
3173 }
3174
3175 #define SDEBUG_GET_LBA_STATUS_LEN 32
3176
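     /* GET LBA STATUS: report one descriptor covering the run of blocks,
      * starting at the requested LBA, that share the same mapped or
      * deallocated state. */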
3177 static int
3178 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3179 {
3180         u8 *cmd = scp->cmnd;
3181         u64 lba;
3182         u32 alloc_len, mapped, num;
3183         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3184         int ret;
3185
3186         lba = get_unaligned_be64(cmd + 2);
3187         alloc_len = get_unaligned_be32(cmd + 10);
3188
3189         if (alloc_len < 24)
3190                 return 0;
3191
3192         ret = check_device_access_params(scp, lba, 1);
3193         if (ret)
3194                 return ret;
3195
3196         if (scsi_debug_lbp())
3197                 mapped = map_state(lba, &num);
3198         else {
3199                 mapped = 1;
3200                 /* following just in case virtual_gb changed */
3201                 sdebug_capacity = get_sdebug_capacity();
3202                 if (sdebug_capacity - lba <= 0xffffffff)
3203                         num = sdebug_capacity - lba;
3204                 else
3205                         num = 0xffffffff;
3206         }
3207
3208         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3209         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3210         put_unaligned_be64(lba, arr + 8);       /* LBA */
3211         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3212         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3213
3214         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3215 }
3216
3217 #define SDEBUG_RLUN_ARR_SZ 256
3218
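     /* REPORT LUNS: SELECT REPORT 0 lists ordinary LUNs, 1 lists only the
      * REPORT LUNS well known LU, and 2 lists both. */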
3219 static int resp_report_luns(struct scsi_cmnd * scp,
3220                             struct sdebug_dev_info * devip)
3221 {
3222         unsigned int alloc_len;
3223         int lun_cnt, i, upper, num, n, want_wlun, shortish;
3224         u64 lun;
3225         unsigned char *cmd = scp->cmnd;
3226         int select_report = (int)cmd[2];
3227         struct scsi_lun *one_lun;
3228         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3229         unsigned char * max_addr;
3230
3231         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
3232         shortish = (alloc_len < 4);
3233         if (shortish || (select_report > 2)) {
3234                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3235                 return check_condition_result;
3236         }
3237         /* luns may be numbered 0 to 16383; only (SDEBUG_RLUN_ARR_SZ - 8) / 8 entries fit in this response */
3238         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3239         lun_cnt = scsi_debug_max_luns;
3240         if (1 == select_report)
3241                 lun_cnt = 0;
3242         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3243                 --lun_cnt;
3244         want_wlun = (select_report > 0) ? 1 : 0;
3245         num = lun_cnt + want_wlun;
3246         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3247         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3248         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3249                             sizeof(struct scsi_lun)), num);
3250         if (n < num) {
3251                 want_wlun = 0;
3252                 lun_cnt = n;
3253         }
3254         one_lun = (struct scsi_lun *) &arr[8];
3255         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3256         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3257              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3258              i++, lun++) {
3259                 upper = (lun >> 8) & 0x3f;
3260                 if (upper)
3261                         one_lun[i].scsi_lun[0] =
3262                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3263                 one_lun[i].scsi_lun[1] = lun & 0xff;
3264         }
3265         if (want_wlun) {
3266                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
3267                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
3268                 i++;
3269         }
3270         alloc_len = (unsigned char *)(one_lun + i) - arr;
3271         return fill_from_dev_buffer(scp, arr,
3272                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
3273 }
3274
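     /* XDWRITEREAD helper: XOR the data-out buffer with the old medium data
      * that resp_read_dt0() placed in the data-in buffer, returning the result
      * in the data-in buffer. */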
3275 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3276                             unsigned int num, struct sdebug_dev_info *devip)
3277 {
3278         int j;
3279         unsigned char *kaddr, *buf;
3280         unsigned int offset;
3281         struct scsi_data_buffer *sdb = scsi_in(scp);
3282         struct sg_mapping_iter miter;
3283
3284         /* it would be better not to need a temporary buffer, but one is used for now */
3285         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3286         if (!buf) {
3287                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3288                                 INSUFF_RES_ASCQ);
3289                 return check_condition_result;
3290         }
3291
3292         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3293
3294         offset = 0;
3295         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3296                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
3297
3298         while (sg_miter_next(&miter)) {
3299                 kaddr = miter.addr;
3300                 for (j = 0; j < miter.length; j++)
3301                         *(kaddr + j) ^= *(buf + offset + j);
3302
3303                 offset += miter.length;
3304         }
3305         sg_miter_stop(&miter);
3306         kfree(buf);
3307
3308         return 0;
3309 }
3310
3311 static int
3312 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3313 {
3314         u8 *cmd = scp->cmnd;
3315         u64 lba;
3316         u32 num;
3317         int errsts;
3318
3319         if (!scsi_bidi_cmnd(scp)) {
3320                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3321                                 INSUFF_RES_ASCQ);
3322                 return check_condition_result;
3323         }
3324         errsts = resp_read_dt0(scp, devip);
3325         if (errsts)
3326                 return errsts;
3327         if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3328                 errsts = resp_write_dt0(scp, devip);
3329                 if (errsts)
3330                         return errsts;
3331         }
3332         lba = get_unaligned_be32(cmd + 2);
3333         num = get_unaligned_be16(cmd + 7);
3334         return resp_xdwriteread(scp, lba, num, devip);
3335 }
3336
3337 /* Called when a queued command's timer or tasklet fires. */
3338 static void sdebug_q_cmd_complete(unsigned long indx)
3339 {
3340         int qa_indx;
3341         int retiring = 0;
3342         unsigned long iflags;
3343         struct sdebug_queued_cmd *sqcp;
3344         struct scsi_cmnd *scp;
3345         struct sdebug_dev_info *devip;
3346
3347         atomic_inc(&sdebug_completions);
3348         qa_indx = indx;
3349         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3350                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3351                 return;
3352         }
3353         spin_lock_irqsave(&queued_arr_lock, iflags);
3354         sqcp = &queued_arr[qa_indx];
3355         scp = sqcp->a_cmnd;
3356         if (NULL == scp) {
3357                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3358                 pr_err("%s: scp is NULL\n", __func__);
3359                 return;
3360         }
3361         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3362         if (devip)
3363                 atomic_dec(&devip->num_in_q);
3364         else
3365                 pr_err("%s: devip=NULL\n", __func__);
3366         if (atomic_read(&retired_max_queue) > 0)
3367                 retiring = 1;
3368
3369         sqcp->a_cmnd = NULL;
3370         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3371                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3372                 pr_err("%s: Unexpected completion\n", __func__);
3373                 return;
3374         }
3375
3376         if (unlikely(retiring)) {       /* user has reduced max_queue */
3377                 int k, retval;
3378
3379                 retval = atomic_read(&retired_max_queue);
3380                 if (qa_indx >= retval) {
3381                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3382                         pr_err("%s: index %d too large\n", __func__, retval);
3383                         return;
3384                 }
3385                 k = find_last_bit(queued_in_use_bm, retval);
3386                 if ((k < scsi_debug_max_queue) || (k == retval))
3387                         atomic_set(&retired_max_queue, 0);
3388                 else
3389                         atomic_set(&retired_max_queue, k + 1);
3390         }
3391         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3392         scp->scsi_done(scp); /* callback to mid level */
3393 }
3394
3395 /* Called when a queued command's high resolution timer fires. */
3396 static enum hrtimer_restart
3397 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3398 {
3399         int qa_indx;
3400         int retiring = 0;
3401         unsigned long iflags;
3402         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3403         struct sdebug_queued_cmd *sqcp;
3404         struct scsi_cmnd *scp;
3405         struct sdebug_dev_info *devip;
3406
3407         atomic_inc(&sdebug_completions);
3408         qa_indx = sd_hrtp->qa_indx;
3409         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3410                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3411                 goto the_end;
3412         }
3413         spin_lock_irqsave(&queued_arr_lock, iflags);
3414         sqcp = &queued_arr[qa_indx];
3415         scp = sqcp->a_cmnd;
3416         if (NULL == scp) {
3417                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3418                 pr_err("%s: scp is NULL\n", __func__);
3419                 goto the_end;
3420         }
3421         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3422         if (devip)
3423                 atomic_dec(&devip->num_in_q);
3424         else
3425                 pr_err("%s: devip=NULL\n", __func__);
3426         if (atomic_read(&retired_max_queue) > 0)
3427                 retiring = 1;
3428
3429         sqcp->a_cmnd = NULL;
3430         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3431                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3432                 pr_err("%s: Unexpected completion\n", __func__);
3433                 goto the_end;
3434         }
3435
3436         if (unlikely(retiring)) {       /* user has reduced max_queue */
3437                 int k, retval;
3438
3439                 retval = atomic_read(&retired_max_queue);
3440                 if (qa_indx >= retval) {
3441                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3442                         pr_err("%s: index %d too large\n", __func__, retval);
3443                         goto the_end;
3444                 }
3445                 k = find_last_bit(queued_in_use_bm, retval);
3446                 if ((k < scsi_debug_max_queue) || (k == retval))
3447                         atomic_set(&retired_max_queue, 0);
3448                 else
3449                         atomic_set(&retired_max_queue, k + 1);
3450         }
3451         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3452         scp->scsi_done(scp); /* callback to mid level */
3453 the_end:
3454         return HRTIMER_NORESTART;
3455 }
3456
3457 static struct sdebug_dev_info *
3458 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3459 {
3460         struct sdebug_dev_info *devip;
3461
3462         devip = kzalloc(sizeof(*devip), flags);
3463         if (devip) {
3464                 devip->sdbg_host = sdbg_host;
3465                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3466         }
3467         return devip;
3468 }
3469
3470 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3471 {
3472         struct sdebug_host_info * sdbg_host;
3473         struct sdebug_dev_info * open_devip = NULL;
3474         struct sdebug_dev_info * devip =
3475                         (struct sdebug_dev_info *)sdev->hostdata;
3476
3477         if (devip)
3478                 return devip;
3479         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3480         if (!sdbg_host) {
3481                 pr_err("%s: Host info NULL\n", __func__);
3482                 return NULL;
3483         }
3484         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3485                 if ((devip->used) && (devip->channel == sdev->channel) &&
3486                     (devip->target == sdev->id) &&
3487                     (devip->lun == sdev->lun))
3488                         return devip;
3489                 else {
3490                         if ((!devip->used) && (!open_devip))
3491                                 open_devip = devip;
3492                 }
3493         }
3494         if (!open_devip) { /* try and make a new one */
3495                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3496                 if (!open_devip) {
3497                         printk(KERN_ERR "%s: out of memory at line %d\n",
3498                                 __func__, __LINE__);
3499                         return NULL;
3500                 }
3501         }
3502
3503         open_devip->channel = sdev->channel;
3504         open_devip->target = sdev->id;
3505         open_devip->lun = sdev->lun;
3506         open_devip->sdbg_host = sdbg_host;
3507         atomic_set(&open_devip->num_in_q, 0);
3508         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3509         open_devip->used = true;
3510         return open_devip;
3511 }
3512
3513 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3514 {
3515         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3516                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3517                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3518         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3519         return 0;
3520 }
3521
3522 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3523 {
3524         struct sdebug_dev_info *devip;
3525
3526         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3527                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3528                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3529         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3530                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3531         devip = devInfoReg(sdp);
3532         if (NULL == devip)
3533                 return 1;       /* no resources, will be marked offline */
3534         sdp->hostdata = devip;
3535         blk_queue_max_segment_size(sdp->request_queue, -1U);
3536         if (scsi_debug_no_uld)
3537                 sdp->no_uld_attach = 1;
3538         return 0;
3539 }
3540
3541 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3542 {
3543         struct sdebug_dev_info *devip =
3544                 (struct sdebug_dev_info *)sdp->hostdata;
3545
3546         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3547                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3548                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3549         if (devip) {
3550                 /* make this slot available for re-use */
3551                 devip->used = false;
3552                 sdp->hostdata = NULL;
3553         }
3554 }
3555
3556 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3557 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3558 {
3559         unsigned long iflags;
3560         int k, qmax, r_qmax;
3561         struct sdebug_queued_cmd *sqcp;
3562         struct sdebug_dev_info *devip;
3563
3564         spin_lock_irqsave(&queued_arr_lock, iflags);
3565         qmax = scsi_debug_max_queue;
3566         r_qmax = atomic_read(&retired_max_queue);
3567         if (r_qmax > qmax)
3568                 qmax = r_qmax;
3569         for (k = 0; k < qmax; ++k) {
3570                 if (test_bit(k, queued_in_use_bm)) {
3571                         sqcp = &queued_arr[k];
3572                         if (cmnd == sqcp->a_cmnd) {
3573                                 devip = (struct sdebug_dev_info *)
3574                                         cmnd->device->hostdata;
3575                                 if (devip)
3576                                         atomic_dec(&devip->num_in_q);
3577                                 sqcp->a_cmnd = NULL;
3578                                 spin_unlock_irqrestore(&queued_arr_lock,
3579                                                        iflags);
3580                                 if (scsi_debug_ndelay > 0) {
3581                                         if (sqcp->sd_hrtp)
3582                                                 hrtimer_cancel(
3583                                                         &sqcp->sd_hrtp->hrt);
3584                                 } else if (scsi_debug_delay > 0) {
3585                                         if (sqcp->cmnd_timerp)
3586                                                 del_timer_sync(
3587                                                         sqcp->cmnd_timerp);
3588                                 } else if (scsi_debug_delay < 0) {
3589                                         if (sqcp->tletp)
3590                                                 tasklet_kill(sqcp->tletp);
3591                                 }
3592                                 clear_bit(k, queued_in_use_bm);
3593                                 return 1;
3594                         }
3595                 }
3596         }
3597         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3598         return 0;
3599 }
3600
3601 /* Deletes (stops) timers or tasklets of all queued commands */
3602 static void stop_all_queued(void)
3603 {
3604         unsigned long iflags;
3605         int k;
3606         struct sdebug_queued_cmd *sqcp;
3607         struct sdebug_dev_info *devip;
3608
3609         spin_lock_irqsave(&queued_arr_lock, iflags);
3610         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3611                 if (test_bit(k, queued_in_use_bm)) {
3612                         sqcp = &queued_arr[k];
3613                         if (sqcp->a_cmnd) {
3614                                 devip = (struct sdebug_dev_info *)
3615                                         sqcp->a_cmnd->device->hostdata;
3616                                 if (devip)
3617                                         atomic_dec(&devip->num_in_q);
3618                                 sqcp->a_cmnd = NULL;
3619                                 spin_unlock_irqrestore(&queued_arr_lock,
3620                                                        iflags);
3621                                 if (scsi_debug_ndelay > 0) {
3622                                         if (sqcp->sd_hrtp)
3623                                                 hrtimer_cancel(
3624                                                         &sqcp->sd_hrtp->hrt);
3625                                 } else if (scsi_debug_delay > 0) {
3626                                         if (sqcp->cmnd_timerp)
3627                                                 del_timer_sync(
3628                                                         sqcp->cmnd_timerp);
3629                                 } else if (scsi_debug_delay < 0) {
3630                                         if (sqcp->tletp)
3631                                                 tasklet_kill(sqcp->tletp);
3632                                 }
3633                                 clear_bit(k, queued_in_use_bm);
3634                                 spin_lock_irqsave(&queued_arr_lock, iflags);
3635                         }
3636                 }
3637         }
3638         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3639 }
3640
3641 /* Free queued command memory on heap */
3642 static void free_all_queued(void)
3643 {
3644         unsigned long iflags;
3645         int k;
3646         struct sdebug_queued_cmd *sqcp;
3647
3648         spin_lock_irqsave(&queued_arr_lock, iflags);
3649         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3650                 sqcp = &queued_arr[k];
3651                 kfree(sqcp->cmnd_timerp);
3652                 sqcp->cmnd_timerp = NULL;
3653                 kfree(sqcp->tletp);
3654                 sqcp->tletp = NULL;
3655                 kfree(sqcp->sd_hrtp);
3656                 sqcp->sd_hrtp = NULL;
3657         }
3658         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3659 }
3660
3661 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3662 {
3663         ++num_aborts;
3664         if (SCpnt) {
3665                 if (SCpnt->device &&
3666                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3667                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3668                                     __func__);
3669                 stop_queued_cmnd(SCpnt);
3670         }
3671         return SUCCESS;
3672 }
3673
3674 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3675 {
3676         struct sdebug_dev_info * devip;
3677
3678         ++num_dev_resets;
3679         if (SCpnt && SCpnt->device) {
3680                 struct scsi_device *sdp = SCpnt->device;
3681
3682                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3683                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3684                 devip = devInfoReg(sdp);
3685                 if (devip)
3686                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
3687         }
3688         return SUCCESS;
3689 }
3690
3691 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3692 {
3693         struct sdebug_host_info *sdbg_host;
3694         struct sdebug_dev_info *devip;
3695         struct scsi_device *sdp;
3696         struct Scsi_Host *hp;
3697         int k = 0;
3698
3699         ++num_target_resets;
3700         if (!SCpnt)
3701                 goto lie;
3702         sdp = SCpnt->device;
3703         if (!sdp)
3704                 goto lie;
3705         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3706                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3707         hp = sdp->host;
3708         if (!hp)
3709                 goto lie;
3710         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3711         if (sdbg_host) {
3712                 list_for_each_entry(devip,
3713                                     &sdbg_host->dev_info_list,
3714                                     dev_list)
3715                         if (devip->target == sdp->id) {
3716                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3717                                 ++k;
3718                         }
3719         }
3720         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3721                 sdev_printk(KERN_INFO, sdp,
3722                             "%s: %d device(s) found in target\n", __func__, k);
3723 lie:
3724         return SUCCESS;
3725 }
3726
3727 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3728 {
3729         struct sdebug_host_info *sdbg_host;
3730         struct sdebug_dev_info *devip;
3731         struct scsi_device * sdp;
3732         struct Scsi_Host * hp;
3733         int k = 0;
3734
3735         ++num_bus_resets;
3736         if (!(SCpnt && SCpnt->device))
3737                 goto lie;
3738         sdp = SCpnt->device;
3739         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3740                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3741         hp = sdp->host;
3742         if (hp) {
3743                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3744                 if (sdbg_host) {
3745                         list_for_each_entry(devip,
3746                                             &sdbg_host->dev_info_list,
3747                                             dev_list) {
3748                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3749                                 ++k;
3750                         }
3751                 }
3752         }
3753         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3754                 sdev_printk(KERN_INFO, sdp,
3755                             "%s: %d device(s) found in host\n", __func__, k);
3756 lie:
3757         return SUCCESS;
3758 }
3759
3760 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3761 {
3762         struct sdebug_host_info * sdbg_host;
3763         struct sdebug_dev_info *devip;
3764         int k = 0;
3765
3766         ++num_host_resets;
3767         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3768                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3769         spin_lock(&sdebug_host_list_lock);
3770         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3771                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3772                                     dev_list) {
3773                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3774                         ++k;
3775                 }
3776         }
3777         spin_unlock(&sdebug_host_list_lock);
3778         stop_all_queued();
3779         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3780                 sdev_printk(KERN_INFO, SCpnt->device,
3781                             "%s: %d device(s) found\n", __func__, k);
3782         return SUCCESS;
3783 }
3784
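     /* Write a legacy MBR partition table into the first sector of the ram
      * store: the 0x55 0xAA signature at offset 510 and up to SDEBUG_MAX_PARTS
      * primary entries of type 0x83 (Linux), sized from the fake CHS geometry. */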
3785 static void __init sdebug_build_parts(unsigned char *ramp,
3786                                       unsigned long store_size)
3787 {
3788         struct partition * pp;
3789         int starts[SDEBUG_MAX_PARTS + 2];
3790         int sectors_per_part, num_sectors, k;
3791         int heads_by_sects, start_sec, end_sec;
3792
3793         /* assume partition table already zeroed */
3794         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3795                 return;
3796         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3797                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3798                 pr_warn("%s: reducing partitions to %d\n", __func__,
3799                         SDEBUG_MAX_PARTS);
3800         }
3801         num_sectors = (int)sdebug_store_sectors;
3802         sectors_per_part = (num_sectors - sdebug_sectors_per)
3803                            / scsi_debug_num_parts;
3804         heads_by_sects = sdebug_heads * sdebug_sectors_per;
3805         starts[0] = sdebug_sectors_per;
3806         for (k = 1; k < scsi_debug_num_parts; ++k)
3807                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3808                             * heads_by_sects;
3809         starts[scsi_debug_num_parts] = num_sectors;
3810         starts[scsi_debug_num_parts + 1] = 0;
3811
3812         ramp[510] = 0x55;       /* magic partition markings */
3813         ramp[511] = 0xAA;
3814         pp = (struct partition *)(ramp + 0x1be);
3815         for (k = 0; starts[k + 1]; ++k, ++pp) {
3816                 start_sec = starts[k];
3817                 end_sec = starts[k + 1] - 1;
3818                 pp->boot_ind = 0;
3819
3820                 pp->cyl = start_sec / heads_by_sects;
3821                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3822                            / sdebug_sectors_per;
3823                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3824
3825                 pp->end_cyl = end_sec / heads_by_sects;
3826                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3827                                / sdebug_sectors_per;
3828                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3829
3830                 pp->start_sect = cpu_to_le32(start_sec);
3831                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3832                 pp->sys_ind = 0x83;     /* plain Linux partition */
3833         }
3834 }
3835
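     /* Queue the response for 'cmnd'. A delta_jiff of 0 completes the command
      * in the caller's thread; a positive value arms a timer; otherwise an
      * hrtimer is used when scsi_debug_ndelay > 0 and a tasklet when the
      * delay is negative. */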
3836 static int
3837 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3838               int scsi_result, int delta_jiff)
3839 {
3840         unsigned long iflags;
3841         int k, num_in_q, qdepth, inject;
3842         struct sdebug_queued_cmd *sqcp = NULL;
3843         struct scsi_device *sdp;
3844
3845         if (NULL == cmnd || NULL == devip) {
3846                 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3847                         __func__);
3848                 /* no particularly good error to report back */
3849                 return SCSI_MLQUEUE_HOST_BUSY;
3850         }
             sdp = cmnd->device;     /* dereference only after the NULL check */
3851         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3852                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3853                             __func__, scsi_result);
3854         if (delta_jiff == 0)
3855                 goto respond_in_thread;
3856
3857         /* schedule the response at a later time if resources permit */
3858         spin_lock_irqsave(&queued_arr_lock, iflags);
3859         num_in_q = atomic_read(&devip->num_in_q);
3860         qdepth = cmnd->device->queue_depth;
3861         inject = 0;
3862         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3863                 if (scsi_result) {
3864                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3865                         goto respond_in_thread;
3866                 } else
3867                         scsi_result = device_qfull_result;
3868         } else if ((scsi_debug_every_nth != 0) &&
3869                    (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3870                    (scsi_result == 0)) {
3871                 if ((num_in_q == (qdepth - 1)) &&
3872                     (atomic_inc_return(&sdebug_a_tsf) >=
3873                      abs(scsi_debug_every_nth))) {
3874                         atomic_set(&sdebug_a_tsf, 0);
3875                         inject = 1;
3876                         scsi_result = device_qfull_result;
3877                 }
3878         }
3879
3880         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3881         if (k >= scsi_debug_max_queue) {
3882                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3883                 if (scsi_result)
3884                         goto respond_in_thread;
3885                 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3886                         scsi_result = device_qfull_result;
3887                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3888                         sdev_printk(KERN_INFO, sdp,
3889                                     "%s: max_queue=%d exceeded, %s\n",
3890                                     __func__, scsi_debug_max_queue,
3891                                     (scsi_result ?  "status: TASK SET FULL" :
3892                                                     "report: host busy"));
3893                 if (scsi_result)
3894                         goto respond_in_thread;
3895                 else
3896                         return SCSI_MLQUEUE_HOST_BUSY;
3897         }
3898         __set_bit(k, queued_in_use_bm);
3899         atomic_inc(&devip->num_in_q);
3900         sqcp = &queued_arr[k];
3901         sqcp->a_cmnd = cmnd;
3902         cmnd->result = scsi_result;
3903         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3904         if (delta_jiff > 0) {
3905                 if (NULL == sqcp->cmnd_timerp) {
3906                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3907                                                     GFP_ATOMIC);
3908                         if (NULL == sqcp->cmnd_timerp)
3909                                 return SCSI_MLQUEUE_HOST_BUSY;
3910                         init_timer(sqcp->cmnd_timerp);
3911                 }
3912                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3913                 sqcp->cmnd_timerp->data = k;
3914                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3915                 add_timer(sqcp->cmnd_timerp);
3916         } else if (scsi_debug_ndelay > 0) {
3917                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3918                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3919
3920                 if (NULL == sd_hp) {
3921                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3922                         if (NULL == sd_hp)
3923                                 return SCSI_MLQUEUE_HOST_BUSY;
3924                         sqcp->sd_hrtp = sd_hp;
3925                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3926                                      HRTIMER_MODE_REL);
3927                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3928                         sd_hp->qa_indx = k;
3929                 }
3930                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3931         } else {        /* delay < 0 */
3932                 if (NULL == sqcp->tletp) {
3933                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3934                                               GFP_ATOMIC);
3935                         if (NULL == sqcp->tletp)
3936                                 return SCSI_MLQUEUE_HOST_BUSY;
3937                         tasklet_init(sqcp->tletp,
3938                                      sdebug_q_cmd_complete, k);
3939                 }
3940                 if (-1 == delta_jiff)
3941                         tasklet_hi_schedule(sqcp->tletp);
3942                 else
3943                         tasklet_schedule(sqcp->tletp);
3944         }
3945         if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3946             (scsi_result == device_qfull_result))
3947                 sdev_printk(KERN_INFO, sdp,
3948                             "%s: num_in_q=%d +1, %s%s\n", __func__,
3949                             num_in_q, (inject ? "<inject> " : ""),
3950                             "status: TASK SET FULL");
3951         return 0;
3952
3953 respond_in_thread:      /* call back to mid-layer using invocation thread */
3954         cmnd->result = scsi_result;
3955         cmnd->scsi_done(cmnd);
3956         return 0;
3957 }
3958
3959 /* Note: The following macros create attribute files in the
3960    /sys/module/scsi_debug/parameters directory. Unfortunately this
3961    driver is not notified when one of those files is changed, so it
3962    cannot trigger auxiliary actions as it can when the corresponding
3963    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3964  */
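/*
 * Illustrative sketch (the value is hypothetical): the same parameter seen
 * through both interfaces. Only a write to the driver attribute triggers
 * the auxiliary actions mentioned above:
 *
 *     cat /sys/module/scsi_debug/parameters/every_nth
 *     echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */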
3965 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3966 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3967 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3968 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3969 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3970 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3971 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3972 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3973 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3974 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3975 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3976 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3977 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3978 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3979 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3980 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3981 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3982 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3983 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3984 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3985 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3986 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3987 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3988 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3989 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3990 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3991 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3992 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3993 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3994 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3995 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3996 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3997 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3998 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3999 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4000 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4001 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4002 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4003                    S_IRUGO | S_IWUSR);
4004 module_param_named(write_same_length, scsi_debug_write_same_length, int,
4005                    S_IRUGO | S_IWUSR);
4006
4007 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4008 MODULE_DESCRIPTION("SCSI debug adapter driver");
4009 MODULE_LICENSE("GPL");
4010 MODULE_VERSION(SCSI_DEBUG_VERSION);
4011
4012 MODULE_PARM_DESC(add_host, "0..127 hosts allowed (def=1)");
4013 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4014 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4015 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4016 MODULE_PARM_DESC(dev_size_mb, "size in MiB of RAM shared by devs (def=8)");
4017 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4018 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4019 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
4020 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
4021 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4022 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4023 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4024 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4025 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4026 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4027 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4028 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4029 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
4030 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4031 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4032 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4033 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4034 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
4035 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
4036 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
4037 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4038 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4039 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
4040 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4041 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=6[SPC-4])");
4042 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4043 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4044 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4045 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4046 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4047 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4048 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4049 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4050 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4051
4052 static char sdebug_info[256];
4053
4054 static const char * scsi_debug_info(struct Scsi_Host * shp)
4055 {
4056         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4057                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4058                 scsi_debug_version_date, scsi_debug_dev_size_mb,
4059                 scsi_debug_opts);
4060         return sdebug_info;
4061 }
4062
4063 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4064 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4065 {
4066         char arr[16];
4067         int opts;
4068         int minLen = length > 15 ? 15 : length;
4069
4070         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4071                 return -EACCES;
4072         memcpy(arr, buffer, minLen);
4073         arr[minLen] = '\0';
4074         if (1 != sscanf(arr, "%d", &opts))
4075                 return -EINVAL;
4076         scsi_debug_opts = opts;
4077         if (scsi_debug_every_nth != 0)
4078                 atomic_set(&sdebug_cmnd_count, 0);
4079         return length;
4080 }
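
/*
 * Illustrative sketch: per the comment above scsi_debug_write_info(), opts
 * can also be set through procfs. This path only parses a decimal value;
 * the opts driver attribute further down also accepts a 0x-prefixed hex
 * value:
 *
 *     echo 5 > /proc/scsi/scsi_debug/<host_id>
 */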
4081
4082 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4083  * same for each scsi_debug host (if more than one). Some of the counters
4084  * output are not atomic, so they may be inaccurate on a busy system. */
4085 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4086 {
4087         int f, l;
4088         char b[32];
4089
4090         if (scsi_debug_every_nth > 0)
4091                 snprintf(b, sizeof(b), " (curr:%d)",
4092                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4093                                 atomic_read(&sdebug_a_tsf) :
4094                                 atomic_read(&sdebug_cmnd_count)));
4095         else
4096                 b[0] = '\0';
4097
4098         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4099                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4100                 "every_nth=%d%s\n"
4101                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4102                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4103                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4104                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4105                 "usec_in_jiffy=%lu\n",
4106                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
4107                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4108                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4109                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
4110                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4111                 sdebug_sectors_per, num_aborts, num_dev_resets,
4112                 num_target_resets, num_bus_resets, num_host_resets,
4113                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4114
4115         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4116         if (f != scsi_debug_max_queue) {
4117                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4118                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
4119                            "queued_in_use_bm", f, l);
4120         }
4121         return 0;
4122 }
4123
4124 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4125 {
4126         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4127 }
4128 /* Returns -EBUSY if delay is being changed and commands are queued */
4129 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4130                            size_t count)
4131 {
4132         int delay, res;
4133
4134         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4135                 res = count;
4136                 if (scsi_debug_delay != delay) {
4137                         unsigned long iflags;
4138                         int k;
4139
4140                         spin_lock_irqsave(&queued_arr_lock, iflags);
4141                         k = find_first_bit(queued_in_use_bm,
4142                                            scsi_debug_max_queue);
4143                         if (k != scsi_debug_max_queue)
4144                                 res = -EBUSY;   /* have queued commands */
4145                         else {
4146                                 scsi_debug_delay = delay;
4147                                 scsi_debug_ndelay = 0;
4148                         }
4149                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4150                 }
4151                 return res;
4152         }
4153         return -EINVAL;
4154 }
4155 static DRIVER_ATTR_RW(delay);
4156
4157 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4158 {
4159         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4160 }
4161 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4162 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4163 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4164                            size_t count)
4165 {
4166         unsigned long iflags;
4167         int ndelay, res, k;
4168
4169         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4170             (ndelay >= 0) && (ndelay < 1000000000)) {
4171                 res = count;
4172                 if (scsi_debug_ndelay != ndelay) {
4173                         spin_lock_irqsave(&queued_arr_lock, iflags);
4174                         k = find_first_bit(queued_in_use_bm,
4175                                            scsi_debug_max_queue);
4176                         if (k != scsi_debug_max_queue)
4177                                 res = -EBUSY;   /* have queued commands */
4178                         else {
4179                                 scsi_debug_ndelay = ndelay;
4180                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4181                                                           : DEF_DELAY;
4182                         }
4183                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4184                 }
4185                 return res;
4186         }
4187         return -EINVAL;
4188 }
4189 static DRIVER_ATTR_RW(ndelay);
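
/*
 * Illustrative sketch (the value is hypothetical): selecting a 100
 * microsecond response delay via ndelay. As noted above, a non-zero ndelay
 * overrides "delay" (which is set to DELAY_OVERRIDDEN) and the write fails
 * with EBUSY while commands are still queued:
 *
 *     echo 100000 > /sys/bus/pseudo/drivers/scsi_debug/ndelay
 */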
4190
4191 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4192 {
4193         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4194 }
4195
4196 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4197                           size_t count)
4198 {
4199         int opts;
4200         char work[20];
4201
4202         if (1 == sscanf(buf, "%10s", work)) {
4203                 if (0 == strncasecmp(work,"0x", 2)) {
4204                         if (1 == sscanf(&work[2], "%x", &opts))
4205                                 goto opts_done;
4206                 } else {
4207                         if (1 == sscanf(work, "%d", &opts))
4208                                 goto opts_done;
4209                 }
4210         }
4211         return -EINVAL;
4212 opts_done:
4213         scsi_debug_opts = opts;
4214         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4215                 sdebug_any_injecting_opt = true;
4216         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4217                 sdebug_any_injecting_opt = true;
4218         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4219                 sdebug_any_injecting_opt = true;
4220         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4221                 sdebug_any_injecting_opt = true;
4222         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4223                 sdebug_any_injecting_opt = true;
4224         atomic_set(&sdebug_cmnd_count, 0);
4225         atomic_set(&sdebug_a_tsf, 0);
4226         return count;
4227 }
4228 static DRIVER_ATTR_RW(opts);
4229
4230 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4231 {
4232         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4233 }
4234 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4235                            size_t count)
4236 {
4237         int n;
4238
4239         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4240                 scsi_debug_ptype = n;
4241                 return count;
4242         }
4243         return -EINVAL;
4244 }
4245 static DRIVER_ATTR_RW(ptype);
4246
4247 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4248 {
4249         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4250 }
4251 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4252                             size_t count)
4253 {
4254         int n;
4255
4256         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4257                 scsi_debug_dsense = n;
4258                 return count;
4259         }
4260         return -EINVAL;
4261 }
4262 static DRIVER_ATTR_RW(dsense);
4263
4264 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4265 {
4266         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4267 }
4268 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4269                              size_t count)
4270 {
4271         int n;
4272
4273         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4274                 n = (n > 0);
4275                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4276                 if (scsi_debug_fake_rw != n) {
4277                         if ((0 == n) && (NULL == fake_storep)) {
4278                                 unsigned long sz =
4279                                         (unsigned long)scsi_debug_dev_size_mb *
4280                                         1048576;
4281
4282                                 fake_storep = vmalloc(sz);
4283                                 if (NULL == fake_storep) {
4284                                         pr_err("%s: out of memory, 9\n",
4285                                                __func__);
4286                                         return -ENOMEM;
4287                                 }
4288                                 memset(fake_storep, 0, sz);
4289                         }
4290                         scsi_debug_fake_rw = n;
4291                 }
4292                 return count;
4293         }
4294         return -EINVAL;
4295 }
4296 static DRIVER_ATTR_RW(fake_rw);
4297
4298 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4299 {
4300         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4301 }
4302 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4303                               size_t count)
4304 {
4305         int n;
4306
4307         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4308                 scsi_debug_no_lun_0 = n;
4309                 return count;
4310         }
4311         return -EINVAL;
4312 }
4313 static DRIVER_ATTR_RW(no_lun_0);
4314
4315 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4316 {
4317         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4318 }
4319 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4320                               size_t count)
4321 {
4322         int n;
4323
4324         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4325                 scsi_debug_num_tgts = n;
4326                 sdebug_max_tgts_luns();
4327                 return count;
4328         }
4329         return -EINVAL;
4330 }
4331 static DRIVER_ATTR_RW(num_tgts);
4332
4333 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4334 {
4335         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4336 }
4337 static DRIVER_ATTR_RO(dev_size_mb);
4338
4339 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4340 {
4341         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4342 }
4343 static DRIVER_ATTR_RO(num_parts);
4344
4345 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4346 {
4347         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4348 }
4349 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4350                                size_t count)
4351 {
4352         int nth;
4353
4354         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4355                 scsi_debug_every_nth = nth;
4356                 atomic_set(&sdebug_cmnd_count, 0);
4357                 return count;
4358         }
4359         return -EINVAL;
4360 }
4361 static DRIVER_ATTR_RW(every_nth);
4362
4363 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4364 {
4365         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4366 }
4367 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4368                               size_t count)
4369 {
4370         int n;
4371
4372         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4373                 scsi_debug_max_luns = n;
4374                 sdebug_max_tgts_luns();
4375                 return count;
4376         }
4377         return -EINVAL;
4378 }
4379 static DRIVER_ATTR_RW(max_luns);
4380
4381 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4382 {
4383         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4384 }
4385 /* N.B. max_queue can be changed while there are queued commands. In-flight
4386  * commands beyond the new max_queue will be completed. */
4387 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4388                                size_t count)
4389 {
4390         unsigned long iflags;
4391         int n, k;
4392
4393         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4394             (n <= SCSI_DEBUG_CANQUEUE)) {
4395                 spin_lock_irqsave(&queued_arr_lock, iflags);
4396                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4397                 scsi_debug_max_queue = n;
4398                 if (SCSI_DEBUG_CANQUEUE == k)
4399                         atomic_set(&retired_max_queue, 0);
4400                 else if (k >= n)
4401                         atomic_set(&retired_max_queue, k + 1);
4402                 else
4403                         atomic_set(&retired_max_queue, 0);
4404                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4405                 return count;
4406         }
4407         return -EINVAL;
4408 }
4409 static DRIVER_ATTR_RW(max_queue);
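
/*
 * Illustrative sketch (the value is hypothetical): shrinking the queue at
 * runtime. Per the note above max_queue_store(), commands already in
 * flight beyond the new limit are still completed:
 *
 *     echo 16 > /sys/bus/pseudo/drivers/scsi_debug/max_queue
 */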
4410
4411 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4412 {
4413         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4414 }
4415 static DRIVER_ATTR_RO(no_uld);
4416
4417 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4418 {
4419         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4420 }
4421 static DRIVER_ATTR_RO(scsi_level);
4422
4423 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4424 {
4425         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4426 }
4427 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4428                                 size_t count)
4429 {
4430         int n;
4431         bool changed;
4432
4433         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4434                 changed = (scsi_debug_virtual_gb != n);
4435                 scsi_debug_virtual_gb = n;
4436                 sdebug_capacity = get_sdebug_capacity();
4437                 if (changed) {
4438                         struct sdebug_host_info *sdhp;
4439                         struct sdebug_dev_info *dp;
4440
4441                         list_for_each_entry(sdhp, &sdebug_host_list,
4442                                             host_list) {
4443                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4444                                                     dev_list) {
4445                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4446                                                 dp->uas_bm);
4447                                 }
4448                         }
4449                 }
4450                 return count;
4451         }
4452         return -EINVAL;
4453 }
4454 static DRIVER_ATTR_RW(virtual_gb);
4455
4456 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4457 {
4458         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4459 }
4460
4461 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4462                               size_t count)
4463 {
4464         int delta_hosts;
4465
4466         if (sscanf(buf, "%d", &delta_hosts) != 1)
4467                 return -EINVAL;
4468         if (delta_hosts > 0) {
4469                 do {
4470                         sdebug_add_adapter();
4471                 } while (--delta_hosts);
4472         } else if (delta_hosts < 0) {
4473                 do {
4474                         sdebug_remove_adapter();
4475                 } while (++delta_hosts);
4476         }
4477         return count;
4478 }
4479 static DRIVER_ATTR_RW(add_host);
4480
4481 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4482 {
4483         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4484 }
4485 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4486                                     size_t count)
4487 {
4488         int n;
4489
4490         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4491                 scsi_debug_vpd_use_hostno = n;
4492                 return count;
4493         }
4494         return -EINVAL;
4495 }
4496 static DRIVER_ATTR_RW(vpd_use_hostno);
4497
4498 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4499 {
4500         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4501 }
4502 static DRIVER_ATTR_RO(sector_size);
4503
4504 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4505 {
4506         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4507 }
4508 static DRIVER_ATTR_RO(dix);
4509
4510 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4511 {
4512         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4513 }
4514 static DRIVER_ATTR_RO(dif);
4515
4516 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4517 {
4518         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4519 }
4520 static DRIVER_ATTR_RO(guard);
4521
4522 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4523 {
4524         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4525 }
4526 static DRIVER_ATTR_RO(ato);
4527
4528 static ssize_t map_show(struct device_driver *ddp, char *buf)
4529 {
4530         ssize_t count;
4531
4532         if (!scsi_debug_lbp())
4533                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4534                                  sdebug_store_sectors);
4535
4536         count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
4537
4538         buf[count++] = '\n';
4539         buf[count++] = 0;
4540
4541         return count;
4542 }
4543 static DRIVER_ATTR_RO(map);
4544
4545 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4546 {
4547         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4548 }
4549 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4550                                size_t count)
4551 {
4552         int n;
4553
4554         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4555                 scsi_debug_removable = (n > 0);
4556                 return count;
4557         }
4558         return -EINVAL;
4559 }
4560 static DRIVER_ATTR_RW(removable);
4561
4562 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4563 {
4564         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4565 }
4566 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4567 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4568                                size_t count)
4569 {
4570         int n, res;
4571
4572         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4573                 bool new_host_lock = (n > 0);
4574
4575                 res = count;
4576                 if (new_host_lock != scsi_debug_host_lock) {
4577                         unsigned long iflags;
4578                         int k;
4579
4580                         spin_lock_irqsave(&queued_arr_lock, iflags);
4581                         k = find_first_bit(queued_in_use_bm,
4582                                            scsi_debug_max_queue);
4583                         if (k != scsi_debug_max_queue)
4584                                 res = -EBUSY;   /* have queued commands */
4585                         else
4586                                 scsi_debug_host_lock = new_host_lock;
4587                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4588                 }
4589                 return res;
4590         }
4591         return -EINVAL;
4592 }
4593 static DRIVER_ATTR_RW(host_lock);
4594
4595 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4596 {
4597         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4598 }
4599 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4600                             size_t count)
4601 {
4602         int n;
4603
4604         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4605                 scsi_debug_strict = (n > 0);
4606                 return count;
4607         }
4608         return -EINVAL;
4609 }
4610 static DRIVER_ATTR_RW(strict);
4611
4612
4613 /* Note: The following array creates attribute files in the
4614    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4615    files (over those found in the /sys/module/scsi_debug/parameters
4616    directory) is that auxiliary actions can be triggered when an attribute
4617    is changed. For example see: add_host_store() above.
4618  */
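/*
 * Illustrative sketch (the value is hypothetical): writing to one of these
 * attributes runs its *_store() handler; for example, adding two more
 * simulated hosts via add_host_store():
 *
 *     echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */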
4619
4620 static struct attribute *sdebug_drv_attrs[] = {
4621         &driver_attr_delay.attr,
4622         &driver_attr_opts.attr,
4623         &driver_attr_ptype.attr,
4624         &driver_attr_dsense.attr,
4625         &driver_attr_fake_rw.attr,
4626         &driver_attr_no_lun_0.attr,
4627         &driver_attr_num_tgts.attr,
4628         &driver_attr_dev_size_mb.attr,
4629         &driver_attr_num_parts.attr,
4630         &driver_attr_every_nth.attr,
4631         &driver_attr_max_luns.attr,
4632         &driver_attr_max_queue.attr,
4633         &driver_attr_no_uld.attr,
4634         &driver_attr_scsi_level.attr,
4635         &driver_attr_virtual_gb.attr,
4636         &driver_attr_add_host.attr,
4637         &driver_attr_vpd_use_hostno.attr,
4638         &driver_attr_sector_size.attr,
4639         &driver_attr_dix.attr,
4640         &driver_attr_dif.attr,
4641         &driver_attr_guard.attr,
4642         &driver_attr_ato.attr,
4643         &driver_attr_map.attr,
4644         &driver_attr_removable.attr,
4645         &driver_attr_host_lock.attr,
4646         &driver_attr_ndelay.attr,
4647         &driver_attr_strict.attr,
4648         NULL,
4649 };
4650 ATTRIBUTE_GROUPS(sdebug_drv);
4651
4652 static struct device *pseudo_primary;
4653
4654 static int __init scsi_debug_init(void)
4655 {
4656         unsigned long sz;
4657         int host_to_add;
4658         int k;
4659         int ret;
4660
4661         atomic_set(&sdebug_cmnd_count, 0);
4662         atomic_set(&sdebug_completions, 0);
4663         atomic_set(&retired_max_queue, 0);
4664
4665         if (scsi_debug_ndelay >= 1000000000) {
4666                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4667                         __func__);
4668                 scsi_debug_ndelay = 0;
4669         } else if (scsi_debug_ndelay > 0)
4670                 scsi_debug_delay = DELAY_OVERRIDDEN;
4671
4672         switch (scsi_debug_sector_size) {
4673         case  512:
4674         case 1024:
4675         case 2048:
4676         case 4096:
4677                 break;
4678         default:
4679                 pr_err("%s: invalid sector_size %d\n", __func__,
4680                        scsi_debug_sector_size);
4681                 return -EINVAL;
4682         }
4683
4684         switch (scsi_debug_dif) {
4685
4686         case SD_DIF_TYPE0_PROTECTION:
4687         case SD_DIF_TYPE1_PROTECTION:
4688         case SD_DIF_TYPE2_PROTECTION:
4689         case SD_DIF_TYPE3_PROTECTION:
4690                 break;
4691
4692         default:
4693                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4694                 return -EINVAL;
4695         }
4696
4697         if (scsi_debug_guard > 1) {
4698                 pr_err("%s: guard must be 0 or 1\n", __func__);
4699                 return -EINVAL;
4700         }
4701
4702         if (scsi_debug_ato > 1) {
4703                 pr_err("%s: ato must be 0 or 1\n", __func__);
4704                 return -EINVAL;
4705         }
4706
4707         if (scsi_debug_physblk_exp > 15) {
4708                 pr_err("%s: invalid physblk_exp %u\n", __func__,
4709                        scsi_debug_physblk_exp);
4710                 return -EINVAL;
4711         }
4712
4713         if (scsi_debug_lowest_aligned > 0x3fff) {
4714                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4715                        scsi_debug_lowest_aligned);
4716                 return -EINVAL;
4717         }
4718
4719         if (scsi_debug_dev_size_mb < 1)
4720                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4721         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4722         sdebug_store_sectors = sz / scsi_debug_sector_size;
4723         sdebug_capacity = get_sdebug_capacity();
4724
4725         /* play around with geometry, don't waste too much on track 0 */
4726         sdebug_heads = 8;
4727         sdebug_sectors_per = 32;
4728         if (scsi_debug_dev_size_mb >= 256)
4729                 sdebug_heads = 64;
4730         else if (scsi_debug_dev_size_mb >= 16)
4731                 sdebug_heads = 32;
4732         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4733                                (sdebug_sectors_per * sdebug_heads);
4734         if (sdebug_cylinders_per >= 1024) {
4735                 /* other LLDs do this; implies >= 1GB ram disk ... */
4736                 sdebug_heads = 255;
4737                 sdebug_sectors_per = 63;
4738                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4739                                (sdebug_sectors_per * sdebug_heads);
4740         }
4741
4742         if (0 == scsi_debug_fake_rw) {
4743                 fake_storep = vmalloc(sz);
4744                 if (NULL == fake_storep) {
4745                         pr_err("%s: out of memory, 1\n", __func__);
4746                         return -ENOMEM;
4747                 }
4748                 memset(fake_storep, 0, sz);
4749                 if (scsi_debug_num_parts > 0)
4750                         sdebug_build_parts(fake_storep, sz);
4751         }
4752
4753         if (scsi_debug_dix) {
4754                 int dif_size;
4755
4756                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4757                 dif_storep = vmalloc(dif_size);
4758
4759                 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4760                         dif_storep);
4761
4762                 if (dif_storep == NULL) {
4763                         pr_err("%s: out of mem. (DIX)\n", __func__);
4764                         ret = -ENOMEM;
4765                         goto free_vm;
4766                 }
4767
4768                 memset(dif_storep, 0xff, dif_size);
4769         }
4770
4771         /* Logical Block Provisioning */
4772         if (scsi_debug_lbp()) {
4773                 scsi_debug_unmap_max_blocks =
4774                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4775
4776                 scsi_debug_unmap_max_desc =
4777                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4778
4779                 scsi_debug_unmap_granularity =
4780                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4781
4782                 if (scsi_debug_unmap_alignment &&
4783                     scsi_debug_unmap_granularity <=
4784                     scsi_debug_unmap_alignment) {
4785                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n", __func__);
4786                         ret = -EINVAL;
4787                         goto free_vm;
4788                 }
4789
4790                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4791                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4792
4793                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4794
4795                 if (map_storep == NULL) {
4796                         pr_err("%s: out of mem. (MAP)\n", __func__);
4797                         ret = -ENOMEM;
4798                         goto free_vm;
4799                 }
4800
4801                 bitmap_zero(map_storep, map_size);
4802
4803                 /* Map first 1KB for partition table */
4804                 if (scsi_debug_num_parts)
4805                         map_region(0, 2);
4806         }
4807
4808         pseudo_primary = root_device_register("pseudo_0");
4809         if (IS_ERR(pseudo_primary)) {
4810                 pr_warn("%s: root_device_register() error\n", __func__);
4811                 ret = PTR_ERR(pseudo_primary);
4812                 goto free_vm;
4813         }
4814         ret = bus_register(&pseudo_lld_bus);
4815         if (ret < 0) {
4816                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4817                 goto dev_unreg;
4818         }
4819         ret = driver_register(&sdebug_driverfs_driver);
4820         if (ret < 0) {
4821                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
4822                 goto bus_unreg;
4823         }
4824
4825         host_to_add = scsi_debug_add_host;
4826         scsi_debug_add_host = 0;
4827
4828         for (k = 0; k < host_to_add; k++) {
4829                 if (sdebug_add_adapter()) {
4830                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
4831                                 __func__, k);
4832                         break;
4833                 }
4834         }
4835
4836         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4837                 pr_info("%s: built %d host(s)\n", __func__,
4838                         scsi_debug_add_host);
4839         }
4840         return 0;
4841
4842 bus_unreg:
4843         bus_unregister(&pseudo_lld_bus);
4844 dev_unreg:
4845         root_device_unregister(pseudo_primary);
4846 free_vm:
4847         if (map_storep)
4848                 vfree(map_storep);
4849         if (dif_storep)
4850                 vfree(dif_storep);
4851         vfree(fake_storep);
4852
4853         return ret;
4854 }
4855
4856 static void __exit scsi_debug_exit(void)
4857 {
4858         int k = scsi_debug_add_host;
4859
4860         stop_all_queued();
4861         free_all_queued();
4862         for (; k; k--)
4863                 sdebug_remove_adapter();
4864         driver_unregister(&sdebug_driverfs_driver);
4865         bus_unregister(&pseudo_lld_bus);
4866         root_device_unregister(pseudo_primary);
4867
4868         vfree(map_storep);
4869         vfree(dif_storep);
4870
4871         vfree(fake_storep);
4872 }
4873
4874 device_initcall(scsi_debug_init);
4875 module_exit(scsi_debug_exit);
4876
4877 static void sdebug_release_adapter(struct device * dev)
4878 {
4879         struct sdebug_host_info *sdbg_host;
4880
4881         sdbg_host = to_sdebug_host(dev);
4882         kfree(sdbg_host);
4883 }
4884
4885 static int sdebug_add_adapter(void)
4886 {
4887         int k, devs_per_host;
4888         int error = 0;
4889         struct sdebug_host_info *sdbg_host;
4890         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4891
4892         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4893         if (NULL == sdbg_host) {
4894                 printk(KERN_ERR "%s: out of memory at line %d\n",
4895                        __func__, __LINE__);
4896                 return -ENOMEM;
4897         }
4898
4899         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4900
4901         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4902         for (k = 0; k < devs_per_host; k++) {
4903                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4904                 if (!sdbg_devinfo) {
4905                         printk(KERN_ERR "%s: out of memory at line %d\n",
4906                                __func__, __LINE__);
4907                         error = -ENOMEM;
4908                         goto clean;
4909                 }
4910         }
4911
4912         spin_lock(&sdebug_host_list_lock);
4913         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4914         spin_unlock(&sdebug_host_list_lock);
4915
4916         sdbg_host->dev.bus = &pseudo_lld_bus;
4917         sdbg_host->dev.parent = pseudo_primary;
4918         sdbg_host->dev.release = &sdebug_release_adapter;
4919         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4920
4921         error = device_register(&sdbg_host->dev);
4922
4923         if (error)
4924                 goto clean;
4925
4926         ++scsi_debug_add_host;
4927         return error;
4928
4929 clean:
4930         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4931                                  dev_list) {
4932                 list_del(&sdbg_devinfo->dev_list);
4933                 kfree(sdbg_devinfo);
4934         }
4935
4936         kfree(sdbg_host);
4937         return error;
4938 }
4939
4940 static void sdebug_remove_adapter(void)
4941 {
4942         struct sdebug_host_info * sdbg_host = NULL;
4943
4944         spin_lock(&sdebug_host_list_lock);
4945         if (!list_empty(&sdebug_host_list)) {
4946                 sdbg_host = list_entry(sdebug_host_list.prev,
4947                                        struct sdebug_host_info, host_list);
4948                 list_del(&sdbg_host->host_list);
4949         }
4950         spin_unlock(&sdebug_host_list_lock);
4951
4952         if (!sdbg_host)
4953                 return;
4954
4955         device_unregister(&sdbg_host->dev);
4956         --scsi_debug_add_host;
4957 }
4958
4959 static int
4960 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4961 {
4962         int num_in_q = 0;
4963         unsigned long iflags;
4964         struct sdebug_dev_info *devip;
4965
4966         spin_lock_irqsave(&queued_arr_lock, iflags);
4967         devip = (struct sdebug_dev_info *)sdev->hostdata;
4968         if (NULL == devip) {
4969                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4970                 return  -ENODEV;
4971         }
4972         num_in_q = atomic_read(&devip->num_in_q);
4973         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4974
4975         if (qdepth < 1)
4976                 qdepth = 1;
4977         /* allow to exceed max host queued_arr elements for testing */
4978         if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4979                 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4980         scsi_change_queue_depth(sdev, qdepth);
4981
4982         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4983                 sdev_printk(KERN_INFO, sdev,
4984                             "%s: qdepth=%d, num_in_q=%d\n",
4985                             __func__, qdepth, num_in_q);
4986         }
4987         return sdev->queue_depth;
4988 }
4989
4990 static int
4991 sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4992 {
4993         qtype = scsi_change_queue_type(sdev, qtype);
4994         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4995                 const char *cp;
4996
4997                 switch (qtype) {
4998                 case 0:
4999                         cp = "untagged";
5000                         break;
5001                 case MSG_SIMPLE_TAG:
5002                         cp = "simple tags";
5003                         break;
5004                 case MSG_ORDERED_TAG:
5005                         cp = "ordered tags";
5006                         break;
5007                 default:
5008                         cp = "unknown";
5009                         break;
5010                 }
5011                 sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
5012         }
5013         return qtype;
5014 }
5015
5016 static int
5017 check_inject(struct scsi_cmnd *scp)
5018 {
5019         struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
5020
5021         memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
5022
5023         if (atomic_inc_return(&sdebug_cmnd_count) >=
5024             abs(scsi_debug_every_nth)) {
5025                 atomic_set(&sdebug_cmnd_count, 0);
5026                 if (scsi_debug_every_nth < -1)
5027                         scsi_debug_every_nth = -1;
5028                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5029                         return 1; /* ignore command causing timeout */
5030                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5031                          scsi_medium_access_command(scp))
5032                         return 1; /* time out reads and writes */
5033                 if (sdebug_any_injecting_opt) {
5034                         int opts = scsi_debug_opts;
5035
5036                         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5037                                 ep->inj_recovered = true;
5038                         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5039                                 ep->inj_transport = true;
5040                         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5041                                 ep->inj_dif = true;
5042                         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5043                                 ep->inj_dix = true;
5044                         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5045                                 ep->inj_short = true;
5046                 }
5047         }
5048         return 0;
5049 }
5050
5051 static int
5052 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5053 {
5054         u8 sdeb_i;
5055         struct scsi_device *sdp = scp->device;
5056         const struct opcode_info_t *oip;
5057         const struct opcode_info_t *r_oip;
5058         struct sdebug_dev_info *devip;
5059         u8 *cmd = scp->cmnd;
5060         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5061         int k, na;
5062         int errsts = 0;
5063         int errsts_no_connect = DID_NO_CONNECT << 16;
5064         u32 flags;
5065         u16 sa;
5066         u8 opcode = cmd[0];
5067         bool has_wlun_rl;
5068         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5069
5070         scsi_set_resid(scp, 0);
5071         if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5072                 char b[120];
5073                 int n, len, sb;
5074
5075                 len = scp->cmd_len;
5076                 sb = (int)sizeof(b);
5077                 if (len > 32)
5078                         strcpy(b, "too long, over 32 bytes");
5079                 else {
5080                         for (k = 0, n = 0; k < len && n < sb; ++k)
5081                                 n += scnprintf(b + n, sb - n, "%02x ",
5082                                                (u32)cmd[k]);
5083                 }
5084                 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5085         }
5086         has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5087         if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5088                 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5089
5090         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5091         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5092         devip = (struct sdebug_dev_info *)sdp->hostdata;
5093         if (!devip) {
5094                 devip = devInfoReg(sdp);
5095                 if (NULL == devip)
5096                         return schedule_resp(scp, NULL, errsts_no_connect, 0);
5097         }
5098         na = oip->num_attached;
5099         r_pfp = oip->pfp;
5100         if (na) {       /* multiple commands with this opcode */
5101                 r_oip = oip;
5102                 if (FF_SA & r_oip->flags) {
5103                         if (F_SA_LOW & oip->flags)
5104                                 sa = 0x1f & cmd[1];
5105                         else
5106                                 sa = get_unaligned_be16(cmd + 8);
5107                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5108                                 if (opcode == oip->opcode && sa == oip->sa)
5109                                         break;
5110                         }
5111                 } else {   /* since no service action only check opcode */
5112                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5113                                 if (opcode == oip->opcode)
5114                                         break;
5115                         }
5116                 }
5117                 if (k > na) {
5118                         if (F_SA_LOW & r_oip->flags)
5119                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5120                         else if (F_SA_HIGH & r_oip->flags)
5121                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5122                         else
5123                                 mk_sense_invalid_opcode(scp);
5124                         goto check_cond;
5125                 }
5126         }       /* else (when na==0) we assume the oip is a match */
5127         flags = oip->flags;
5128         if (F_INV_OP & flags) {
5129                 mk_sense_invalid_opcode(scp);
5130                 goto check_cond;
5131         }
5132         if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5133                 if (debug)
5134                         sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5135                                     "0x%x not supported for wlun\n", opcode);
5136                 mk_sense_invalid_opcode(scp);
5137                 goto check_cond;
5138         }
5139         if (scsi_debug_strict) {        /* check cdb against mask */
5140                 u8 rem;
5141                 int j;
5142
5143                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5144                         rem = ~oip->len_mask[k] & cmd[k];
5145                         if (rem) {
5146                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5147                                         if (0x80 & rem)
5148                                                 break;
5149                                 }
5150                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5151                                 goto check_cond;
5152                         }
5153                 }
5154         }
5155         if (!(F_SKIP_UA & flags) &&
5156             SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5157                 errsts = check_readiness(scp, UAS_ONLY, devip);
5158                 if (errsts)
5159                         goto check_cond;
5160         }
5161         if ((F_M_ACCESS & flags) && devip->stopped) {
5162                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5163                 if (debug)
5164                         sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5165                                     "%s\n", my_name, "initializing command "
5166                                     "required");
5167                 errsts = check_condition_result;
5168                 goto fini;
5169         }
5170         if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5171                 goto fini;
5172         if (scsi_debug_every_nth) {
5173                 if (check_inject(scp))
5174                         return 0;       /* ignore command: make trouble */
5175         }
5176         if (oip->pfp)   /* if this command has a resp_* function, call it */
5177                 errsts = oip->pfp(scp, devip);
5178         else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5179                 errsts = r_pfp(scp, devip);
5180
5181 fini:
5182         return schedule_resp(scp, devip, errsts,
5183                              ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5184 check_cond:
5185         return schedule_resp(scp, devip, check_condition_result, 0);
5186 }
5187
5188 static int
5189 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5190 {
5191         if (scsi_debug_host_lock) {
5192                 unsigned long iflags;
5193                 int rc;
5194
5195                 spin_lock_irqsave(shost->host_lock, iflags);
5196                 rc = scsi_debug_queuecommand(cmd);
5197                 spin_unlock_irqrestore(shost->host_lock, iflags);
5198                 return rc;
5199         } else
5200                 return scsi_debug_queuecommand(cmd);
5201 }
5202
5203 static struct scsi_host_template sdebug_driver_template = {
5204         .show_info =            scsi_debug_show_info,
5205         .write_info =           scsi_debug_write_info,
5206         .proc_name =            sdebug_proc_name,
5207         .name =                 "SCSI DEBUG",
5208         .info =                 scsi_debug_info,
5209         .slave_alloc =          scsi_debug_slave_alloc,
5210         .slave_configure =      scsi_debug_slave_configure,
5211         .slave_destroy =        scsi_debug_slave_destroy,
5212         .ioctl =                scsi_debug_ioctl,
5213         .queuecommand =         sdebug_queuecommand_lock_or_not,
5214         .change_queue_depth =   sdebug_change_qdepth,
5215         .change_queue_type =    sdebug_change_qtype,
5216         .eh_abort_handler =     scsi_debug_abort,
5217         .eh_device_reset_handler = scsi_debug_device_reset,
5218         .eh_target_reset_handler = scsi_debug_target_reset,
5219         .eh_bus_reset_handler = scsi_debug_bus_reset,
5220         .eh_host_reset_handler = scsi_debug_host_reset,
5221         .can_queue =            SCSI_DEBUG_CANQUEUE,
5222         .this_id =              7,
5223         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
5224         .cmd_per_lun =          DEF_CMD_PER_LUN,
5225         .max_sectors =          -1U,
5226         .use_clustering =       DISABLE_CLUSTERING,
5227         .module =               THIS_MODULE,
5228         .track_queue_depth =    1,
5229         .cmd_size =             sizeof(struct sdebug_scmd_extra_t),
5230 };
5231
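/*
 * Probe routine for devices on the pseudo bus (see pseudo_lld_bus below):
 * allocate a virtual SCSI host, apply the DIF/DIX protection and guard
 * settings, then add and scan the host.
 */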
5232 static int sdebug_driver_probe(struct device *dev)
5233 {
5234         int error = 0;
5235         int opts;
5236         struct sdebug_host_info *sdbg_host;
5237         struct Scsi_Host *hpnt;
5238         int host_prot;
5239
5240         sdbg_host = to_sdebug_host(dev);
5241
5242         sdebug_driver_template.can_queue = scsi_debug_max_queue;
5243         if (scsi_debug_clustering)
5244                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5245         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5246         if (!hpnt) {
5247                 pr_err("%s: scsi_host_alloc failed\n", __func__);
5248                 error = -ENODEV;
5249                 return error;
5250         }
5251
5252         sdbg_host->shost = hpnt;
5253         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5254         if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
5255                 hpnt->max_id = scsi_debug_num_tgts + 1;
5256         else
5257                 hpnt->max_id = scsi_debug_num_tgts;
5258         hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* = scsi_debug_max_luns; */
5259
5260         host_prot = 0;
5261
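        /* translate the dif/dix settings into the mid-layer's
         * SHOST_DIF_/SHOST_DIX_ protection capability flags */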
5262         switch (scsi_debug_dif) {
5263
5264         case SD_DIF_TYPE1_PROTECTION:
5265                 host_prot = SHOST_DIF_TYPE1_PROTECTION;
5266                 if (scsi_debug_dix)
5267                         host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5268                 break;
5269
5270         case SD_DIF_TYPE2_PROTECTION:
5271                 host_prot = SHOST_DIF_TYPE2_PROTECTION;
5272                 if (scsi_debug_dix)
5273                         host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5274                 break;
5275
5276         case SD_DIF_TYPE3_PROTECTION:
5277                 host_prot = SHOST_DIF_TYPE3_PROTECTION;
5278                 if (scsi_debug_dix)
5279                         host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5280                 break;
5281
5282         default:
5283                 if (scsi_debug_dix)
5284                         host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5285                 break;
5286         }
5287
5288         scsi_host_set_prot(hpnt, host_prot);
5289
5290         printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
5291                (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5292                (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5293                (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5294                (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5295                (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5296                (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5297                (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5298
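        /* guard == 1 advertises an IP checksum as the DIX guard type;
         * any other value advertises the T10 CRC */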
5299         if (scsi_debug_guard == 1)
5300                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5301         else
5302                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5303
5304         opts = scsi_debug_opts;
5305         if (opts & (SCSI_DEBUG_OPT_RECOVERED_ERR | SCSI_DEBUG_OPT_TRANSPORT_ERR |
5306                     SCSI_DEBUG_OPT_DIF_ERR | SCSI_DEBUG_OPT_DIX_ERR |
5307                     SCSI_DEBUG_OPT_SHORT_TRANSFER))
5308                 sdebug_any_injecting_opt = true;
5316         error = scsi_add_host(hpnt, &sdbg_host->dev);
5317         if (error) {
5318                 pr_err("%s: scsi_add_host failed\n", __func__);
5319                 error = -ENODEV;
5320                 scsi_host_put(hpnt);
5321         } else
5322                 scsi_scan_host(hpnt);
5323
5324         return error;
5325 }
5326
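/*
 * Tear-down counterpart of sdebug_driver_probe(): remove the virtual host,
 * free the per-device info structures still queued on it, then drop the
 * final host reference.
 */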
5327 static int sdebug_driver_remove(struct device *dev)
5328 {
5329         struct sdebug_host_info *sdbg_host;
5330         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5331
5332         sdbg_host = to_sdebug_host(dev);
5333
5334         if (!sdbg_host) {
5335                 printk(KERN_ERR "%s: Unable to locate host info\n",
5336                        __func__);
5337                 return -ENODEV;
5338         }
5339
5340         scsi_remove_host(sdbg_host->shost);
5341
5342         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5343                                  dev_list) {
5344                 list_del(&sdbg_devinfo->dev_list);
5345                 kfree(sdbg_devinfo);
5346         }
5347
5348         scsi_host_put(sdbg_host->shost);
5349         return 0;
5350 }
5351
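/* every device on the pseudo bus matches its driver unconditionally */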
5352 static int pseudo_lld_bus_match(struct device *dev,
5353                                 struct device_driver *dev_driver)
5354 {
5355         return 1;
5356 }
5357
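/* the bus that the emulated scsi_debug host devices hang off; probe and
 * remove of those devices are routed to the functions above */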
5358 static struct bus_type pseudo_lld_bus = {
5359         .name = "pseudo",
5360         .match = pseudo_lld_bus_match,
5361         .probe = sdebug_driver_probe,
5362         .remove = sdebug_driver_remove,
5363         .drv_groups = sdebug_drv_groups,
5364 };
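/*
 * Editor's note (illustrative sketch, not part of the driver): a bus_type
 * such as pseudo_lld_bus is made visible to the driver core from the
 * module's init path, roughly as below.  example_register_pseudo_bus is a
 * hypothetical name; the actual registration is done elsewhere in this
 * file, outside the section shown here.
 */
static int example_register_pseudo_bus(void)
{
        int ret;

        /* after this, devices added to the "pseudo" bus are matched by
         * pseudo_lld_bus_match() and probed via sdebug_driver_probe() */
        ret = bus_register(&pseudo_lld_bus);
        if (ret < 0)
                return ret;
        return 0;
}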