Merge branch 'akpm' (patches from Andrew)
[sfrench/cifs-2.6.git] / drivers / scsi / scsi_debug.c
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  * Copyright (C) 2001 - 2016 Douglas Gilbert
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
17  *
18  */
19
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
22
23 #include <linux/module.h>
24
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
32 #include <linux/fs.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
45 #include <linux/t10-pi.h>
46
47 #include <net/checksum.h>
48
49 #include <asm/unaligned.h>
50
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsicam.h>
56 #include <scsi/scsi_eh.h>
57 #include <scsi/scsi_tcq.h>
58 #include <scsi/scsi_dbg.h>
59
60 #include "sd.h"
61 #include "scsi_logging.h"
62
63 /* make sure inq_product_rev string corresponds to this version */
64 #define SDEBUG_VERSION "1.86"
65 static const char *sdebug_version_date = "20160430";
66
67 #define MY_NAME "scsi_debug"
68
69 /* Additional Sense Code (ASC) */
70 #define NO_ADDITIONAL_SENSE 0x0
71 #define LOGICAL_UNIT_NOT_READY 0x4
72 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
73 #define UNRECOVERED_READ_ERR 0x11
74 #define PARAMETER_LIST_LENGTH_ERR 0x1a
75 #define INVALID_OPCODE 0x20
76 #define LBA_OUT_OF_RANGE 0x21
77 #define INVALID_FIELD_IN_CDB 0x24
78 #define INVALID_FIELD_IN_PARAM_LIST 0x26
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96
97 /* Additional Sense Code Qualifier (ASCQ) */
98 #define ACK_NAK_TO 0x3
99
100 /* Default values for driver parameters */
101 #define DEF_NUM_HOST   1
102 #define DEF_NUM_TGTS   1
103 #define DEF_MAX_LUNS   1
104 /* With these defaults, this driver will make 1 host with 1 target
105  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
106  */
107 #define DEF_ATO 1
108 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
109 #define DEF_DEV_SIZE_MB   8
110 #define DEF_DIF 0
111 #define DEF_DIX 0
112 #define DEF_D_SENSE   0
113 #define DEF_EVERY_NTH   0
114 #define DEF_FAKE_RW     0
115 #define DEF_GUARD 0
116 #define DEF_HOST_LOCK 0
117 #define DEF_LBPU 0
118 #define DEF_LBPWS 0
119 #define DEF_LBPWS10 0
120 #define DEF_LBPRZ 1
121 #define DEF_LOWEST_ALIGNED 0
122 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
123 #define DEF_NO_LUN_0   0
124 #define DEF_NUM_PARTS   0
125 #define DEF_OPTS   0
126 #define DEF_OPT_BLKS 1024
127 #define DEF_PHYSBLK_EXP 0
128 #define DEF_OPT_XFERLEN_EXP 0
129 #define DEF_PTYPE   TYPE_DISK
130 #define DEF_REMOVABLE false
131 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
132 #define DEF_SECTOR_SIZE 512
133 #define DEF_UNMAP_ALIGNMENT 0
134 #define DEF_UNMAP_GRANULARITY 1
135 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
136 #define DEF_UNMAP_MAX_DESC 256
137 #define DEF_VIRTUAL_GB   0
138 #define DEF_VPD_USE_HOSTNO 1
139 #define DEF_WRITESAME_LENGTH 0xFFFF
140 #define DEF_STRICT 0
141 #define DEF_STATISTICS false
142 #define DEF_SUBMIT_QUEUES 1
143 #define DEF_UUID_CTL 0
144 #define JDELAY_OVERRIDDEN -9999
145
146 #define SDEBUG_LUN_0_VAL 0
147
148 /* bit mask values for sdebug_opts */
149 #define SDEBUG_OPT_NOISE                1
150 #define SDEBUG_OPT_MEDIUM_ERR           2
151 #define SDEBUG_OPT_TIMEOUT              4
152 #define SDEBUG_OPT_RECOVERED_ERR        8
153 #define SDEBUG_OPT_TRANSPORT_ERR        16
154 #define SDEBUG_OPT_DIF_ERR              32
155 #define SDEBUG_OPT_DIX_ERR              64
156 #define SDEBUG_OPT_MAC_TIMEOUT          128
157 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
158 #define SDEBUG_OPT_Q_NOISE              0x200
159 #define SDEBUG_OPT_ALL_TSF              0x400
160 #define SDEBUG_OPT_RARE_TSF             0x800
161 #define SDEBUG_OPT_N_WCE                0x1000
162 #define SDEBUG_OPT_RESET_NOISE          0x2000
163 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
164 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
165                               SDEBUG_OPT_RESET_NOISE)
166 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
167                                   SDEBUG_OPT_TRANSPORT_ERR | \
168                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
169                                   SDEBUG_OPT_SHORT_TRANSFER)
170 /* When "every_nth" > 0 then modulo "every_nth" commands:
171  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
172  *   - a RECOVERED_ERROR is simulated on successful read and write
173  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
174  *   - a TRANSPORT_ERROR is simulated on successful read and write
175  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
176  *
177  * When "every_nth" < 0 then after "- every_nth" commands:
178  *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
179  *   - a RECOVERED_ERROR is simulated on successful read and write
180  *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
181  *   - a TRANSPORT_ERROR is simulated on successful read and write
182  *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
183  * This will continue on every subsequent command until some other action
184  * occurs (e.g. the user writing a new value (other than -1 or 1) to
185  * every_nth via sysfs).
186  */
187
188 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
189  * priority order. In the subset implemented here lower numbers have higher
190  * priority. The UA numbers should be a sequence starting from 0 with
191  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
192 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
193 #define SDEBUG_UA_BUS_RESET 1
194 #define SDEBUG_UA_MODE_CHANGED 2
195 #define SDEBUG_UA_CAPACITY_CHANGED 3
196 #define SDEBUG_UA_LUNS_CHANGED 4
197 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
198 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
199 #define SDEBUG_NUM_UAS 7
200
201 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
202  * sector on read commands: */
203 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
204 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
205
206 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
207  * or "peripheral device" addressing (value 0) */
208 #define SAM2_LUN_ADDRESS_METHOD 0
209
210 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
211  * (for response) per submit queue at one time. Can be reduced by max_queue
212  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
213  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
214  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
215  * but cannot exceed SDEBUG_CANQUEUE .
216  */
217 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
218 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
219 #define DEF_CMD_PER_LUN  255
220
221 #define F_D_IN                  1
222 #define F_D_OUT                 2
223 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
224 #define F_D_UNKN                8
225 #define F_RL_WLUN_OK            0x10
226 #define F_SKIP_UA               0x20
227 #define F_DELAY_OVERR           0x40
228 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
229 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
230 #define F_INV_OP                0x200
231 #define F_FAKE_RW               0x400
232 #define F_M_ACCESS              0x800   /* media access */
233
234 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
235 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
236 #define FF_SA (F_SA_HIGH | F_SA_LOW)
237
238 #define SDEBUG_MAX_PARTS 4
239
240 #define SDEBUG_MAX_CMD_LEN 32
241
242
/* Per logical unit (LU) state; one instance per simulated device. Owned by
 * the sdebug_host_info whose dev_info_list these are linked on. */
struct sdebug_dev_info {
        struct list_head dev_list;      /* entry on owner's dev_info_list */
        unsigned int channel;
        unsigned int target;
        u64 lun;
        uuid_t lu_name;         /* per-LU name; NOTE(review): presumably only
                                 * reported when sdebug_uuid_ctl is set --
                                 * confirm against INQUIRY VPD code */
        struct sdebug_host_info *sdbg_host;     /* back-pointer to owning host */
        unsigned long uas_bm[1];        /* pending Unit Attentions; bit
                                         * positions are the SDEBUG_UA_*
                                         * values (SDEBUG_NUM_UAS <= 7 bits) */
        atomic_t num_in_q;      /* NOTE(review): looks like the count of
                                 * commands currently queued on this LU */
        atomic_t stopped;       /* NOTE(review): presumably set by START STOP
                                 * UNIT handling -- confirm */
        bool used;              /* slot in use (allows instance reuse) */
};
255
/* Per simulated host adapter; instances are linked on the global
 * sdebug_host_list. */
struct sdebug_host_info {
        struct list_head host_list;     /* entry on sdebug_host_list */
        struct Scsi_Host *shost;        /* mid-layer host this instance backs */
        struct device dev;              /* embedded device; recovered from a
                                         * struct device * via to_sdebug_host() */
        struct list_head dev_info_list; /* child sdebug_dev_info instances */
};
262
263 #define to_sdebug_host(d)       \
264         container_of(d, struct sdebug_host_info, dev)
265
/* State needed to complete a command response later instead of inline.
 * NOTE(review): presumably hrt is used for nanosecond (ndelay) delays and
 * ew for jiffy (jdelay) delays -- confirm in the queuing code. */
struct sdebug_defer {
        struct hrtimer hrt;
        struct execute_work ew;
        int sqa_idx;    /* index of sdebug_queue array */
        int qc_idx;     /* index of sdebug_queued_cmd array within sqa_idx */
        int issuing_cpu;        /* submitting cpu; differs from the completion
                                 * cpu in the sdebug_miss_cpus statistic */
};
273
struct sdebug_queued_cmd {
        /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
         * instance indicates this slot is in use.
         */
        struct sdebug_defer *sd_dp;     /* deferred-response state, if any */
        struct scsi_cmnd *a_cmnd;       /* mid-layer command held in this slot */
        /* Per-command error-injection flags (cf. SDEBUG_OPT_* option bits) */
        unsigned int inj_recovered:1;
        unsigned int inj_transport:1;
        unsigned int inj_dif:1;
        unsigned int inj_dix:1;
        unsigned int inj_short:1;
};
286
/* One submit queue's worth of in-flight (deferred) commands. sdebug_q_arr
 * holds submit_queues of these. */
struct sdebug_queue {
        struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
        unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS]; /* bit set => the
                                         * matching qc_arr slot is occupied */
        spinlock_t qc_lock;     /* NOTE(review): presumably guards qc_arr and
                                 * in_use_bm -- confirm at use sites */
        atomic_t blocked;       /* to temporarily stop more being queued */
};
293
294 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
295 static atomic_t sdebug_completions;  /* count of deferred completions */
296 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
297 static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
298
/* Describes one supported SCSI opcode (or service-action variant): cdb
 * validation masks, behavior flags, and the response function to invoke. */
struct opcode_info_t {
        u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
                                /* for terminating element */
        u8 opcode;              /* if num_attached > 0, preferred */
        u16 sa;                 /* service action */
        u32 flags;              /* OR-ed set of F_* (and FF_*) flags */
        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
        const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
        u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
                                /* ignore cdb bytes after position 15 */
};
310
/* SCSI opcodes (first byte of cdb) of interest are mapped, via
 * opcode_ind_arr[], onto these indexes into opcode_info_arr[]. */
enum sdeb_opcode_index {
        SDEB_I_INVALID_OPCODE = 0,
        SDEB_I_INQUIRY = 1,
        SDEB_I_REPORT_LUNS = 2,
        SDEB_I_REQUEST_SENSE = 3,
        SDEB_I_TEST_UNIT_READY = 4,
        SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
        SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
        SDEB_I_LOG_SENSE = 7,
        SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
        SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
        SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
        SDEB_I_START_STOP = 11,
        SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
        SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
        SDEB_I_MAINT_IN = 14,
        SDEB_I_MAINT_OUT = 15,
        SDEB_I_VERIFY = 16,             /* 10 only */
        SDEB_I_VARIABLE_LEN = 17,
        SDEB_I_RESERVE = 18,            /* 6, 10 */
        SDEB_I_RELEASE = 19,            /* 6, 10 */
        SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
        SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
        SDEB_I_ATA_PT = 22,             /* 12, 16 */
        SDEB_I_SEND_DIAG = 23,
        SDEB_I_UNMAP = 24,
        SDEB_I_XDWRITEREAD = 25,        /* 10 only */
        SDEB_I_WRITE_BUFFER = 26,
        SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
        SDEB_I_SYNC_CACHE = 28,         /* 10 only */
        SDEB_I_COMP_WRITE = 29,
        SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
};
345
346
/* Maps cdb[0] (the SCSI opcode byte) to a SDEB_I_* index into
 * opcode_info_arr[]. A value of 0 (SDEB_I_INVALID_OPCODE) marks an
 * unsupported opcode. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
        SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
            0, 0, 0, 0,
        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
        0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
            SDEB_I_RELEASE,
        0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
            SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
        0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
        SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
        0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
        0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
        0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
        0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
            SDEB_I_RELEASE,
        0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
        0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
        SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
        0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
        SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
             SDEB_I_MAINT_OUT, 0, 0, 0,
        SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
             0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
389
390 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
391 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
392 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
393 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
394 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
395 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
396 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
397 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
398 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
399 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
400 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
401 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
402 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
403 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
404 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
405 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
406 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
407 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
408 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
409 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
410 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
411
/* MODE SENSE(6) (0x1a); attached to the MODE SENSE(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t msense_iarr[1] = {
        {0, 0x1a, 0, F_D_IN, NULL, NULL,
            {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
416
/* MODE SELECT(6) (0x15); attached to the MODE SELECT(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t mselect_iarr[1] = {
        {0, 0x15, 0, F_D_OUT, NULL, NULL,
            {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
421
/* READ variants attached to the READ(16) entry in opcode_info_arr[]. */
static const struct opcode_info_t read_iarr[3] = {
        {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },
        {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
            {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
            {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
             0xc7, 0, 0, 0, 0} },
};
432
/* WRITE variants attached to the WRITE(16) entry in opcode_info_arr[]. */
static const struct opcode_info_t write_iarr[3] = {
        {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
            {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },
        {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
            {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
            {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
             0xc7, 0, 0, 0, 0} },
};
443
/* GET LBA STATUS (SERVICE ACTION IN(16), sa 0x12); attached to the
 * READ CAPACITY(16) entry in opcode_info_arr[]. */
static const struct opcode_info_t sa_in_iarr[1] = {
        {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
            {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0, 0xc7} },
};
449
/* WRITE(32) (variable length cdb 0x7f, sa 0xb); attached to the
 * VARIABLE LENGTH entry in opcode_info_arr[]. */
static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
        {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
            NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
                   0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
};
455
/* MAINTENANCE IN service actions: REPORT SUPPORTED OPERATION CODES (0xc)
 * and REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS (0xd); attached to the
 * REPORT TARGET PORT GROUPS entry in opcode_info_arr[]. */
static const struct opcode_info_t maint_in_iarr[2] = {
        {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
            {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
             0xc7, 0, 0, 0, 0} },
        {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
            {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
             0, 0} },
};
464
/* WRITE SAME(16) (0x93); attached to the WRITE SAME(10) entry in
 * opcode_info_arr[]. */
static const struct opcode_info_t write_same_iarr[1] = {
        {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
            {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0x1f, 0xc7} },
};
470
/* RESERVE(6) (0x16); attached to the RESERVE(10) entry in opcode_info_arr[]. */
static const struct opcode_info_t reserve_iarr[1] = {
        {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
            {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
475
/* RELEASE(6) (0x17); attached to the RELEASE(10) entry in opcode_info_arr[]. */
static const struct opcode_info_t release_iarr[1] = {
        {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
            {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
480
481
482 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
483  * plus the terminating elements for logic that scans this table such as
484  * REPORT SUPPORTED OPERATION CODES. */
485 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
486 /* 0 */
487         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
488             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
489         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
490             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
491         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
492             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
493              0, 0} },
494         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
495             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
496         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
497             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
498         {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
499             {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
500              0} },
501         {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
502             {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
503         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
504             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
505              0, 0, 0} },
506         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
507             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
508              0, 0} },
509         {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
510             {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
511              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
512 /* 10 */
513         {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
514             {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
515              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
516         {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
517             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518         {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
519             {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
520              0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
521         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
522             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
523         {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
524             {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
525              0} },
526         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
527             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
528         {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
529             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
530              0, 0, 0, 0, 0, 0} },
531         {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
532             vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
533                       0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
534         {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
535             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
536              0} },
537         {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
538             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
539              0} },
540 /* 20 */
541         {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
542             {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
543         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
544             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
545         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
546             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
547         {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
548             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
549         {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
550             {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
551         {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
552             NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
553                    0, 0, 0, 0, 0, 0} },
554         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
555             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
556              0, 0, 0, 0} },                     /* WRITE_BUFFER */
557         {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
558             write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
559                               0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
560         {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
561             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
562              0, 0, 0, 0} },
563         {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
564             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
565              0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
566
567 /* 30 */
568         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
569             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
570 };
571
572 static int sdebug_add_host = DEF_NUM_HOST;
573 static int sdebug_ato = DEF_ATO;
574 static int sdebug_jdelay = DEF_JDELAY;  /* if > 0 then unit is jiffies */
575 static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
576 static int sdebug_dif = DEF_DIF;
577 static int sdebug_dix = DEF_DIX;
578 static int sdebug_dsense = DEF_D_SENSE;
579 static int sdebug_every_nth = DEF_EVERY_NTH;
580 static int sdebug_fake_rw = DEF_FAKE_RW;
581 static unsigned int sdebug_guard = DEF_GUARD;
582 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
583 static int sdebug_max_luns = DEF_MAX_LUNS;
584 static int sdebug_max_queue = SDEBUG_CANQUEUE;  /* per submit queue */
585 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
586 static int sdebug_ndelay = DEF_NDELAY;  /* if > 0 then unit is nanoseconds */
587 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
588 static int sdebug_no_uld;
589 static int sdebug_num_parts = DEF_NUM_PARTS;
590 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
591 static int sdebug_opt_blks = DEF_OPT_BLKS;
592 static int sdebug_opts = DEF_OPTS;
593 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
594 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
595 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
596 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
597 static int sdebug_sector_size = DEF_SECTOR_SIZE;
598 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
599 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
600 static unsigned int sdebug_lbpu = DEF_LBPU;
601 static unsigned int sdebug_lbpws = DEF_LBPWS;
602 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
603 static unsigned int sdebug_lbprz = DEF_LBPRZ;
604 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
605 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
606 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
607 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
608 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
609 static int sdebug_uuid_ctl = DEF_UUID_CTL;
610 static bool sdebug_removable = DEF_REMOVABLE;
611 static bool sdebug_clustering;
612 static bool sdebug_host_lock = DEF_HOST_LOCK;
613 static bool sdebug_strict = DEF_STRICT;
614 static bool sdebug_any_injecting_opt;
615 static bool sdebug_verbose;
616 static bool have_dif_prot;
617 static bool sdebug_statistics = DEF_STATISTICS;
618 static bool sdebug_mq_active;
619
620 static unsigned int sdebug_store_sectors;
621 static sector_t sdebug_capacity;        /* in sectors */
622
623 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
624    may still need them */
625 static int sdebug_heads;                /* heads per disk */
626 static int sdebug_cylinders_per;        /* cylinders per surface */
627 static int sdebug_sectors_per;          /* sectors per cylinder */
628
629 static LIST_HEAD(sdebug_host_list);
630 static DEFINE_SPINLOCK(sdebug_host_list_lock);
631
632 static unsigned char *fake_storep;      /* ramdisk storage */
633 static struct t10_pi_tuple *dif_storep; /* protection info */
634 static void *map_storep;                /* provisioning map */
635
636 static unsigned long map_size;
637 static int num_aborts;
638 static int num_dev_resets;
639 static int num_target_resets;
640 static int num_bus_resets;
641 static int num_host_resets;
642 static int dix_writes;
643 static int dix_reads;
644 static int dif_errors;
645
646 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
647 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
648
649 static DEFINE_RWLOCK(atomic_rw);
650
651 static char sdebug_proc_name[] = MY_NAME;
652 static const char *my_name = MY_NAME;
653
654 static struct bus_type pseudo_lld_bus;
655
656 static struct device_driver sdebug_driverfs_driver = {
657         .name           = sdebug_proc_name,
658         .bus            = &pseudo_lld_bus,
659 };
660
661 static const int check_condition_result =
662                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
663
664 static const int illegal_condition_result =
665         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
666
667 static const int device_qfull_result =
668         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
669
670
671 /* Only do the extra work involved in logical block provisioning if one or
672  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
673  * real reads and writes (i.e. not skipping them for speed).
674  */
675 static inline bool scsi_debug_lbp(void)
676 {
677         return 0 == sdebug_fake_rw &&
678                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
679 }
680
/* Map a logical block address to its backing byte address in the fake_storep
 * ramdisk. The lba is reduced modulo sdebug_store_sectors so a ramdisk that
 * is smaller than the advertised capacity wraps around. Note that do_div()
 * divides its first argument in place and returns the remainder, which is
 * what is assigned back to lba here. */
static void *fake_store(unsigned long long lba)
{
        lba = do_div(lba, sdebug_store_sectors);        /* lba = remainder */

        return fake_storep + lba * sdebug_sector_size;
}
687
/* Map a sector to its T10 protection-information tuple in dif_storep,
 * wrapping modulo sdebug_store_sectors the same way fake_store() does.
 * sector_div() divides in place and returns the remainder. */
static struct t10_pi_tuple *dif_store(sector_t sector)
{
        sector = sector_div(sector, sdebug_store_sectors);      /* remainder */

        return dif_storep + sector;
}
694
695 static void sdebug_max_tgts_luns(void)
696 {
697         struct sdebug_host_info *sdbg_host;
698         struct Scsi_Host *hpnt;
699
700         spin_lock(&sdebug_host_list_lock);
701         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
702                 hpnt = sdbg_host->shost;
703                 if ((hpnt->this_id >= 0) &&
704                     (sdebug_num_tgts > hpnt->this_id))
705                         hpnt->max_id = sdebug_num_tgts + 1;
706                 else
707                         hpnt->max_id = sdebug_num_tgts;
708                 /* sdebug_max_luns; */
709                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
710         }
711         spin_unlock(&sdebug_host_list_lock);
712 }
713
/* Where an invalid field was found: the data-out buffer or the CDB itself */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
715
/* Set in_bit to -1 to indicate no bit position of invalid field.
 * Builds ILLEGAL REQUEST sense data with a sense-key specific (SKS) field
 * pointing at the offending byte (and optionally bit) in either the CDB
 * or the data-out parameter list, per c_d. */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
                                 enum sdeb_cmd_data c_d,
                                 int in_byte, int in_bit)
{
        unsigned char *sbuff;
        u8 sks[4];      /* sense-key specific bytes (SPC "field pointer") */
        int sl, asc;

        sbuff = scp->sense_buffer;
        if (!sbuff) {
                sdev_printk(KERN_ERR, scp->device,
                            "%s: sense_buffer is NULL\n", __func__);
                return;
        }
        /* ASC depends on whether the bad field was in the CDB or the
         * parameter list */
        asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
        memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
        scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
        memset(sks, 0, sizeof(sks));
        sks[0] = 0x80;          /* SKSV: field pointer valid */
        if (c_d)
                sks[0] |= 0x40; /* C/D: error is in the CDB */
        if (in_bit >= 0) {
                sks[0] |= 0x8;  /* BPV: bit pointer valid */
                sks[0] |= 0x7 & in_bit;
        }
        put_unaligned_be16(in_byte, sks + 1);   /* byte (field) pointer */
        if (sdebug_dsense) {
                /* descriptor format: append an SKS descriptor (type 0x2,
                 * length 0x6) and grow the additional sense length */
                sl = sbuff[7] + 8;
                sbuff[7] = sl;
                sbuff[sl] = 0x2;
                sbuff[sl + 1] = 0x6;
                memcpy(sbuff + sl + 4, sks, 3);
        } else
                memcpy(sbuff + 15, sks, 3);     /* fixed format: bytes 15-17 */
        if (sdebug_verbose)
                sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
                            "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
                            my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
756
757 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
758 {
759         unsigned char *sbuff;
760
761         sbuff = scp->sense_buffer;
762         if (!sbuff) {
763                 sdev_printk(KERN_ERR, scp->device,
764                             "%s: sense_buffer is NULL\n", __func__);
765                 return;
766         }
767         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
768
769         scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
770
771         if (sdebug_verbose)
772                 sdev_printk(KERN_INFO, scp->device,
773                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
774                             my_name, key, asc, asq);
775 }
776
/* Report ILLEGAL REQUEST with ASC "invalid command operation code" */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
        mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
781
782 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
783 {
784         if (sdebug_verbose) {
785                 if (0x1261 == cmd)
786                         sdev_printk(KERN_INFO, dev,
787                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
788                 else if (0x5331 == cmd)
789                         sdev_printk(KERN_INFO, dev,
790                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
791                                     __func__);
792                 else
793                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
794                                     __func__, cmd);
795         }
796         return -EINVAL;
797         /* return -ENOTTY; // correct return but upsets fdisk */
798 }
799
800 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
801 {
802         struct sdebug_host_info *sdhp;
803         struct sdebug_dev_info *dp;
804
805         spin_lock(&sdebug_host_list_lock);
806         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
807                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
808                         if ((devip->sdbg_host == dp->sdbg_host) &&
809                             (devip->target == dp->target))
810                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
811                 }
812         }
813         spin_unlock(&sdebug_host_list_lock);
814 }
815
/* If a unit attention (UA) condition is pending for this device, consume
 * the highest-priority one (lowest bit in uas_bm), build the matching
 * UNIT ATTENTION sense data in scp, and return check_condition_result.
 * Returns 0 when no UA is pending. */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
        int k;

        k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
        if (k != SDEBUG_NUM_UAS) {
                const char *cp = NULL;  /* verbose-log description of the UA */

                switch (k) {
                case SDEBUG_UA_POR:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
                                        POWER_ON_RESET_ASCQ);
                        if (sdebug_verbose)
                                cp = "power on reset";
                        break;
                case SDEBUG_UA_BUS_RESET:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
                                        BUS_RESET_ASCQ);
                        if (sdebug_verbose)
                                cp = "bus reset";
                        break;
                case SDEBUG_UA_MODE_CHANGED:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
                                        MODE_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "mode parameters changed";
                        break;
                case SDEBUG_UA_CAPACITY_CHANGED:
                        mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
                                        CAPACITY_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "capacity data changed";
                        break;
                case SDEBUG_UA_MICROCODE_CHANGED:
                        mk_sense_buffer(scp, UNIT_ATTENTION,
                                        TARGET_CHANGED_ASC,
                                        MICROCODE_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "microcode has been changed";
                        break;
                case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
                        mk_sense_buffer(scp, UNIT_ATTENTION,
                                        TARGET_CHANGED_ASC,
                                        MICROCODE_CHANGED_WO_RESET_ASCQ);
                        if (sdebug_verbose)
                                cp = "microcode has been changed without reset";
                        break;
                case SDEBUG_UA_LUNS_CHANGED:
                        /*
                         * SPC-3 behavior is to report a UNIT ATTENTION with
                         * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
                         * on the target, until a REPORT LUNS command is
                         * received.  SPC-4 behavior is to report it only once.
                         * NOTE:  sdebug_scsi_level does not use the same
                         * values as struct scsi_device->scsi_level.
                         */
                        if (sdebug_scsi_level >= 6)     /* SPC-4 and above */
                                clear_luns_changed_on_target(devip);
                        mk_sense_buffer(scp, UNIT_ATTENTION,
                                        TARGET_CHANGED_ASC,
                                        LUNS_CHANGED_ASCQ);
                        if (sdebug_verbose)
                                cp = "reported luns data has changed";
                        break;
                default:
                        pr_warn("unexpected unit attention code=%d\n", k);
                        if (sdebug_verbose)
                                cp = "unknown";
                        break;
                }
                /* UA is reported once, then cleared */
                clear_bit(k, devip->uas_bm);
                if (sdebug_verbose)
                        sdev_printk(KERN_INFO, scp->device,
                                   "%s reports: Unit attention: %s\n",
                                   my_name, cp);
                return check_condition_result;
        }
        return 0;
}
895
896 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
897 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
898                                 int arr_len)
899 {
900         int act_len;
901         struct scsi_data_buffer *sdb = scsi_in(scp);
902
903         if (!sdb->length)
904                 return 0;
905         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
906                 return DID_ERROR << 16;
907
908         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
909                                       arr, arr_len);
910         sdb->resid = scsi_bufflen(scp) - act_len;
911
912         return 0;
913 }
914
915 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
916  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
917  * calls, not required to write in ascending offset order. Assumes resid
918  * set to scsi_bufflen() prior to any calls.
919  */
920 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
921                                   int arr_len, unsigned int off_dst)
922 {
923         int act_len, n;
924         struct scsi_data_buffer *sdb = scsi_in(scp);
925         off_t skip = off_dst;
926
927         if (sdb->length <= off_dst)
928                 return 0;
929         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
930                 return DID_ERROR << 16;
931
932         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
933                                        arr, arr_len, skip);
934         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
935                  __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
936         n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
937         sdb->resid = min(sdb->resid, n);
938         return 0;
939 }
940
941 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
942  * 'arr' or -1 if error.
943  */
944 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
945                                int arr_len)
946 {
947         if (!scsi_bufflen(scp))
948                 return 0;
949         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
950                 return -1;
951
952         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
953 }
954
955
/* Canned INQUIRY identity: 8-byte vendor id and 16-byte product id are
 * space padded to their full field widths; revision is 4 bytes. */
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0186";    /* version less '.' */
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
963
/* Device identification VPD page. Returns number of bytes placed in arr.
 * Emits a sequence of designation descriptors; each starts with a 4-byte
 * header: protocol/code-set, PIV/association/designator-type, reserved,
 * designator length. */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
                          int target_dev_id, int dev_id_num,
                          const char *dev_id_str, int dev_id_str_len,
                          const uuid_t *lu_name)
{
        int num, port_a;
        char b[32];

        port_a = target_dev_id + 1;
        /* T10 vendor identifier field format (faked) */
        arr[0] = 0x2;   /* ASCII */
        arr[1] = 0x1;
        arr[2] = 0x0;
        memcpy(&arr[4], inq_vendor_id, 8);
        memcpy(&arr[12], inq_product_id, 16);
        memcpy(&arr[28], dev_id_str, dev_id_str_len);
        num = 8 + 16 + dev_id_str_len;
        arr[3] = num;
        num += 4;
        /* dev_id_num < 0 means a well-known LU: skip the lu designators */
        if (dev_id_num >= 0) {
                if (sdebug_uuid_ctl) {
                        /* Locally assigned UUID */
                        arr[num++] = 0x1;  /* binary (not necessarily sas) */
                        arr[num++] = 0xa;  /* PIV=0, lu, naa */
                        arr[num++] = 0x0;
                        arr[num++] = 0x12;
                        arr[num++] = 0x10; /* uuid type=1, locally assigned */
                        arr[num++] = 0x0;
                        memcpy(arr + num, lu_name, 16);
                        num += 16;
                } else {
                        /* NAA-3, Logical unit identifier (binary) */
                        arr[num++] = 0x1;  /* binary (not necessarily sas) */
                        arr[num++] = 0x3;  /* PIV=0, lu, naa */
                        arr[num++] = 0x0;
                        arr[num++] = 0x8;
                        put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
                        num += 8;
                }
                /* Target relative port number */
                arr[num++] = 0x61;      /* proto=sas, binary */
                arr[num++] = 0x94;      /* PIV=1, target port, rel port */
                arr[num++] = 0x0;       /* reserved */
                arr[num++] = 0x4;       /* length */
                arr[num++] = 0x0;       /* reserved */
                arr[num++] = 0x0;       /* reserved */
                arr[num++] = 0x0;
                arr[num++] = 0x1;       /* relative port A */
        }
        /* NAA-3, Target port identifier */
        arr[num++] = 0x61;      /* proto=sas, binary */
        arr[num++] = 0x93;      /* piv=1, target port, naa */
        arr[num++] = 0x0;
        arr[num++] = 0x8;
        put_unaligned_be64(naa3_comp_a + port_a, arr + num);
        num += 8;
        /* NAA-3, Target port group identifier */
        arr[num++] = 0x61;      /* proto=sas, binary */
        arr[num++] = 0x95;      /* piv=1, target port group id */
        arr[num++] = 0x0;
        arr[num++] = 0x4;
        arr[num++] = 0;
        arr[num++] = 0;
        put_unaligned_be16(port_group_id, arr + num);
        num += 2;
        /* NAA-3, Target device identifier */
        arr[num++] = 0x61;      /* proto=sas, binary */
        arr[num++] = 0xa3;      /* piv=1, target device, naa */
        arr[num++] = 0x0;
        arr[num++] = 0x8;
        put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
        num += 8;
        /* SCSI name string: Target device identifier */
        arr[num++] = 0x63;      /* proto=sas, UTF-8 */
        arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
        arr[num++] = 0x0;
        arr[num++] = 24;
        /* "naa." + 16 hex digits: 12-char prefix + 8-char hex of dev id */
        memcpy(arr + num, "naa.32222220", 12);
        num += 12;
        snprintf(b, sizeof(b), "%08X", target_dev_id);
        memcpy(arr + num, b, 8);
        num += 8;
        memset(arr + num, 0, 4);        /* pad name string to 24 bytes */
        num += 4;
        return num;
}
1051
/* Canned payload for the Software Interface Identification VPD page (0x84);
 * three 6-byte entries starting at the page's 4th byte. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1057
1058 /*  Software interface identification VPD page */
1059 static int inquiry_vpd_84(unsigned char *arr)
1060 {
1061         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1062         return sizeof(vpd84_data);
1063 }
1064
/* Management network addresses VPD page: two network-service descriptors,
 * each a 4-byte header followed by a NULL terminated URL padded to a
 * multiple of 4 bytes.  Returns the number of bytes placed in arr. */
static int inquiry_vpd_85(unsigned char *arr)
{
        static const struct {
                unsigned char svc;      /* association + service type byte */
                const char *url;
        } net[] = {
                { 0x1, "https://www.kernel.org/config" }, /* lu, storage config */
                { 0x4, "http://www.kernel.org/log" },     /* lu, logging */
        };
        int i, num = 0;

        for (i = 0; i < 2; ++i) {
                int olen = strlen(net[i].url);
                int plen = (olen + 1 + 3) & ~3; /* NULL + pad to mult of 4 */

                arr[num++] = net[i].svc;
                arr[num++] = 0x0;       /* reserved */
                arr[num++] = 0x0;
                arr[num++] = plen;      /* length, null terminated, padded */
                memcpy(arr + num, net[i].url, olen);
                memset(arr + num + olen, 0, plen - olen);
                num += plen;
        }

        return num;
}
1099
1100 /* SCSI ports VPD page */
1101 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1102 {
1103         int num = 0;
1104         int port_a, port_b;
1105
1106         port_a = target_dev_id + 1;
1107         port_b = port_a + 1;
1108         arr[num++] = 0x0;       /* reserved */
1109         arr[num++] = 0x0;       /* reserved */
1110         arr[num++] = 0x0;
1111         arr[num++] = 0x1;       /* relative port 1 (primary) */
1112         memset(arr + num, 0, 6);
1113         num += 6;
1114         arr[num++] = 0x0;
1115         arr[num++] = 12;        /* length tp descriptor */
1116         /* naa-5 target port identifier (A) */
1117         arr[num++] = 0x61;      /* proto=sas, binary */
1118         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1119         arr[num++] = 0x0;       /* reserved */
1120         arr[num++] = 0x8;       /* length */
1121         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1122         num += 8;
1123         arr[num++] = 0x0;       /* reserved */
1124         arr[num++] = 0x0;       /* reserved */
1125         arr[num++] = 0x0;
1126         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1127         memset(arr + num, 0, 6);
1128         num += 6;
1129         arr[num++] = 0x0;
1130         arr[num++] = 12;        /* length tp descriptor */
1131         /* naa-5 target port identifier (B) */
1132         arr[num++] = 0x61;      /* proto=sas, binary */
1133         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1134         arr[num++] = 0x0;       /* reserved */
1135         arr[num++] = 0x8;       /* length */
1136         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1137         num += 8;
1138
1139         return num;
1140 }
1141
1142
/* Canned payload for the ATA Information VPD page (0x89); looks like a
 * SAT header plus an ATA IDENTIFY DEVICE data block -- TODO confirm
 * against SAT-2 before relying on individual fields. */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1186
1187 /* ATA Information VPD page */
1188 static int inquiry_vpd_89(unsigned char *arr)
1189 {
1190         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1191         return sizeof(vpd89_data);
1192 }
1193
1194
/* Template for the Block Limits VPD page (0xb0); most fields are then
 * overwritten by inquiry_vpd_b0() from the sdebug_* parameters. */
static unsigned char vpdb0_data[] = {
        /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1201
1202 /* Block limits VPD page (SBC-3) */
1203 static int inquiry_vpd_b0(unsigned char *arr)
1204 {
1205         unsigned int gran;
1206
1207         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1208
1209         /* Optimal transfer length granularity */
1210         if (sdebug_opt_xferlen_exp != 0 &&
1211             sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1212                 gran = 1 << sdebug_opt_xferlen_exp;
1213         else
1214                 gran = 1 << sdebug_physblk_exp;
1215         put_unaligned_be16(gran, arr + 2);
1216
1217         /* Maximum Transfer Length */
1218         if (sdebug_store_sectors > 0x400)
1219                 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1220
1221         /* Optimal Transfer Length */
1222         put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1223
1224         if (sdebug_lbpu) {
1225                 /* Maximum Unmap LBA Count */
1226                 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1227
1228                 /* Maximum Unmap Block Descriptor Count */
1229                 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1230         }
1231
1232         /* Unmap Granularity Alignment */
1233         if (sdebug_unmap_alignment) {
1234                 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1235                 arr[28] |= 0x80; /* UGAVALID */
1236         }
1237
1238         /* Optimal Unmap Granularity */
1239         put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1240
1241         /* Maximum WRITE SAME Length */
1242         put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1243
1244         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1245
1246         return sizeof(vpdb0_data);
1247 }
1248
/* Block device characteristics VPD page (SBC-3): advertise a small
 * non-rotating (solid state) medium.  Returns the page length. */
static int inquiry_vpd_b1(unsigned char *arr)
{
        memset(arr, 0, 0x3c);
        arr[1] = 1;     /* non rotating medium (e.g. solid state) */
        arr[3] = 5;     /* nominal form factor: less than 1.8" */

        return 0x3c;
}
1260
1261 /* Logical block provisioning VPD page (SBC-4) */
1262 static int inquiry_vpd_b2(unsigned char *arr)
1263 {
1264         memset(arr, 0, 0x4);
1265         arr[0] = 0;                     /* threshold exponent */
1266         if (sdebug_lbpu)
1267                 arr[1] = 1 << 7;
1268         if (sdebug_lbpws)
1269                 arr[1] |= 1 << 6;
1270         if (sdebug_lbpws10)
1271                 arr[1] |= 1 << 5;
1272         if (sdebug_lbprz && scsi_debug_lbp())
1273                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1274         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1275         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1276         /* threshold_percentage=0 */
1277         return 0x4;
1278 }
1279
#define SDEBUG_LONG_INQ_SZ 96          /* standard INQUIRY response length */
#define SDEBUG_MAX_INQ_ARR_SZ 584      /* scratch size covering largest VPD page */
1282
/* Respond to the INQUIRY command: either one of the supported Vital
 * Product Data (VPD) pages (EVPD bit set) or the standard inquiry data.
 * Returns 0, check_condition_result, or a DID_* error shifted into the
 * host byte. */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
        unsigned char pq_pdt;   /* peripheral qualifier + device type byte */
        unsigned char * arr;
        unsigned char *cmd = scp->cmnd;
        int alloc_len, n, ret;
        bool have_wlun, is_disk;

        alloc_len = get_unaligned_be16(cmd + 3);  /* CDB allocation length */
        arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
        if (! arr)
                return DID_REQUEUE << 16;
        is_disk = (sdebug_ptype == TYPE_DISK);
        have_wlun = scsi_is_wlun(scp->device->lun);
        if (have_wlun)
                pq_pdt = TYPE_WLUN;     /* present, wlun */
        else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
                pq_pdt = 0x7f;  /* not present, PQ=3, PDT=0x1f */
        else
                pq_pdt = (sdebug_ptype & 0x1f);
        arr[0] = pq_pdt;
        if (0x2 & cmd[1]) {  /* CMDDT bit set */
                /* obsolete in SPC; reject with invalid field in CDB */
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
                kfree(arr);
                return check_condition_result;
        } else if (0x1 & cmd[1]) {  /* EVPD bit set */
                int lu_id_num, port_group_id, target_dev_id, len;
                char lu_id_str[6];
                int host_no = devip->sdbg_host->shost->host_no;

                /* synthesize ids from host/channel/target/lun numbers */
                port_group_id = (((host_no + 1) & 0x7f) << 8) +
                    (devip->channel & 0x7f);
                if (sdebug_vpd_use_hostno == 0)
                        host_no = 0;
                lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
                            (devip->target * 1000) + devip->lun);
                target_dev_id = ((host_no + 1) * 2000) +
                                 (devip->target * 1000) - 3;
                len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
                if (0 == cmd[2]) { /* supported vital product data pages */
                        arr[1] = cmd[2];        /*sanity */
                        n = 4;
                        arr[n++] = 0x0;   /* this page */
                        arr[n++] = 0x80;  /* unit serial number */
                        arr[n++] = 0x83;  /* device identification */
                        arr[n++] = 0x84;  /* software interface ident. */
                        arr[n++] = 0x85;  /* management network addresses */
                        arr[n++] = 0x86;  /* extended inquiry */
                        arr[n++] = 0x87;  /* mode page policy */
                        arr[n++] = 0x88;  /* SCSI ports */
                        if (is_disk) {    /* SBC only */
                                arr[n++] = 0x89;  /* ATA information */
                                arr[n++] = 0xb0;  /* Block limits */
                                arr[n++] = 0xb1;  /* Block characteristics */
                                arr[n++] = 0xb2;  /* Logical Block Prov */
                        }
                        arr[3] = n - 4;   /* number of supported VPD pages */
                } else if (0x80 == cmd[2]) { /* unit serial number */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = len;
                        memcpy(&arr[4], lu_id_str, len);
                } else if (0x83 == cmd[2]) { /* device identification */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
                                                target_dev_id, lu_id_num,
                                                lu_id_str, len,
                                                &devip->lu_name);
                } else if (0x84 == cmd[2]) { /* Software interface ident. */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_84(&arr[4]);
                } else if (0x85 == cmd[2]) { /* Management network addresses */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_85(&arr[4]);
                } else if (0x86 == cmd[2]) { /* extended inquiry */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = 0x3c;  /* number of following entries */
                        if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
                                arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
                        else if (have_dif_prot)
                                arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
                        else
                                arr[4] = 0x0;   /* no protection stuff */
                        arr[5] = 0x7;   /* head of q, ordered + simple q's */
                } else if (0x87 == cmd[2]) { /* mode page policy */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = 0x8;   /* number of following entries */
                        arr[4] = 0x2;   /* disconnect-reconnect mp */
                        arr[6] = 0x80;  /* mlus, shared */
                        arr[8] = 0x18;   /* protocol specific lu */
                        arr[10] = 0x82;  /* mlus, per initiator port */
                } else if (0x88 == cmd[2]) { /* SCSI Ports */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
                } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
                        arr[1] = cmd[2];        /*sanity */
                        n = inquiry_vpd_89(&arr[4]);
                        /* page 0x89 is > 255 bytes: 2-byte page length */
                        put_unaligned_be16(n, arr + 2);
                } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_b0(&arr[4]);
                } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_b1(&arr[4]);
                } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
                        arr[1] = cmd[2];        /*sanity */
                        arr[3] = inquiry_vpd_b2(&arr[4]);
                } else {
                        mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
                        kfree(arr);
                        return check_condition_result;
                }
                /* clip response to page length + header and alloc_len */
                len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
                ret = fill_from_dev_buffer(scp, arr,
                            min(len, SDEBUG_MAX_INQ_ARR_SZ));
                kfree(arr);
                return ret;
        }
        /* drops through here for a standard inquiry */
        arr[1] = sdebug_removable ? 0x80 : 0;   /* Removable disk */
        arr[2] = sdebug_scsi_level;
        arr[3] = 2;    /* response_data_format==2 */
        arr[4] = SDEBUG_LONG_INQ_SZ - 5;        /* additional length */
        arr[5] = (int)have_dif_prot;    /* PROTECT bit */
        if (sdebug_vpd_use_hostno == 0)
                arr[5] |= 0x10; /* claim: implicit TPGS */
        arr[6] = 0x10; /* claim: MultiP */
        /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
        arr[7] = 0xa; /* claim: LINKED + CMDQUE */
        memcpy(&arr[8], inq_vendor_id, 8);
        memcpy(&arr[16], inq_product_id, 16);
        memcpy(&arr[32], inq_product_rev, 4);
        /* version descriptors (2 bytes each) follow */
        put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
        put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
        n = 62;
        if (is_disk) {          /* SBC-4 no version claimed */
                put_unaligned_be16(0x600, arr + n);
                n += 2;
        } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
                put_unaligned_be16(0x525, arr + n);
                n += 2;
        }
        put_unaligned_be16(0x2100, arr + n);    /* SPL-4 no version claimed */
        ret = fill_from_dev_buffer(scp, arr,
                            min(alloc_len, SDEBUG_LONG_INQ_SZ));
        kfree(arr);
        return ret;
}
1431
/* Informational Exceptions Control mode page (0x1c) current values.
 * Shared mutable state: resp_requests() reads the TEST bit (byte 2) and
 * MRIE field (byte 3), and resp_iec_m_pg() copies it out as the
 * current-values page. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1434
1435 static int resp_requests(struct scsi_cmnd * scp,
1436                          struct sdebug_dev_info * devip)
1437 {
1438         unsigned char * sbuff;
1439         unsigned char *cmd = scp->cmnd;
1440         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1441         bool dsense;
1442         int len = 18;
1443
1444         memset(arr, 0, sizeof(arr));
1445         dsense = !!(cmd[1] & 1);
1446         sbuff = scp->sense_buffer;
1447         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1448                 if (dsense) {
1449                         arr[0] = 0x72;
1450                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1451                         arr[2] = THRESHOLD_EXCEEDED;
1452                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1453                         len = 8;
1454                 } else {
1455                         arr[0] = 0x70;
1456                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1457                         arr[7] = 0xa;           /* 18 byte sense buffer */
1458                         arr[12] = THRESHOLD_EXCEEDED;
1459                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1460                 }
1461         } else {
1462                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1463                 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1464                         ;       /* have sense and formats match */
1465                 else if (arr[0] <= 0x70) {
1466                         if (dsense) {
1467                                 memset(arr, 0, 8);
1468                                 arr[0] = 0x72;
1469                                 len = 8;
1470                         } else {
1471                                 memset(arr, 0, 18);
1472                                 arr[0] = 0x70;
1473                                 arr[7] = 0xa;
1474                         }
1475                 } else if (dsense) {
1476                         memset(arr, 0, 8);
1477                         arr[0] = 0x72;
1478                         arr[1] = sbuff[2];     /* sense key */
1479                         arr[2] = sbuff[12];    /* asc */
1480                         arr[3] = sbuff[13];    /* ascq */
1481                         len = 8;
1482                 } else {
1483                         memset(arr, 0, 18);
1484                         arr[0] = 0x70;
1485                         arr[2] = sbuff[1];
1486                         arr[7] = 0xa;
1487                         arr[12] = sbuff[1];
1488                         arr[13] = sbuff[3];
1489                 }
1490
1491         }
1492         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1493         return fill_from_dev_buffer(scp, arr, len);
1494 }
1495
1496 static int resp_start_stop(struct scsi_cmnd * scp,
1497                            struct sdebug_dev_info * devip)
1498 {
1499         unsigned char *cmd = scp->cmnd;
1500         int power_cond, stop;
1501
1502         power_cond = (cmd[4] & 0xf0) >> 4;
1503         if (power_cond) {
1504                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1505                 return check_condition_result;
1506         }
1507         stop = !(cmd[4] & 1);
1508         atomic_xchg(&devip->stopped, stop);
1509         return 0;
1510 }
1511
1512 static sector_t get_sdebug_capacity(void)
1513 {
1514         static const unsigned int gibibyte = 1073741824;
1515
1516         if (sdebug_virtual_gb > 0)
1517                 return (sector_t)sdebug_virtual_gb *
1518                         (gibibyte / sdebug_sector_size);
1519         else
1520                 return sdebug_store_sectors;
1521 }
1522
1523 #define SDEBUG_READCAP_ARR_SZ 8
1524 static int resp_readcap(struct scsi_cmnd * scp,
1525                         struct sdebug_dev_info * devip)
1526 {
1527         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1528         unsigned int capac;
1529
1530         /* following just in case virtual_gb changed */
1531         sdebug_capacity = get_sdebug_capacity();
1532         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1533         if (sdebug_capacity < 0xffffffff) {
1534                 capac = (unsigned int)sdebug_capacity - 1;
1535                 put_unaligned_be32(capac, arr + 0);
1536         } else
1537                 put_unaligned_be32(0xffffffff, arr + 0);
1538         put_unaligned_be16(sdebug_sector_size, arr + 6);
1539         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1540 }
1541
1542 #define SDEBUG_READCAP16_ARR_SZ 32
1543 static int resp_readcap16(struct scsi_cmnd * scp,
1544                           struct sdebug_dev_info * devip)
1545 {
1546         unsigned char *cmd = scp->cmnd;
1547         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1548         int alloc_len;
1549
1550         alloc_len = get_unaligned_be32(cmd + 10);
1551         /* following just in case virtual_gb changed */
1552         sdebug_capacity = get_sdebug_capacity();
1553         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1554         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1555         put_unaligned_be32(sdebug_sector_size, arr + 8);
1556         arr[13] = sdebug_physblk_exp & 0xf;
1557         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1558
1559         if (scsi_debug_lbp()) {
1560                 arr[14] |= 0x80; /* LBPME */
1561                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1562                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1563                  * in the wider field maps to 0 in this field.
1564                  */
1565                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
1566                         arr[14] |= 0x40;
1567         }
1568
1569         arr[15] = sdebug_lowest_aligned & 0xff;
1570
1571         if (have_dif_prot) {
1572                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1573                 arr[12] |= 1; /* PROT_EN */
1574         }
1575
1576         return fill_from_dev_buffer(scp, arr,
1577                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1578 }
1579
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/* Response to REPORT TARGET PORT GROUPS (MAINTENANCE IN, sa=0xa).
 * Builds two single-port target port groups and returns the smaller of
 * the allocation length and the constructed data length. */
static int resp_report_tgtpgs(struct scsi_cmnd * scp,
			      struct sdebug_dev_info * devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char * arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* have mid-layer retry later */
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* descriptors start after the 4 byte header */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes the 4 byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen,n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1657
/* Response to REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, sa=0xc).
 * Reporting option 0 lists every supported command; options 1-3 describe
 * a single command selected by opcode (and service action for 2/3).
 * When RCTD is set a command timeouts descriptor is appended to each
 * command descriptor. */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;	/* return command timeout descriptors requested */
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap the internal buffer at 8 KiB */
	else
		a_len = alloc_len;
	/* over-allocate so descriptor emission cannot run off the end */
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0: /* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each descriptor is 8 bytes, plus 12 for timeouts if RCTD */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* emit attached entries (same opcode, other sa's) */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore outer loop cursor */
			offset += bump;
		}
		break;
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa field */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported as standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached entries for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries for requested sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];	/* cdb length */
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			/* append a 12 byte command timeouts descriptor */
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
1808
1809 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1810                           struct sdebug_dev_info *devip)
1811 {
1812         bool repd;
1813         u32 alloc_len, len;
1814         u8 arr[16];
1815         u8 *cmd = scp->cmnd;
1816
1817         memset(arr, 0, sizeof(arr));
1818         repd = !!(cmd[2] & 0x80);
1819         alloc_len = get_unaligned_be32(cmd + 6);
1820         if (alloc_len < 4) {
1821                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1822                 return check_condition_result;
1823         }
1824         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1825         arr[1] = 0x1;           /* ITNRS */
1826         if (repd) {
1827                 arr[3] = 0xc;
1828                 len = 16;
1829         } else
1830                 len = 4;
1831
1832         len = (len < alloc_len) ? len : alloc_len;
1833         return fill_from_dev_buffer(scp, arr, len);
1834 }
1835
1836 /* <<Following mode page info copied from ST318451LW>> */
1837
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1) for MODE SENSE */
	static const unsigned char err_recov_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff};
	const int plen = (int)sizeof(err_recov_pg);

	if (pcontrol == 1) {
		/* changeable values: keep header, report no changeable bits */
		p[0] = err_recov_pg[0];
		p[1] = err_recov_pg[1];
		memset(p + 2, 0, plen - 2);
	} else {
		memcpy(p, err_recov_pg, plen);
	}
	return plen;
}
1848
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2) for MODE SENSE */
	static const unsigned char disconnect_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};
	const int plen = (int)sizeof(disconnect_pg);

	memcpy(p, disconnect_pg, plen);
	if (pcontrol == 1)	/* changeable values: none */
		memset(p + 2, 0, plen - 2);
	return plen;
}
1859
1860 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1861 {       /* Format device page for mode_sense */
1862         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1863                                      0, 0, 0, 0, 0, 0, 0, 0,
1864                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1865
1866         memcpy(p, format_pg, sizeof(format_pg));
1867         put_unaligned_be16(sdebug_sectors_per, p + 10);
1868         put_unaligned_be16(sdebug_sector_size, p + 12);
1869         if (sdebug_removable)
1870                 p[20] |= 0x20; /* should agree with INQUIRY */
1871         if (1 == pcontrol)
1872                 memset(p + 2, 0, sizeof(format_pg) - 2);
1873         return sizeof(format_pg);
1874 }
1875
/* Caching mode page (0x8) current values. Mutable: resp_caching_pg()
 * clears the WCE bit (byte 2, bit 2) when SDEBUG_OPT_N_WCE is set. */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
1879
1880 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1881 {       /* Caching page for mode_sense */
1882         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1883                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1884         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1885                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1886
1887         if (SDEBUG_OPT_N_WCE & sdebug_opts)
1888                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1889         memcpy(p, caching_pg, sizeof(caching_pg));
1890         if (1 == pcontrol)
1891                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1892         else if (2 == pcontrol)
1893                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1894         return sizeof(caching_pg);
1895 }
1896
/* Control mode page (0xa) current values. Mutable: resp_ctrl_m_pg()
 * updates the D_SENSE and ATO bits from module parameters. */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
1899
1900 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1901 {       /* Control mode page for mode_sense */
1902         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1903                                         0, 0, 0, 0};
1904         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1905                                      0, 0, 0x2, 0x4b};
1906
1907         if (sdebug_dsense)
1908                 ctrl_m_pg[2] |= 0x4;
1909         else
1910                 ctrl_m_pg[2] &= ~0x4;
1911
1912         if (sdebug_ato)
1913                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1914
1915         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1916         if (1 == pcontrol)
1917                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1918         else if (2 == pcontrol)
1919                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1920         return sizeof(ctrl_m_pg);
1921 }
1922
1923
1924 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1925 {       /* Informational Exceptions control mode page for mode_sense */
1926         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1927                                        0, 0, 0x0, 0x0};
1928         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1929                                       0, 0, 0x0, 0x0};
1930
1931         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1932         if (1 == pcontrol)
1933                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1934         else if (2 == pcontrol)
1935                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1936         return sizeof(iec_m_pg);
1937 }
1938
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP protocol specific port mode page (0x19), short format */
	static const unsigned char sas_sf_m_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
	const int plen = (int)sizeof(sas_sf_m_pg);

	if (pcontrol == 1) {
		/* changeable values: keep header, zero the parameters */
		p[0] = sas_sf_m_pg[0];
		p[1] = sas_sf_m_pg[1];
		memset(p + 2, 0, plen - 2);
	} else {
		memcpy(p, sas_sf_m_pg, plen);
	}
	return plen;
}
1949
1950
/* SAS phy control and discover mode subpage (page 0x59, subpage 0x1) for
 * MODE SENSE. Reports two phy descriptors; SAS addresses are filled from
 * the naa3 identifier constants and the attached phy identifiers at
 * offsets 20 and 68 are derived from target_dev_id. */
static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,     /* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* patch SAS addresses into both phy descriptors */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		/* changeable values: keep 4 byte subpage header only */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
1983
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage (0x59, 0x2) */
	static const unsigned char sas_sha_m_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	};
	const int plen = (int)sizeof(sas_sha_m_pg);

	memcpy(p, sas_sha_m_pg, plen);
	if (pcontrol == 1)
		/* changeable: keep 4 byte subpage header, zero the rest */
		memset(p + 4, 0, plen - 4);
	return plen;
}
1995
1996 #define SDEBUG_MAX_MSENSE_SZ 256
1997
1998 static int resp_mode_sense(struct scsi_cmnd *scp,
1999                            struct sdebug_dev_info *devip)
2000 {
2001         int pcontrol, pcode, subpcode, bd_len;
2002         unsigned char dev_spec;
2003         int alloc_len, offset, len, target_dev_id;
2004         int target = scp->device->id;
2005         unsigned char * ap;
2006         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2007         unsigned char *cmd = scp->cmnd;
2008         bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2009
2010         dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
2011         pcontrol = (cmd[2] & 0xc0) >> 6;
2012         pcode = cmd[2] & 0x3f;
2013         subpcode = cmd[3];
2014         msense_6 = (MODE_SENSE == cmd[0]);
2015         llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2016         is_disk = (sdebug_ptype == TYPE_DISK);
2017         if (is_disk && !dbd)
2018                 bd_len = llbaa ? 16 : 8;
2019         else
2020                 bd_len = 0;
2021         alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2022         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2023         if (0x3 == pcontrol) {  /* Saving values not supported */
2024                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2025                 return check_condition_result;
2026         }
2027         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2028                         (devip->target * 1000) - 3;
2029         /* for disks set DPOFUA bit and clear write protect (WP) bit */
2030         if (is_disk)
2031                 dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
2032         else
2033                 dev_spec = 0x0;
2034         if (msense_6) {
2035                 arr[2] = dev_spec;
2036                 arr[3] = bd_len;
2037                 offset = 4;
2038         } else {
2039                 arr[3] = dev_spec;
2040                 if (16 == bd_len)
2041                         arr[4] = 0x1;   /* set LONGLBA bit */
2042                 arr[7] = bd_len;        /* assume 255 or less */
2043                 offset = 8;
2044         }
2045         ap = arr + offset;
2046         if ((bd_len > 0) && (!sdebug_capacity))
2047                 sdebug_capacity = get_sdebug_capacity();
2048
2049         if (8 == bd_len) {
2050                 if (sdebug_capacity > 0xfffffffe)
2051                         put_unaligned_be32(0xffffffff, ap + 0);
2052                 else
2053                         put_unaligned_be32(sdebug_capacity, ap + 0);
2054                 put_unaligned_be16(sdebug_sector_size, ap + 6);
2055                 offset += bd_len;
2056                 ap = arr + offset;
2057         } else if (16 == bd_len) {
2058                 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2059                 put_unaligned_be32(sdebug_sector_size, ap + 12);
2060                 offset += bd_len;
2061                 ap = arr + offset;
2062         }
2063
2064         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2065                 /* TODO: Control Extension page */
2066                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2067                 return check_condition_result;
2068         }
2069         bad_pcode = false;
2070
2071         switch (pcode) {
2072         case 0x1:       /* Read-Write error recovery page, direct access */
2073                 len = resp_err_recov_pg(ap, pcontrol, target);
2074                 offset += len;
2075                 break;
2076         case 0x2:       /* Disconnect-Reconnect page, all devices */
2077                 len = resp_disconnect_pg(ap, pcontrol, target);
2078                 offset += len;
2079                 break;
2080         case 0x3:       /* Format device page, direct access */
2081                 if (is_disk) {
2082                         len = resp_format_pg(ap, pcontrol, target);
2083                         offset += len;
2084                 } else
2085                         bad_pcode = true;
2086                 break;
2087         case 0x8:       /* Caching page, direct access */
2088                 if (is_disk) {
2089                         len = resp_caching_pg(ap, pcontrol, target);
2090                         offset += len;
2091                 } else
2092                         bad_pcode = true;
2093                 break;
2094         case 0xa:       /* Control Mode page, all devices */
2095                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2096                 offset += len;
2097                 break;
2098         case 0x19:      /* if spc==1 then sas phy, control+discover */
2099                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2100                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2101                         return check_condition_result;
2102                 }
2103                 len = 0;
2104                 if ((0x0 == subpcode) || (0xff == subpcode))
2105                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2106                 if ((0x1 == subpcode) || (0xff == subpcode))
2107                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2108                                                   target_dev_id);
2109                 if ((0x2 == subpcode) || (0xff == subpcode))
2110                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2111                 offset += len;
2112                 break;
2113         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2114                 len = resp_iec_m_pg(ap, pcontrol, target);
2115                 offset += len;
2116                 break;
2117         case 0x3f:      /* Read all Mode pages */
2118                 if ((0 == subpcode) || (0xff == subpcode)) {
2119                         len = resp_err_recov_pg(ap, pcontrol, target);
2120                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2121                         if (is_disk) {
2122                                 len += resp_format_pg(ap + len, pcontrol,
2123                                                       target);
2124                                 len += resp_caching_pg(ap + len, pcontrol,
2125                                                        target);
2126                         }
2127                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2128                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2129                         if (0xff == subpcode) {
2130                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2131                                                   target, target_dev_id);
2132                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2133                         }
2134                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2135                         offset += len;
2136                 } else {
2137                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2138                         return check_condition_result;
2139                 }
2140                 break;
2141         default:
2142                 bad_pcode = true;
2143                 break;
2144         }
2145         if (bad_pcode) {
2146                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2147                 return check_condition_result;
2148         }
2149         if (msense_6)
2150                 arr[0] = offset - 1;
2151         else
2152                 put_unaligned_be16((offset - 2), arr + 0);
2153         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2154 }
2155
2156 #define SDEBUG_MAX_MSELECT_SZ 512
2157
/* Respond to MODE SELECT(6) and MODE SELECT(10) commands. Only the
 * Caching (0x8), Control (0xa) and Informational Exceptions (0x1c) mode
 * pages may be changed; a successful change raises the MODE PARAMETERS
 * CHANGED unit attention for this device. */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* Page Format bit */
	sp = cmd[1] & 0x1;	/* Save Pages bit (not supported) */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	/* require PF=1, SP=0 and a parameter list that fits in arr */
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	/* The MODE DATA LENGTH header field is reserved on MODE SELECT, so
	 * the computed md_len must not exceed the field size itself;
	 * anything larger means the initiator filled in a reserved field. */
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);	/* offset of first mode page */
	mpage = arr[off] & 0x3f;		/* page code */
	ps = !!(arr[off] & 0x80);		/* PS bit must be zero */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF: long (sub-page) format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		/* only accept a page whose length matches the current one */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* D_SENSE bit selects descriptor sense format */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2235
/* Fill in the Temperature log page (0xd) body: parameter 0x0000
 * (current value 38) followed by parameter 0x0001 (value 65). Each
 * parameter is 2 bytes of code, control byte 0x3, length byte 2 and a
 * 2 byte value. Returns the number of bytes written. */
static int resp_temp_l_pg(unsigned char *arr)
{
	/* static const: avoids rebuilding the template on the stack on
	 * every call */
	static const unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
						  0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2245
2246 static int resp_ie_l_pg(unsigned char * arr)
2247 {
2248         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2249                 };
2250
2251         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2252         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2253                 arr[4] = THRESHOLD_EXCEEDED;
2254                 arr[5] = 0xff;
2255         }
2256         return sizeof(ie_l_pg);
2257 }
2258
2259 #define SDEBUG_MAX_LSENSE_SZ 512
2260
2261 static int resp_log_sense(struct scsi_cmnd * scp,
2262                           struct sdebug_dev_info * devip)
2263 {
2264         int ppc, sp, pcode, subpcode, alloc_len, len, n;
2265         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2266         unsigned char *cmd = scp->cmnd;
2267
2268         memset(arr, 0, sizeof(arr));
2269         ppc = cmd[1] & 0x2;
2270         sp = cmd[1] & 0x1;
2271         if (ppc || sp) {
2272                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2273                 return check_condition_result;
2274         }
2275         pcode = cmd[2] & 0x3f;
2276         subpcode = cmd[3] & 0xff;
2277         alloc_len = get_unaligned_be16(cmd + 7);
2278         arr[0] = pcode;
2279         if (0 == subpcode) {
2280                 switch (pcode) {
2281                 case 0x0:       /* Supported log pages log page */
2282                         n = 4;
2283                         arr[n++] = 0x0;         /* this page */
2284                         arr[n++] = 0xd;         /* Temperature */
2285                         arr[n++] = 0x2f;        /* Informational exceptions */
2286                         arr[3] = n - 4;
2287                         break;
2288                 case 0xd:       /* Temperature log page */
2289                         arr[3] = resp_temp_l_pg(arr + 4);
2290                         break;
2291                 case 0x2f:      /* Informational exceptions log page */
2292                         arr[3] = resp_ie_l_pg(arr + 4);
2293                         break;
2294                 default:
2295                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2296                         return check_condition_result;
2297                 }
2298         } else if (0xff == subpcode) {
2299                 arr[0] |= 0x40;
2300                 arr[1] = subpcode;
2301                 switch (pcode) {
2302                 case 0x0:       /* Supported log pages and subpages log page */
2303                         n = 4;
2304                         arr[n++] = 0x0;
2305                         arr[n++] = 0x0;         /* 0,0 page */
2306                         arr[n++] = 0x0;
2307                         arr[n++] = 0xff;        /* this page */
2308                         arr[n++] = 0xd;
2309                         arr[n++] = 0x0;         /* Temperature */
2310                         arr[n++] = 0x2f;
2311                         arr[n++] = 0x0; /* Informational exceptions */
2312                         arr[3] = n - 4;
2313                         break;
2314                 case 0xd:       /* Temperature subpages */
2315                         n = 4;
2316                         arr[n++] = 0xd;
2317                         arr[n++] = 0x0;         /* Temperature */
2318                         arr[3] = n - 4;
2319                         break;
2320                 case 0x2f:      /* Informational exceptions subpages */
2321                         n = 4;
2322                         arr[n++] = 0x2f;
2323                         arr[n++] = 0x0;         /* Informational exceptions */
2324                         arr[3] = n - 4;
2325                         break;
2326                 default:
2327                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2328                         return check_condition_result;
2329                 }
2330         } else {
2331                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2332                 return check_condition_result;
2333         }
2334         len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2335         return fill_from_dev_buffer(scp, arr,
2336                     min(len, SDEBUG_MAX_INQ_ARR_SZ));
2337 }
2338
/* Validate the LBA range [lba, lba + num) of a media access command.
 * Returns 0 when the range fits the simulated capacity and the transfer
 * length does not exceed the backing store, else a check condition
 * result with the appropriate sense data set on 'scp'. The capacity
 * check deliberately runs first so an out-of-range LBA reports
 * LBA_OUT_OF_RANGE even if the length is also excessive. */
static int check_device_access_params(struct scsi_cmnd *scp,
				      unsigned long long lba, unsigned int num)
{
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	return 0;
}
2354
/* Returns number of bytes copied or -1 if error. Copies 'num' sectors
 * starting at 'lba' between the command's data scatterlist and the fake
 * store, in the direction selected by 'do_write'. The fake store may be
 * smaller than the advertised capacity, so accesses wrap around its
 * end. */
static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
			    bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	/* non-bidirectional commands must match the expected direction */
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	/* do_div() divides lba in place and returns the remainder, so
	 * 'block' is the starting sector within the backing store */
	block = do_div(lba, sdebug_store_sectors);
	/* 'rest' counts the sectors that wrap past the end of the store
	 * and are serviced from its beginning */
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, 0, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* wrap-around: copy the remainder from/to the store start */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    (num - rest) * sdebug_sector_size, do_write);
	}

	return ret;
}
2395
2396 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2397  * arr into fake_store(lba,num) and return true. If comparison fails then
2398  * return false. */
2399 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2400 {
2401         bool res;
2402         u64 block, rest = 0;
2403         u32 store_blks = sdebug_store_sectors;
2404         u32 lb_size = sdebug_sector_size;
2405
2406         block = do_div(lba, store_blks);
2407         if (block + num > store_blks)
2408                 rest = block + num - store_blks;
2409
2410         res = !memcmp(fake_storep + (block * lb_size), arr,
2411                       (num - rest) * lb_size);
2412         if (!res)
2413                 return res;
2414         if (rest)
2415                 res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
2416                              rest * lb_size);
2417         if (!res)
2418                 return res;
2419         arr += num * lb_size;
2420         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2421         if (rest)
2422                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2423                        rest * lb_size);
2424         return res;
2425 }
2426
2427 static __be16 dif_compute_csum(const void *buf, int len)
2428 {
2429         __be16 csum;
2430
2431         if (sdebug_guard)
2432                 csum = (__force __be16)ip_compute_csum(buf, len);
2433         else
2434                 csum = cpu_to_be16(crc_t10dif(buf, len));
2435
2436         return csum;
2437 }
2438
/* Verify one protection information tuple against its sector of data.
 * Returns 0 on success, 0x01 for a guard tag mismatch and 0x03 for a
 * reference tag mismatch; callers feed these values into the sense
 * data. The guard check runs first, so it wins when both tags are bad. */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	/* Type 1: ref tag must equal the low 32 bits of the sector number */
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	/* Type 2: ref tag must equal the expected initial LBA from the cdb */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
2465
/* Copy protection information tuples for 'sectors' sectors starting at
 * 'sector' between dif_storep and the command's protection scatterlist:
 * store -> sgl when 'read' is true, sgl -> store otherwise. Like the
 * data store, dif_storep may be smaller than the capacity, so ranges
 * wrap around to its beginning. */
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
			(read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		/* 'rest' is the byte count that would overrun dif_storep;
		 * it is serviced from the start of the store instead */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
2508
/* Verify stored protection information for 'sectors' sectors starting
 * at 'start_sec' against the fake store, then copy the PI tuples into
 * the command's protection scatterlist. An app tag of 0xffff marks a
 * sector whose checking is disabled. Returns 0 on success or the
 * dif_verify() error code of the first failing sector (dif_errors is
 * bumped in that case). */
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct t10_pi_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		/* app tag 0xffff: checking disabled for this sector */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
2537
/* Respond to the READ family of commands: READ(6/10/12/16), READ(32)
 * (reached via the default case) and XDWRITEREAD(10). Decodes lba/num
 * from the cdb, applies any configured error injections, optionally
 * verifies protection information, and copies data from the fake store
 * into the command's scatterlist. */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	/* decode lba, transfer length and expected initial LBA per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 forbids the RDPROTECT bits in these opcodes */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			/* injected short transfer: halve the block count */
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	/* injected medium error for reads overlapping the configured range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	/* post-transfer error injections (sense set after data movement) */
	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
2678
/* Dump 'len' bytes of 'buf' to the kernel log, 16 bytes per row;
 * printable ASCII is shown as the character itself, everything else as
 * two hex digits. */
static void dump_sector(unsigned char *buf, int len)
{
	int row, col, pos;

	pr_err(">>> Sector Dump <<<\n");
	for (row = 0; row < len; row += 16) {
		char line[128];

		pos = 0;
		for (col = 0; col < 16; col++) {
			unsigned char c = buf[row + col];

			if (c >= 0x20 && c < 0x7e)
				pos += scnprintf(line + pos,
						 sizeof(line) - pos,
						 " %c ", c);
			else
				pos += scnprintf(line + pos,
						 sizeof(line) - pos,
						 "%02x ", c);
		}
		pr_err("%04d: %s\n", row, line);
	}
}
2700
/* Verify the protection information accompanying a write against the
 * data payload, walking the data and protection scatterlists in step.
 * On success the PI tuples are copied into dif_storep and 0 is
 * returned; on failure the offending sector is dumped, dif_errors is
 * bumped and the dif_verify() error code is returned. */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			/* data sgl exhausted before protection sgl */
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		/* let the next sg_miter_next() resume inside this page */
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
2772
/* Map an LBA to its index in the thin-provisioning bitmap, accounting
 * for the configured unmap alignment and granularity. Note that
 * sector_div() divides its first argument in place and returns the
 * remainder, so the 'lba' returned below is the quotient. */
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}
2780
/* Inverse of lba_to_map_index(): the first LBA covered by provisioning
 * bitmap entry 'index'. */
static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
2789
/* Report whether 'lba' is mapped (provisioned) in the TP bitmap and set
 * *num to the number of consecutive sectors sharing that state, capped
 * at the backing store size. Returns non-zero when mapped. */
static unsigned int map_state(sector_t lba, unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, map_storep);

	/* find where the current mapped/unmapped run ends */
	if (mapped)
		next = find_next_zero_bit(map_storep, map_size, index);
	else
		next = find_next_bit(map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
2809
2810 static void map_region(sector_t lba, unsigned int len)
2811 {
2812         sector_t end = lba + len;
2813
2814         while (lba < end) {
2815                 unsigned long index = lba_to_map_index(lba);
2816
2817                 if (index < map_size)
2818                         set_bit(index, map_storep);
2819
2820                 lba = map_index_to_lba(index + 1);
2821         }
2822 }
2823
/* Clear the mapped state of every provisioning block that lies wholly
 * inside [lba, lba + len); partially covered blocks are left mapped.
 * When sdebug_lbprz is set the freed data is filled with 0x00 (lbprz
 * odd) or 0xff, and any protection data is set to 0xff. */
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* only unmap blocks aligned to and fully inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
2851
/* Respond to WRITE(6/10/12/16/32) and XDWRITEREAD(10).  Decodes LBA and
 * transfer length from the cdb, applies DIF/DIX protection checks when
 * configured, copies the data-out buffer into the fake store under the
 * atomic_rw write lock and, when logical block provisioning is active,
 * marks the written region mapped.  Returns 0, a check condition result,
 * or DID_ERROR << 16 on a transfer failure. */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;	/* expected initial LBA; non-zero only for WRITE(32) */
	unsigned long iflags;
	int ret;
	bool check_prot;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		/* WRITE(6) carries a 21 bit LBA spread over bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		/* transfer length 0 means 256 blocks for WRITE(6) */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* type 2 protection requires WRPROTECT == 0 in these cdbs */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* optional error injection configured via the queued command */
	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
2972
/* Common worker for WRITE SAME(10/16).  When the UNMAP bit is honoured
 * the region is simply deallocated.  Otherwise one logical block is
 * fetched from the data-out buffer (or zeroed when NDOB is set) and
 * replicated across the whole extent under the atomic_rw write lock.
 * NOTE(review): ei_lba is accepted but not used by this worker. */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;
	u64 lba_off;	/* byte offset of lba within fake_storep */

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}

	lba_off = lba * sdebug_sector_size;
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	if (ndob) {
		memset(fake_storep + lba_off, 0, sdebug_sector_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
					  sdebug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, "write same",
			    num * sdebug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
		       fake_storep + lba_off,
		       sdebug_sector_size);

	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
3023
3024 static int resp_write_same_10(struct scsi_cmnd *scp,
3025                               struct sdebug_dev_info *devip)
3026 {
3027         u8 *cmd = scp->cmnd;
3028         u32 lba;
3029         u16 num;
3030         u32 ei_lba = 0;
3031         bool unmap = false;
3032
3033         if (cmd[1] & 0x8) {
3034                 if (sdebug_lbpws10 == 0) {
3035                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3036                         return check_condition_result;
3037                 } else
3038                         unmap = true;
3039         }
3040         lba = get_unaligned_be32(cmd + 2);
3041         num = get_unaligned_be16(cmd + 7);
3042         if (num > sdebug_write_same_length) {
3043                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3044                 return check_condition_result;
3045         }
3046         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3047 }
3048
3049 static int resp_write_same_16(struct scsi_cmnd *scp,
3050                               struct sdebug_dev_info *devip)
3051 {
3052         u8 *cmd = scp->cmnd;
3053         u64 lba;
3054         u32 num;
3055         u32 ei_lba = 0;
3056         bool unmap = false;
3057         bool ndob = false;
3058
3059         if (cmd[1] & 0x8) {     /* UNMAP */
3060                 if (sdebug_lbpws == 0) {
3061                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3062                         return check_condition_result;
3063                 } else
3064                         unmap = true;
3065         }
3066         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3067                 ndob = true;
3068         lba = get_unaligned_be64(cmd + 2);
3069         num = get_unaligned_be32(cmd + 10);
3070         if (num > sdebug_write_same_length) {
3071                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3072                 return check_condition_result;
3073         }
3074         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3075 }
3076
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future. */
/* WRITE BUFFER: simulate microcode download by raising the appropriate
 * unit attentions; the buffer contents themselves are ignored. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				/* issuing LU gets only the bus reset UA */
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
3125
/* COMPARE AND WRITE: fetch 2*num blocks from the data-out buffer (first
 * the compare data, then the write data), compare the first half against
 * the fake store and, on a match, write the second half.  A mismatch
 * yields a MISCOMPARE check condition. */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;		/* holds compare + write halves */
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;		/* 2 * num: blocks to fetch from data-out */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
3201
/* One 16 byte UNMAP block descriptor as laid out in the parameter list
 * (big-endian on the wire, hence the __be types). */
struct unmap_block_desc {
	__be64	lba;		/* first LBA of the extent to deallocate */
	__be32	blocks;		/* number of logical blocks in the extent */
	__be32	__reserved;
};
3207
3208 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3209 {
3210         unsigned char *buf;
3211         struct unmap_block_desc *desc;
3212         unsigned int i, payload_len, descriptors;
3213         int ret;
3214         unsigned long iflags;
3215
3216
3217         if (!scsi_debug_lbp())
3218                 return 0;       /* fib and say its done */
3219         payload_len = get_unaligned_be16(scp->cmnd + 7);
3220         BUG_ON(scsi_bufflen(scp) != payload_len);
3221
3222         descriptors = (payload_len - 8) / 16;
3223         if (descriptors > sdebug_unmap_max_desc) {
3224                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3225                 return check_condition_result;
3226         }
3227
3228         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3229         if (!buf) {
3230                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3231                                 INSUFF_RES_ASCQ);
3232                 return check_condition_result;
3233         }
3234
3235         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3236
3237         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3238         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3239
3240         desc = (void *)&buf[8];
3241
3242         write_lock_irqsave(&atomic_rw, iflags);
3243
3244         for (i = 0 ; i < descriptors ; i++) {
3245                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3246                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3247
3248                 ret = check_device_access_params(scp, lba, num);
3249                 if (ret)
3250                         goto out;
3251
3252                 unmap_region(lba, num);
3253         }
3254
3255         ret = 0;
3256
3257 out:
3258         write_unlock_irqrestore(&atomic_rw, iflags);
3259         kfree(buf);
3260
3261         return ret;
3262 }
3263
#define SDEBUG_GET_LBA_STATUS_LEN 32

/* GET LBA STATUS: report whether the extent starting at the given LBA is
 * mapped or deallocated.  A single LBA status descriptor is returned; when
 * provisioning is off everything up to capacity is reported as mapped. */
static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
	int ret;

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	/* too small to hold the header plus one descriptor */
	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1);
	if (ret)
		return ret;

	if (scsi_debug_lbp())
		mapped = map_state(lba, &num);
	else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
3305
#define RL_BUCKET_ELEMS 8

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
/* REPORT LUNS: builds the response in fixed-size buckets of
 * RL_BUCKET_ELEMS entries so only a small stack array is needed,
 * copying each full bucket to the data-in buffer as it is filled. */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket starts with the 8 byte header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* bucket not filled: last (partial) bucket */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	/* append the W-LUN entry to the final partial bucket if requested */
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
3406
/* XOR step of XDWRITEREAD(10): XOR the data-in buffer (the blocks placed
 * there by the preceding read) in place with the data-out buffer supplied
 * by the initiator.  Uses an sg_miter so the data-in scatterlist can be
 * modified without a second bounce buffer. */
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* snapshot the data-out buffer into buf */
	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	/* walk the data-in sg list, XORing each mapped chunk with buf */
	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
3442
3443 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3444                                struct sdebug_dev_info *devip)
3445 {
3446         u8 *cmd = scp->cmnd;
3447         u64 lba;
3448         u32 num;
3449         int errsts;
3450
3451         if (!scsi_bidi_cmnd(scp)) {
3452                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3453                                 INSUFF_RES_ASCQ);
3454                 return check_condition_result;
3455         }
3456         errsts = resp_read_dt0(scp, devip);
3457         if (errsts)
3458                 return errsts;
3459         if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3460                 errsts = resp_write_dt0(scp, devip);
3461                 if (errsts)
3462                         return errsts;
3463         }
3464         lba = get_unaligned_be32(cmd + 2);
3465         num = get_unaligned_be16(cmd + 7);
3466         return resp_xdwriteread(scp, lba, num, devip);
3467 }
3468
3469 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3470 {
3471         struct sdebug_queue *sqp = sdebug_q_arr;
3472
3473         if (sdebug_mq_active) {
3474                 u32 tag = blk_mq_unique_tag(cmnd->request);
3475                 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3476
3477                 if (unlikely(hwq >= submit_queues)) {
3478                         pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3479                         hwq %= submit_queues;
3480                 }
3481                 pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3482                 return sqp + hwq;
3483         } else
3484                 return sqp;
3485 }
3486
/* Queued (deferred) command completions converge here. */
/* Looks up the queued command referenced by sd_dp, validates it, clears
 * its in-use slot under the queue lock, handles a user-initiated
 * max_queue reduction, and finally invokes the mid level's done callback
 * outside the lock. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* count completions that migrated off the issuing CPU */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* non-zero retired_max_queue means max_queue was reduced while
	 * commands beyond the new limit were still outstanding */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/* once no slot above the new limit remains in use, the
		 * retirement is complete */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	scp->scsi_done(scp); /* callback to mid level */
}
3551
/* When high resolution timer goes off this function is called. */
/* Recovers the sdebug_defer that embeds the timer and completes it. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;	/* one-shot timer */
}
3560
/* When work queue schedules work, it calls this function. */
/* Recovers the sdebug_defer that embeds the work item and completes it. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}
3568
/* lazily generated designator shared by all LUs when sdebug_uuid_ctl == 2;
 * see sdebug_device_create() */
static bool got_shared_uuid;
static uuid_t shared_uuid;
3571
3572 static struct sdebug_dev_info *sdebug_device_create(
3573                         struct sdebug_host_info *sdbg_host, gfp_t flags)
3574 {
3575         struct sdebug_dev_info *devip;
3576
3577         devip = kzalloc(sizeof(*devip), flags);
3578         if (devip) {
3579                 if (sdebug_uuid_ctl == 1)
3580                         uuid_gen(&devip->lu_name);
3581                 else if (sdebug_uuid_ctl == 2) {
3582                         if (got_shared_uuid)
3583                                 devip->lu_name = shared_uuid;
3584                         else {
3585                                 uuid_gen(&shared_uuid);
3586                                 got_shared_uuid = true;
3587                                 devip->lu_name = shared_uuid;
3588                         }
3589                 }
3590                 devip->sdbg_host = sdbg_host;
3591                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3592         }
3593         return devip;
3594 }
3595
3596 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3597 {
3598         struct sdebug_host_info *sdbg_host;
3599         struct sdebug_dev_info *open_devip = NULL;
3600         struct sdebug_dev_info *devip;
3601
3602         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3603         if (!sdbg_host) {
3604                 pr_err("Host info NULL\n");
3605                 return NULL;
3606         }
3607         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3608                 if ((devip->used) && (devip->channel == sdev->channel) &&
3609                     (devip->target == sdev->id) &&
3610                     (devip->lun == sdev->lun))
3611                         return devip;
3612                 else {
3613                         if ((!devip->used) && (!open_devip))
3614                                 open_devip = devip;
3615                 }
3616         }
3617         if (!open_devip) { /* try and make a new one */
3618                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3619                 if (!open_devip) {
3620                         pr_err("out of memory at line %d\n", __LINE__);
3621                         return NULL;
3622                 }
3623         }
3624
3625         open_devip->channel = sdev->channel;
3626         open_devip->target = sdev->id;
3627         open_devip->lun = sdev->lun;
3628         open_devip->sdbg_host = sdbg_host;
3629         atomic_set(&open_devip->num_in_q, 0);
3630         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3631         open_devip->used = true;
3632         return open_devip;
3633 }
3634
3635 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3636 {
3637         if (sdebug_verbose)
3638                 pr_info("slave_alloc <%u %u %u %llu>\n",
3639                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3640         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3641         return 0;
3642 }
3643
3644 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3645 {
3646         struct sdebug_dev_info *devip =
3647                         (struct sdebug_dev_info *)sdp->hostdata;
3648
3649         if (sdebug_verbose)
3650                 pr_info("slave_configure <%u %u %u %llu>\n",
3651                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3652         if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3653                 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3654         if (devip == NULL) {
3655                 devip = find_build_dev_info(sdp);
3656                 if (devip == NULL)
3657                         return 1;  /* no resources, will be marked offline */
3658         }
3659         sdp->hostdata = devip;
3660         blk_queue_max_segment_size(sdp->request_queue, -1U);
3661         if (sdebug_no_uld)
3662                 sdp->no_uld_attach = 1;
3663         return 0;
3664 }
3665
3666 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3667 {
3668         struct sdebug_dev_info *devip =
3669                 (struct sdebug_dev_info *)sdp->hostdata;
3670
3671         if (sdebug_verbose)
3672                 pr_info("slave_destroy <%u %u %u %llu>\n",
3673                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3674         if (devip) {
3675                 /* make this slot available for re-use */
3676                 devip->used = false;
3677                 sdp->hostdata = NULL;
3678         }
3679 }
3680
3681 static void stop_qc_helper(struct sdebug_defer *sd_dp)
3682 {
3683         if (!sd_dp)
3684                 return;
3685         if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
3686                 hrtimer_cancel(&sd_dp->hrt);
3687         else if (sdebug_jdelay < 0)
3688                 cancel_work_sync(&sd_dp->ew.work);
3689 }
3690
/* If @cmnd is found in any submission queue, cancels its timer or work
 * item and returns true; else returns false. Used by the abort error
 * handler. */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* if max_queue was recently lowered, slots above the new
		 * limit may still be in flight; scan up to the old limit */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the spinlock before stop_qc_helper()
				 * since cancel_work_sync() may sleep */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
3730
/* Deletes (stops) timers or work queues of all queued commands; invoked
 * from the host reset path. */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				/* drop the lock around stop_qc_helper() (it
				 * may sleep in cancel_work_sync()), then
				 * re-take it before examining the next slot */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
3763
3764 /* Free queued command memory on heap */
3765 static void free_all_queued(void)
3766 {
3767         int j, k;
3768         struct sdebug_queue *sqp;
3769         struct sdebug_queued_cmd *sqcp;
3770
3771         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3772                 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
3773                         sqcp = &sqp->qc_arr[k];
3774                         kfree(sqcp->sd_dp);
3775                         sqcp->sd_dp = NULL;
3776                 }
3777         }
3778 }
3779
3780 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3781 {
3782         bool ok;
3783
3784         ++num_aborts;
3785         if (SCpnt) {
3786                 ok = stop_queued_cmnd(SCpnt);
3787                 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3788                         sdev_printk(KERN_INFO, SCpnt->device,
3789                                     "%s: command%s found\n", __func__,
3790                                     ok ? "" : " not");
3791         }
3792         return SUCCESS;
3793 }
3794
3795 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3796 {
3797         ++num_dev_resets;
3798         if (SCpnt && SCpnt->device) {
3799                 struct scsi_device *sdp = SCpnt->device;
3800                 struct sdebug_dev_info *devip =
3801                                 (struct sdebug_dev_info *)sdp->hostdata;
3802
3803                 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3804                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3805                 if (devip)
3806                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
3807         }
3808         return SUCCESS;
3809 }
3810
3811 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3812 {
3813         struct sdebug_host_info *sdbg_host;
3814         struct sdebug_dev_info *devip;
3815         struct scsi_device *sdp;
3816         struct Scsi_Host *hp;
3817         int k = 0;
3818
3819         ++num_target_resets;
3820         if (!SCpnt)
3821                 goto lie;
3822         sdp = SCpnt->device;
3823         if (!sdp)
3824                 goto lie;
3825         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3826                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3827         hp = sdp->host;
3828         if (!hp)
3829                 goto lie;
3830         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3831         if (sdbg_host) {
3832                 list_for_each_entry(devip,
3833                                     &sdbg_host->dev_info_list,
3834                                     dev_list)
3835                         if (devip->target == sdp->id) {
3836                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3837                                 ++k;
3838                         }
3839         }
3840         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3841                 sdev_printk(KERN_INFO, sdp,
3842                             "%s: %d device(s) found in target\n", __func__, k);
3843 lie:
3844         return SUCCESS;
3845 }
3846
3847 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3848 {
3849         struct sdebug_host_info *sdbg_host;
3850         struct sdebug_dev_info *devip;
3851         struct scsi_device * sdp;
3852         struct Scsi_Host * hp;
3853         int k = 0;
3854
3855         ++num_bus_resets;
3856         if (!(SCpnt && SCpnt->device))
3857                 goto lie;
3858         sdp = SCpnt->device;
3859         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
3860                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3861         hp = sdp->host;
3862         if (hp) {
3863                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3864                 if (sdbg_host) {
3865                         list_for_each_entry(devip,
3866                                             &sdbg_host->dev_info_list,
3867                                             dev_list) {
3868                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3869                                 ++k;
3870                         }
3871                 }
3872         }
3873         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3874                 sdev_printk(KERN_INFO, sdp,
3875                             "%s: %d device(s) found in host\n", __func__, k);
3876 lie:
3877         return SUCCESS;
3878 }
3879
3880 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3881 {
3882         struct sdebug_host_info * sdbg_host;
3883         struct sdebug_dev_info *devip;
3884         int k = 0;
3885
3886         ++num_host_resets;
3887         if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3888                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3889         spin_lock(&sdebug_host_list_lock);
3890         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3891                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3892                                     dev_list) {
3893                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3894                         ++k;
3895                 }
3896         }
3897         spin_unlock(&sdebug_host_list_lock);
3898         stop_all_queued();
3899         if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3900                 sdev_printk(KERN_INFO, SCpnt->device,
3901                             "%s: %d device(s) found\n", __func__, k);
3902         return SUCCESS;
3903 }
3904
/* Write a DOS-style MBR partition table into the first block of the ram
 * store @ramp, dividing @store_size bytes into sdebug_num_parts equal
 * Linux (0x83) partitions aligned to cylinder (heads * sectors-per-track)
 * boundaries. Does nothing for stores smaller than 1 MiB. */
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];	/* +2: end sentinel and 0 terminator */
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	/* first track is reserved for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	/* round each start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* loop terminator below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		/* CHS address of the first sector of the partition */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* CHS address of the last sector of the partition */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* LBA start/length fields are little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
3954
3955 static void block_unblock_all_queues(bool block)
3956 {
3957         int j;
3958         struct sdebug_queue *sqp;
3959
3960         for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
3961                 atomic_set(&sqp->blocked, (int)block);
3962 }
3963
3964 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3965  * commands will be processed normally before triggers occur.
3966  */
3967 static void tweak_cmnd_count(void)
3968 {
3969         int count, modulo;
3970
3971         modulo = abs(sdebug_every_nth);
3972         if (modulo < 2)
3973                 return;
3974         block_unblock_all_queues(true);
3975         count = atomic_read(&sdebug_cmnd_count);
3976         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
3977         block_unblock_all_queues(false);
3978 }
3979
3980 static void clear_queue_stats(void)
3981 {
3982         atomic_set(&sdebug_cmnd_count, 0);
3983         atomic_set(&sdebug_completions, 0);
3984         atomic_set(&sdebug_miss_cpus, 0);
3985         atomic_set(&sdebug_a_tsf, 0);
3986 }
3987
3988 static void setup_inject(struct sdebug_queue *sqp,
3989                          struct sdebug_queued_cmd *sqcp)
3990 {
3991         if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
3992                 return;
3993         sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
3994         sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
3995         sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3996         sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3997         sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
3998 }
3999
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * @cmnd:        command to complete (immediately or deferred)
 * @devip:       per-LU info; NULL yields DID_NO_CONNECT
 * @scsi_result: pre-computed result; 0 means GOOD so far
 * @delta_jiff:  0 -> respond in this thread; >0 -> hrtimer after that many
 *               jiffies; <0 -> defer via work queue
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result, int delta_jiff)
{
	unsigned long iflags;
	int k, num_in_q, qdepth, inject;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (unlikely(sdebug_verbose && scsi_result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, scsi_result);
	if (delta_jiff == 0)
		goto respond_in_thread;

	/* schedule the response at a later time if resources permit */
	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		/* queue depth exceeded: keep a non-GOOD result, otherwise
		 * turn it into TASK SET FULL */
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/* rare TASK SET FULL injection when queue is almost full */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	/* claim a free slot in this queue's command array */
	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ?  "status: TASK SET FULL" :
						    "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	cmnd->result = scsi_result;
	sd_dp = sqcp->sd_dp;	/* deferral descriptor, re-used if present */
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	if (delta_jiff > 0 || sdebug_ndelay > 0) {
		/* defer completion via a pinned, relative hrtimer */
		ktime_t kt;

		if (delta_jiff > 0) {
			struct timespec ts;

			jiffies_to_timespec(delta_jiff, &ts);
			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
		} else
			kt = sdebug_ndelay;
		if (NULL == sd_dp) {
			/* first use of this slot: allocate its descriptor */
			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
			if (NULL == sd_dp)
				return SCSI_MLQUEUE_HOST_BUSY;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (NULL == sd_dp) {
			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
			if (NULL == sd_dp)
				return SCSI_MLQUEUE_HOST_BUSY;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		schedule_work(&sd_dp->ew.work);
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
4135
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* Module parameters; each maps a sysfs name onto its sdebug_* global.
 * S_IWUSR entries are writable at runtime, the rest are read-only. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line modinfo descriptions for the parameters above. */
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4238
4239 #define SDEBUG_INFO_LEN 256
4240 static char sdebug_info[SDEBUG_INFO_LEN];
4241
4242 static const char * scsi_debug_info(struct Scsi_Host * shp)
4243 {
4244         int k;
4245
4246         k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4247                       my_name, SDEBUG_VERSION, sdebug_version_date);
4248         if (k >= (SDEBUG_INFO_LEN - 1))
4249                 return sdebug_info;
4250         scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4251                   "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4252                   sdebug_dev_size_mb, sdebug_opts, submit_queues,
4253                   "statistics", (int)sdebug_statistics);
4254         return sdebug_info;
4255 }
4256
4257 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4258 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4259                                  int length)
4260 {
4261         char arr[16];
4262         int opts;
4263         int minLen = length > 15 ? 15 : length;
4264
4265         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4266                 return -EACCES;
4267         memcpy(arr, buffer, minLen);
4268         arr[minLen] = '\0';
4269         if (1 != sscanf(arr, "%d", &opts))
4270                 return -EINVAL;
4271         sdebug_opts = opts;
4272         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4273         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4274         if (sdebug_every_nth != 0)
4275                 tweak_cmnd_count();
4276         return length;
4277 }
4278
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	/* Static configuration first, then counters, then per-queue state. */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	/* TICK_NSEC is ns per jiffy; /1000 yields microseconds per jiffy. */
	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
		   sdebug_mq_active);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	/* For each submit queue report the busy slot range, if any. */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		/* find_first_bit() == size means no bits set (queue idle) */
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
4324
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Freeze all queues, then refuse the change if any
			 * queue still has in-flight commands. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				/* a set bit means a busy command slot */
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* make sure sdebug_defer instances get
				 * re-allocated for new delay variant */
				free_all_queued();
				sdebug_jdelay = jdelay;
				/* jiffies delay overrides any ndelay */
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
4367
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* ndelay is nanoseconds and must stay below one second */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			/* Freeze all queues, then refuse the change if any
			 * queue still has in-flight commands. */
			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				/* a set bit means a busy command slot */
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* make sure sdebug_defer instances get
				 * re-allocated for new delay variant */
				free_all_queued();
				sdebug_ndelay = ndelay;
				/* non-zero ndelay supersedes jiffies delay */
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
4411
4412 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4413 {
4414         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4415 }
4416
4417 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4418                           size_t count)
4419 {
4420         int opts;
4421         char work[20];
4422
4423         if (1 == sscanf(buf, "%10s", work)) {
4424                 if (0 == strncasecmp(work,"0x", 2)) {
4425                         if (1 == sscanf(&work[2], "%x", &opts))
4426                                 goto opts_done;
4427                 } else {
4428                         if (1 == sscanf(work, "%d", &opts))
4429                                 goto opts_done;
4430                 }
4431         }
4432         return -EINVAL;
4433 opts_done:
4434         sdebug_opts = opts;
4435         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4436         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4437         tweak_cmnd_count();
4438         return count;
4439 }
4440 static DRIVER_ATTR_RW(opts);
4441
4442 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4443 {
4444         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4445 }
4446 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4447                            size_t count)
4448 {
4449         int n;
4450
4451         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4452                 sdebug_ptype = n;
4453                 return count;
4454         }
4455         return -EINVAL;
4456 }
4457 static DRIVER_ATTR_RW(ptype);
4458
4459 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4460 {
4461         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4462 }
4463 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4464                             size_t count)
4465 {
4466         int n;
4467
4468         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4469                 sdebug_dsense = n;
4470                 return count;
4471         }
4472         return -EINVAL;
4473 }
4474 static DRIVER_ATTR_RW(dsense);
4475
4476 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4477 {
4478         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4479 }
4480 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4481                              size_t count)
4482 {
4483         int n;
4484
4485         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4486                 n = (n > 0);
4487                 sdebug_fake_rw = (sdebug_fake_rw > 0);
4488                 if (sdebug_fake_rw != n) {
4489                         if ((0 == n) && (NULL == fake_storep)) {
4490                                 unsigned long sz =
4491                                         (unsigned long)sdebug_dev_size_mb *
4492                                         1048576;
4493
4494                                 fake_storep = vmalloc(sz);
4495                                 if (NULL == fake_storep) {
4496                                         pr_err("out of memory, 9\n");
4497                                         return -ENOMEM;
4498                                 }
4499                                 memset(fake_storep, 0, sz);
4500                         }
4501                         sdebug_fake_rw = n;
4502                 }
4503                 return count;
4504         }
4505         return -EINVAL;
4506 }
4507 static DRIVER_ATTR_RW(fake_rw);
4508
4509 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4510 {
4511         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4512 }
4513 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4514                               size_t count)
4515 {
4516         int n;
4517
4518         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4519                 sdebug_no_lun_0 = n;
4520                 return count;
4521         }
4522         return -EINVAL;
4523 }
4524 static DRIVER_ATTR_RW(no_lun_0);
4525
4526 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4527 {
4528         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4529 }
4530 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4531                               size_t count)
4532 {
4533         int n;
4534
4535         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4536                 sdebug_num_tgts = n;
4537                 sdebug_max_tgts_luns();
4538                 return count;
4539         }
4540         return -EINVAL;
4541 }
4542 static DRIVER_ATTR_RW(num_tgts);
4543
4544 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4545 {
4546         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4547 }
4548 static DRIVER_ATTR_RO(dev_size_mb);
4549
4550 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4551 {
4552         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4553 }
4554 static DRIVER_ATTR_RO(num_parts);
4555
4556 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4557 {
4558         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4559 }
4560 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4561                                size_t count)
4562 {
4563         int nth;
4564
4565         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4566                 sdebug_every_nth = nth;
4567                 if (nth && !sdebug_statistics) {
4568                         pr_info("every_nth needs statistics=1, set it\n");
4569                         sdebug_statistics = true;
4570                 }
4571                 tweak_cmnd_count();
4572                 return count;
4573         }
4574         return -EINVAL;
4575 }
4576 static DRIVER_ATTR_RW(every_nth);
4577
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
/* Change max_luns. If the value changed and the emulated SCSI level is
 * SPC-3 or better, raise a REPORTED LUNS DATA HAS CHANGED unit attention
 * on every existing device. */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device of every emulated host and
			 * mark the LUNS_CHANGED UA bit */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
4616
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		/* k becomes the highest in-use slot index over all queues */
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			/* find_last_bit() == size: no slots in use */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* busy slots beyond the new limit must be allowed
			 * to retire; remember the highest one (+1) */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
4652
4653 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4654 {
4655         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4656 }
4657 static DRIVER_ATTR_RO(no_uld);
4658
4659 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4660 {
4661         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4662 }
4663 static DRIVER_ATTR_RO(scsi_level);
4664
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
/* Change the virtual (reported) capacity. If it changed, raise a
 * CAPACITY DATA HAS CHANGED unit attention on every existing device. */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* walk every device of every emulated host and
			 * mark the CAPACITY_CHANGED UA bit */
			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
4699
4700 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4701 {
4702         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4703 }
4704
4705 static int sdebug_add_adapter(void);
4706 static void sdebug_remove_adapter(void);
4707
4708 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4709                               size_t count)
4710 {
4711         int delta_hosts;
4712
4713         if (sscanf(buf, "%d", &delta_hosts) != 1)
4714                 return -EINVAL;
4715         if (delta_hosts > 0) {
4716                 do {
4717                         sdebug_add_adapter();
4718                 } while (--delta_hosts);
4719         } else if (delta_hosts < 0) {
4720                 do {
4721                         sdebug_remove_adapter();
4722                 } while (++delta_hosts);
4723         }
4724         return count;
4725 }
4726 static DRIVER_ATTR_RW(add_host);
4727
4728 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4729 {
4730         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
4731 }
4732 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4733                                     size_t count)
4734 {
4735         int n;
4736
4737         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4738                 sdebug_vpd_use_hostno = n;
4739                 return count;
4740         }
4741         return -EINVAL;
4742 }
4743 static DRIVER_ATTR_RW(vpd_use_hostno);
4744
4745 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
4746 {
4747         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
4748 }
4749 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
4750                                 size_t count)
4751 {
4752         int n;
4753
4754         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
4755                 if (n > 0)
4756                         sdebug_statistics = true;
4757                 else {
4758                         clear_queue_stats();
4759                         sdebug_statistics = false;
4760                 }
4761                 return count;
4762         }
4763         return -EINVAL;
4764 }
4765 static DRIVER_ATTR_RW(statistics);
4766
4767 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4768 {
4769         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
4770 }
4771 static DRIVER_ATTR_RO(sector_size);
4772
4773 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
4774 {
4775         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
4776 }
4777 static DRIVER_ATTR_RO(submit_queues);
4778
4779 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4780 {
4781         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
4782 }
4783 static DRIVER_ATTR_RO(dix);
4784
4785 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4786 {
4787         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
4788 }
4789 static DRIVER_ATTR_RO(dif);
4790
4791 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4792 {
4793         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
4794 }
4795 static DRIVER_ATTR_RO(guard);
4796
4797 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4798 {
4799         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
4800 }
4801 static DRIVER_ATTR_RO(ato);
4802
/* Show the provisioning (mapped block) state. */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	/* without logical block provisioning the whole store is mapped */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	/* print the bitmap as a range list; limit to PAGE_SIZE - 1 so the
	 * appended newline and NUL below always fit in the page buffer */
	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
4819
4820 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4821 {
4822         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
4823 }
4824 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4825                                size_t count)
4826 {
4827         int n;
4828
4829         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4830                 sdebug_removable = (n > 0);
4831                 return count;
4832         }
4833         return -EINVAL;
4834 }
4835 static DRIVER_ATTR_RW(removable);
4836
4837 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4838 {
4839         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
4840 }
4841 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4842 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4843                                size_t count)
4844 {
4845         int n;
4846
4847         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4848                 sdebug_host_lock = (n > 0);
4849                 return count;
4850         }
4851         return -EINVAL;
4852 }
4853 static DRIVER_ATTR_RW(host_lock);
4854
4855 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4856 {
4857         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
4858 }
4859 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4860                             size_t count)
4861 {
4862         int n;
4863
4864         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4865                 sdebug_strict = (n > 0);
4866                 return count;
4867         }
4868         return -EINVAL;
4869 }
4870 static DRIVER_ATTR_RW(strict);
4871
4872 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
4873 {
4874         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
4875 }
4876 static DRIVER_ATTR_RO(uuid_ctl);
4877
4878
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	NULL,	/* sentinel for sysfs attribute iteration */
};
ATTRIBUTE_GROUPS(sdebug_drv);
4920
static struct device *pseudo_primary;

/* Module init: validate parameters, allocate the per-queue array and the
 * optional ramdisk/DIF/provisioning stores, register the pseudo bus and
 * driver, then add the initial adapter(s). On failure, unwinds in reverse
 * order via the goto labels at the bottom. */
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	/* ndelay is nanoseconds; must stay below one second. A non-zero
	 * ndelay overrides the jiffies-based delay. */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only the standard power-of-two logical block sizes are allowed */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	/* one sdebug_queue (slots + lock) per submit queue */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}

	/* allocate the ramdisk only when real reads/writes are emulated */
	if (sdebug_fake_rw == 0) {
		fake_storep = vmalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		memset(fake_storep, 0, sz);
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		/* 0xff marks protection tuples as "not yet written" */
		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	/* sdebug_add_host is re-counted as adapters are actually added */
	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			/* partial success: keep the hosts added so far */
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
5131
5132 static void __exit scsi_debug_exit(void)
5133 {
5134         int k = sdebug_add_host;
5135
5136         stop_all_queued();
5137         free_all_queued();
5138         for (; k; k--)
5139                 sdebug_remove_adapter();
5140         driver_unregister(&sdebug_driverfs_driver);
5141         bus_unregister(&pseudo_lld_bus);
5142         root_device_unregister(pseudo_primary);
5143
5144         vfree(map_storep);
5145         vfree(dif_storep);
5146         vfree(fake_storep);
5147         kfree(sdebug_q_arr);
5148 }
5149
/* Built-in init hook (device-level initcall) and module unload hook. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
5152
/* Device-core release callback: final put on the adapter frees its host. */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
5160
5161 static int sdebug_add_adapter(void)
5162 {
5163         int k, devs_per_host;
5164         int error = 0;
5165         struct sdebug_host_info *sdbg_host;
5166         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5167
5168         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5169         if (NULL == sdbg_host) {
5170                 pr_err("out of memory at line %d\n", __LINE__);
5171                 return -ENOMEM;
5172         }
5173
5174         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5175
5176         devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5177         for (k = 0; k < devs_per_host; k++) {
5178                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5179                 if (!sdbg_devinfo) {
5180                         pr_err("out of memory at line %d\n", __LINE__);
5181                         error = -ENOMEM;
5182                         goto clean;
5183                 }
5184         }
5185
5186         spin_lock(&sdebug_host_list_lock);
5187         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5188         spin_unlock(&sdebug_host_list_lock);
5189
5190         sdbg_host->dev.bus = &pseudo_lld_bus;
5191         sdbg_host->dev.parent = pseudo_primary;
5192         sdbg_host->dev.release = &sdebug_release_adapter;
5193         dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5194
5195         error = device_register(&sdbg_host->dev);
5196
5197         if (error)
5198                 goto clean;
5199
5200         ++sdebug_add_host;
5201         return error;
5202
5203 clean:
5204         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5205                                  dev_list) {
5206                 list_del(&sdbg_devinfo->dev_list);
5207                 kfree(sdbg_devinfo);
5208         }
5209
5210         kfree(sdbg_host);
5211         return error;
5212 }
5213
5214 static void sdebug_remove_adapter(void)
5215 {
5216         struct sdebug_host_info * sdbg_host = NULL;
5217
5218         spin_lock(&sdebug_host_list_lock);
5219         if (!list_empty(&sdebug_host_list)) {
5220                 sdbg_host = list_entry(sdebug_host_list.prev,
5221                                        struct sdebug_host_info, host_list);
5222                 list_del(&sdbg_host->host_list);
5223         }
5224         spin_unlock(&sdebug_host_list_lock);
5225
5226         if (!sdbg_host)
5227                 return;
5228
5229         device_unregister(&sdbg_host->dev);
5230         --sdebug_add_host;
5231 }
5232
5233 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5234 {
5235         int num_in_q = 0;
5236         struct sdebug_dev_info *devip;
5237
5238         block_unblock_all_queues(true);
5239         devip = (struct sdebug_dev_info *)sdev->hostdata;
5240         if (NULL == devip) {
5241                 block_unblock_all_queues(false);
5242                 return  -ENODEV;
5243         }
5244         num_in_q = atomic_read(&devip->num_in_q);
5245
5246         if (qdepth < 1)
5247                 qdepth = 1;
5248         /* allow to exceed max host qc_arr elements for testing */
5249         if (qdepth > SDEBUG_CANQUEUE + 10)
5250                 qdepth = SDEBUG_CANQUEUE + 10;
5251         scsi_change_queue_depth(sdev, qdepth);
5252
5253         if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5254                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5255                             __func__, qdepth, num_in_q);
5256         }
5257         block_unblock_all_queues(false);
5258         return sdev->queue_depth;
5259 }
5260
5261 static bool fake_timeout(struct scsi_cmnd *scp)
5262 {
5263         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5264                 if (sdebug_every_nth < -1)
5265                         sdebug_every_nth = -1;
5266                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5267                         return true; /* ignore command causing timeout */
5268                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5269                          scsi_medium_access_command(scp))
5270                         return true; /* time out reads and writes */
5271         }
5272         return false;
5273 }
5274
/*
 * Main command entry point invoked by the SCSI midlayer for every CDB
 * sent to a scsi_debug device.  Maps the opcode through opcode_ind_arr
 * into opcode_info_arr, applies generic checks (LUN range, optional
 * strict CDB-mask validation, pending unit attentions, stopped state),
 * then dispatches to the matching resp_* handler and queues the
 * response via schedule_resp().
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	/* When verbose, dump the CDB bytes in hex (up to 32 bytes). */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		if (sdebug_mq_active)
			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
				    my_name, blk_mq_unique_tag(scp->request),
				    b);
		else
			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
				    b);
	}
	/* Out-of-range LUNs fail unless this is the well known REPORT LUNS
	 * W-LUN. */
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	/* Some opcodes share a table slot; resolve by service action or by
	 * scanning the attached array hanging off the root entry. */
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		/* No variant matched: report invalid SA field or opcode. */
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* Only commands flagged F_RL_WLUN_OK are valid on the W-LUN. */
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	/* In strict mode, reject any CDB bit outside the opcode's mask and
	 * point the sense data at the offending byte/bit. */
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* Deliver any pending unit attention unless the command skips UAs. */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* Medium-access commands fail with NOT READY while stopped. */
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	/* every_nth error injection may silently drop this command. */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
		errsts = r_pfp(scp, devip);

fini:
	return schedule_resp(scp, devip, errsts,
			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
	return schedule_resp(scp, devip, check_condition_result, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
5421
/*
 * Host template for the pseudo adapter.  can_queue and use_clustering
 * may be overridden per module parameters in sdebug_driver_probe().
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering =	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
5448
5449 static int sdebug_driver_probe(struct device * dev)
5450 {
5451         int error = 0;
5452         struct sdebug_host_info *sdbg_host;
5453         struct Scsi_Host *hpnt;
5454         int hprot;
5455
5456         sdbg_host = to_sdebug_host(dev);
5457
5458         sdebug_driver_template.can_queue = sdebug_max_queue;
5459         if (sdebug_clustering)
5460                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5461         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5462         if (NULL == hpnt) {
5463                 pr_err("scsi_host_alloc failed\n");
5464                 error = -ENODEV;
5465                 return error;
5466         }
5467         if (submit_queues > nr_cpu_ids) {
5468                 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5469                         my_name, submit_queues, nr_cpu_ids);
5470                 submit_queues = nr_cpu_ids;
5471         }
5472         /* Decide whether to tell scsi subsystem that we want mq */
5473         /* Following should give the same answer for each host */
5474         sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
5475         if (sdebug_mq_active)
5476                 hpnt->nr_hw_queues = submit_queues;
5477
5478         sdbg_host->shost = hpnt;
5479         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5480         if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5481                 hpnt->max_id = sdebug_num_tgts + 1;
5482         else
5483                 hpnt->max_id = sdebug_num_tgts;
5484         /* = sdebug_max_luns; */
5485         hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5486
5487         hprot = 0;
5488
5489         switch (sdebug_dif) {
5490
5491         case T10_PI_TYPE1_PROTECTION:
5492                 hprot = SHOST_DIF_TYPE1_PROTECTION;
5493                 if (sdebug_dix)
5494                         hprot |= SHOST_DIX_TYPE1_PROTECTION;
5495                 break;
5496
5497         case T10_PI_TYPE2_PROTECTION:
5498                 hprot = SHOST_DIF_TYPE2_PROTECTION;
5499                 if (sdebug_dix)
5500                         hprot |= SHOST_DIX_TYPE2_PROTECTION;
5501                 break;
5502
5503         case T10_PI_TYPE3_PROTECTION:
5504                 hprot = SHOST_DIF_TYPE3_PROTECTION;
5505                 if (sdebug_dix)
5506                         hprot |= SHOST_DIX_TYPE3_PROTECTION;
5507                 break;
5508
5509         default:
5510                 if (sdebug_dix)
5511                         hprot |= SHOST_DIX_TYPE0_PROTECTION;
5512                 break;
5513         }
5514
5515         scsi_host_set_prot(hpnt, hprot);
5516
5517         if (have_dif_prot || sdebug_dix)
5518                 pr_info("host protection%s%s%s%s%s%s%s\n",
5519                         (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5520                         (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5521                         (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5522                         (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5523                         (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5524                         (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5525                         (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5526
5527         if (sdebug_guard == 1)
5528                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5529         else
5530                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5531
5532         sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5533         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5534         if (sdebug_every_nth)   /* need stats counters for every_nth */
5535                 sdebug_statistics = true;
5536         error = scsi_add_host(hpnt, &sdbg_host->dev);
5537         if (error) {
5538                 pr_err("scsi_add_host failed\n");
5539                 error = -ENODEV;
5540                 scsi_host_put(hpnt);
5541         } else
5542                 scsi_scan_host(hpnt);
5543
5544         return error;
5545 }
5546
5547 static int sdebug_driver_remove(struct device * dev)
5548 {
5549         struct sdebug_host_info *sdbg_host;
5550         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5551
5552         sdbg_host = to_sdebug_host(dev);
5553
5554         if (!sdbg_host) {
5555                 pr_err("Unable to locate host info\n");
5556                 return -ENODEV;
5557         }
5558
5559         scsi_remove_host(sdbg_host->shost);
5560
5561         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5562                                  dev_list) {
5563                 list_del(&sdbg_devinfo->dev_list);
5564                 kfree(sdbg_devinfo);
5565         }
5566
5567         scsi_host_put(sdbg_host->shost);
5568         return 0;
5569 }
5570
/* Bus match callback: every driver matches every device on this bus. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5576
/* Pseudo bus that carries the fake adapters; probe/remove build and tear
 * down the Scsi_Host for each registered adapter device. */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};