drivers/scsi/scsi_debug.c
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.84"
67 static const char *scsi_debug_version_date = "20140706";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define ADDR_OUT_OF_RANGE 0x21
79 #define INVALID_COMMAND_OPCODE 0x20
80 #define INVALID_FIELD_IN_CDB 0x24
81 #define INVALID_FIELD_IN_PARAM_LIST 0x26
82 #define UA_RESET_ASC 0x29
83 #define UA_CHANGED_ASC 0x2a
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
87 #define SAVING_PARAMS_UNSUP 0x39
88 #define TRANSPORT_PROBLEM 0x4b
89 #define THRESHOLD_EXCEEDED 0x5d
90 #define LOW_POWER_COND_ON 0x5e
91
92 /* Additional Sense Code Qualifier (ASCQ) */
93 #define ACK_NAK_TO 0x3
94
95
96 /* Default values for driver parameters */
97 #define DEF_NUM_HOST   1
98 #define DEF_NUM_TGTS   1
99 #define DEF_MAX_LUNS   1
100 /* With these defaults, this driver will make 1 host with 1 target
101  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
102  */
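/* Illustration: these settings multiply out. Assuming the usual module
 * parameter names (add_host, num_tgts, max_luns),
 *     modprobe scsi_debug add_host=2 num_tgts=2 max_luns=2
 * creates 2 hosts x 2 targets x 2 LUNs = 8 simulated devices.
 */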
103 #define DEF_ATO 1
104 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
105 #define DEF_DEV_SIZE_MB   8
106 #define DEF_DIF 0
107 #define DEF_DIX 0
108 #define DEF_D_SENSE   0
109 #define DEF_EVERY_NTH   0
110 #define DEF_FAKE_RW     0
111 #define DEF_GUARD 0
112 #define DEF_HOST_LOCK 0
113 #define DEF_LBPU 0
114 #define DEF_LBPWS 0
115 #define DEF_LBPWS10 0
116 #define DEF_LBPRZ 1
117 #define DEF_LOWEST_ALIGNED 0
118 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
119 #define DEF_NO_LUN_0   0
120 #define DEF_NUM_PARTS   0
121 #define DEF_OPTS   0
122 #define DEF_OPT_BLKS 64
123 #define DEF_PHYSBLK_EXP 0
124 #define DEF_PTYPE   0
125 #define DEF_REMOVABLE false
126 #define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
127 #define DEF_SECTOR_SIZE 512
128 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
129 #define DEF_UNMAP_ALIGNMENT 0
130 #define DEF_UNMAP_GRANULARITY 1
131 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
132 #define DEF_UNMAP_MAX_DESC 256
133 #define DEF_VIRTUAL_GB   0
134 #define DEF_VPD_USE_HOSTNO 1
135 #define DEF_WRITESAME_LENGTH 0xFFFF
136 #define DELAY_OVERRIDDEN -9999
137
138 /* bit mask values for scsi_debug_opts */
139 #define SCSI_DEBUG_OPT_NOISE   1
140 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
141 #define SCSI_DEBUG_OPT_TIMEOUT   4
142 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
143 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
144 #define SCSI_DEBUG_OPT_DIF_ERR   32
145 #define SCSI_DEBUG_OPT_DIX_ERR   64
146 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
147 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
148 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
149 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
150 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
151 #define SCSI_DEBUG_OPT_N_WCE    0x1000
152 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
153 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
154 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
155 /* When "every_nth" > 0 then modulo "every_nth" commands:
156  *   - no response is simulated (a timeout) if SCSI_DEBUG_OPT_TIMEOUT is set
157  *   - a RECOVERED_ERROR is simulated on successful read and write
158  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
159  *   - a TRANSPORT_ERROR is simulated on successful read and write
160  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
161  *
162  * When "every_nth" < 0 then after "- every_nth" commands:
163  *   - no response is simulated (a timeout) if SCSI_DEBUG_OPT_TIMEOUT is set
164  *   - a RECOVERED_ERROR is simulated on successful read and write
165  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
166  *   - a TRANSPORT_ERROR is simulated on successful read and write
167  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
168  * This will continue until some other action occurs (e.g. the user
169  * writing a new value (other than -1 or 1) to every_nth via sysfs).
170  */
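/* Illustration (assuming the usual module parameter names every_nth and
 * opts): loading with
 *     modprobe scsi_debug every_nth=100 opts=4
 * makes every 100th command give no response (SCSI_DEBUG_OPT_TIMEOUT);
 * every_nth can later be changed through sysfs as noted above.
 */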
171
172 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
173  * priority order. In the subset implemented here lower numbers have higher
174  * priority. The UA numbers should be a sequence starting from 0 with
175  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
176 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
177 #define SDEBUG_UA_BUS_RESET 1
178 #define SDEBUG_UA_MODE_CHANGED 2
179 #define SDEBUG_NUM_UAS 3
180
181 /* for check_readiness() */
182 #define UAS_ONLY 1
183 #define UAS_TUR 0
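/* check_readiness() reports pending unit attentions in both modes; with
 * UAS_TUR it additionally reports NOT READY when the device has been
 * stopped (i.e. it behaves like TEST UNIT READY), while UAS_ONLY skips
 * that check.
 */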
184
185 /* when SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error is simulated at this
186  * sector on read commands: */
187 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
188 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
189
190 /* If REPORT LUNS finds LUNs >= 256 it can choose "flat space" addressing
191  * (value 1) or "peripheral device" addressing (value 0) */
192 #define SAM2_LUN_ADDRESS_METHOD 0
193 #define SAM2_WLUN_REPORT_LUNS 0xc101
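/* Illustration (encoding details from SAM, not from this file): with
 * "peripheral device" addressing only LUNs 0-255 can be encoded, while
 * with "flat space" addressing LUN 256 is reported as the two-byte value
 * 0x4100 (01b address method in the top two bits). SAM2_WLUN_REPORT_LUNS
 * (0xc101) is the REPORT LUNS well known logical unit.
 */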
194
195 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
196  * (for response) at one time. Can be reduced by max_queue option. Command
197  * responses are not queued when delay=0 and ndelay=0. The per-device
198  * DEF_CMD_PER_LUN can be changed via sysfs:
199  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
200  * SCSI_DEBUG_CANQUEUE. */
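/* Illustration: a device's queue depth can be lowered at run time, e.g.
 *     echo 16 > /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but never raised above SCSI_DEBUG_CANQUEUE (9 * BITS_PER_LONG, i.e.
 * 576 with 64-bit longs).
 */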
201 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a WORD here is the number of bits in a long */
202 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
203 #define DEF_CMD_PER_LUN  255
204
205 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
206 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
207 #endif
208
209 static int scsi_debug_add_host = DEF_NUM_HOST;
210 static int scsi_debug_ato = DEF_ATO;
211 static int scsi_debug_delay = DEF_DELAY;
212 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
213 static int scsi_debug_dif = DEF_DIF;
214 static int scsi_debug_dix = DEF_DIX;
215 static int scsi_debug_dsense = DEF_D_SENSE;
216 static int scsi_debug_every_nth = DEF_EVERY_NTH;
217 static int scsi_debug_fake_rw = DEF_FAKE_RW;
218 static unsigned int scsi_debug_guard = DEF_GUARD;
219 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
220 static int scsi_debug_max_luns = DEF_MAX_LUNS;
221 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
222 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
223 static int scsi_debug_ndelay = DEF_NDELAY;
224 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
225 static int scsi_debug_no_uld = 0;
226 static int scsi_debug_num_parts = DEF_NUM_PARTS;
227 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
228 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
229 static int scsi_debug_opts = DEF_OPTS;
230 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
231 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
232 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
233 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
234 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
235 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
236 static unsigned int scsi_debug_lbpu = DEF_LBPU;
237 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
238 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
239 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
240 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
241 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
242 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
243 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
244 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
245 static bool scsi_debug_removable = DEF_REMOVABLE;
246 static bool scsi_debug_clustering;
247 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
248
249 static atomic_t sdebug_cmnd_count;
250 static atomic_t sdebug_completions;
251 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
252
253 #define DEV_READONLY(TGT)      (0)
254
255 static unsigned int sdebug_store_sectors;
256 static sector_t sdebug_capacity;        /* in sectors */
257
258 /* legacy BIOS geometry values; the kernel may drop these but some mode
259    sense pages may still need them */
260 static int sdebug_heads;                /* heads per disk */
261 static int sdebug_cylinders_per;        /* cylinders per surface */
262 static int sdebug_sectors_per;          /* sectors per cylinder */
263
264 #define SDEBUG_MAX_PARTS 4
265
266 #define SCSI_DEBUG_MAX_CMD_LEN 32
267
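/* Non-zero when any logical block provisioning mode (UNMAP, WRITE SAME(16)
 * or WRITE SAME(10) with the UNMAP bit) is enabled and reads/writes are
 * not being faked.
 */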
268 static unsigned int scsi_debug_lbp(void)
269 {
270         return ((0 == scsi_debug_fake_rw) &&
271                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
272 }
273
274 struct sdebug_dev_info {
275         struct list_head dev_list;
276         unsigned int channel;
277         unsigned int target;
278         u64 lun;
279         struct sdebug_host_info *sdbg_host;
280         u64 wlun;
281         unsigned long uas_bm[1];
282         atomic_t num_in_q;
283         char stopped;
284         char used;
285 };
286
287 struct sdebug_host_info {
288         struct list_head host_list;
289         struct Scsi_Host *shost;
290         struct device dev;
291         struct list_head dev_info_list;
292 };
293
294 #define to_sdebug_host(d)       \
295         container_of(d, struct sdebug_host_info, dev)
296
297 static LIST_HEAD(sdebug_host_list);
298 static DEFINE_SPINLOCK(sdebug_host_list_lock);
299
300
301 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
302         struct hrtimer hrt;     /* must be first element */
303         int qa_indx;
304 };
305
306 struct sdebug_queued_cmd {
307         /* in_use flagged by a bit in queued_in_use_bm[] */
308         struct timer_list *cmnd_timerp;
309         struct tasklet_struct *tletp;
310         struct sdebug_hrtimer *sd_hrtp;
311         struct scsi_cmnd * a_cmnd;
312 };
313 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
314 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
315
316
317 static unsigned char * fake_storep;     /* ramdisk storage */
318 static struct sd_dif_tuple *dif_storep; /* protection info */
319 static void *map_storep;                /* provisioning map */
320
321 static unsigned long map_size;
322 static int num_aborts;
323 static int num_dev_resets;
324 static int num_target_resets;
325 static int num_bus_resets;
326 static int num_host_resets;
327 static int dix_writes;
328 static int dix_reads;
329 static int dif_errors;
330
331 static DEFINE_SPINLOCK(queued_arr_lock);
332 static DEFINE_RWLOCK(atomic_rw);
333
334 static char sdebug_proc_name[] = MY_NAME;
335 static const char *my_name = MY_NAME;
336
337 static struct bus_type pseudo_lld_bus;
338
339 static struct device_driver sdebug_driverfs_driver = {
340         .name           = sdebug_proc_name,
341         .bus            = &pseudo_lld_bus,
342 };
343
344 static const int check_condition_result =
345                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
346
347 static const int illegal_condition_result =
348         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
349
350 static const int device_qfull_result =
351         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
352
353 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
354                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
355                                      0, 0, 0, 0};
356 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
357                                     0, 0, 0x2, 0x4b};
358 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
359                                    0, 0, 0x0, 0x0};
360
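/* Mapping an LBA into the backing stores below: do_div() divides its
 * first argument in place and returns the remainder, so the assignments
 * in fake_store() and dif_store() reduce the LBA modulo
 * sdebug_store_sectors, wrapping accesses within the ramdisk and
 * protection-info arrays.
 */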
361 static void *fake_store(unsigned long long lba)
362 {
363         lba = do_div(lba, sdebug_store_sectors);
364
365         return fake_storep + lba * scsi_debug_sector_size;
366 }
367
368 static struct sd_dif_tuple *dif_store(sector_t sector)
369 {
370         sector = do_div(sector, sdebug_store_sectors);
371
372         return dif_storep + sector;
373 }
374
375 static int sdebug_add_adapter(void);
376 static void sdebug_remove_adapter(void);
377
378 static void sdebug_max_tgts_luns(void)
379 {
380         struct sdebug_host_info *sdbg_host;
381         struct Scsi_Host *hpnt;
382
383         spin_lock(&sdebug_host_list_lock);
384         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
385                 hpnt = sdbg_host->shost;
386                 if ((hpnt->this_id >= 0) &&
387                     (scsi_debug_num_tgts > hpnt->this_id))
388                         hpnt->max_id = scsi_debug_num_tgts + 1;
389                 else
390                         hpnt->max_id = scsi_debug_num_tgts;
391                 /* scsi_debug_max_luns; */
392                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
393         }
394         spin_unlock(&sdebug_host_list_lock);
395 }
396
397 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
398 {
399         unsigned char *sbuff;
400
401         sbuff = scp->sense_buffer;
402         if (!sbuff) {
403                 sdev_printk(KERN_ERR, scp->device,
404                             "%s: sense_buffer is NULL\n", __func__);
405                 return;
406         }
407         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
408
409         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
410
411         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
412                 sdev_printk(KERN_INFO, scp->device,
413                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
414                             my_name, key, asc, asq);
415 }
416
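/* Decode the starting LBA, the number of blocks and, for 32-byte
 * (variable length) CDBs, the expected initial LBA (ei_lba) from the CDB.
 * All of these CDB fields are big-endian; READ(6)/WRITE(6) use a 21-bit
 * LBA and treat a transfer length of 0 as 256 blocks.
 */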
417 static void get_data_transfer_info(unsigned char *cmd,
418                                    unsigned long long *lba, unsigned int *num,
419                                    u32 *ei_lba)
420 {
421         *ei_lba = 0;
422
423         switch (*cmd) {
424         case VARIABLE_LENGTH_CMD:
425                 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
426                         (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
427                         (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
428                         (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
429
430                 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
431                         (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
432
433                 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
434                         (u32)cmd[28] << 24;
435                 break;
436
437         case WRITE_SAME_16:
438         case WRITE_16:
439         case READ_16:
440                 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
441                         (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
442                         (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
443                         (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
444
445                 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
446                         (u32)cmd[10] << 24;
447                 break;
448         case WRITE_12:
449         case READ_12:
450                 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
451                         (u32)cmd[2] << 24;
452
453                 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
454                         (u32)cmd[6] << 24;
455                 break;
456         case WRITE_SAME:
457         case WRITE_10:
458         case READ_10:
459         case XDWRITEREAD_10:
460                 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
461                         (u32)cmd[2] << 24;
462
463                 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
464                 break;
465         case WRITE_6:
466         case READ_6:
467                 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
468                         (u32)(cmd[1] & 0x1f) << 16;
469                 *num = (0 == cmd[4]) ? 256 : cmd[4];
470                 break;
471         default:
472                 break;
473         }
474 }
475
476 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
477 {
478         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
479                 if (0x1261 == cmd)
480                         sdev_printk(KERN_INFO, dev,
481                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
482                 else if (0x5331 == cmd)
483                         sdev_printk(KERN_INFO, dev,
484                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
485                                     __func__);
486                 else
487                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
488                                     __func__, cmd);
489         }
490         return -EINVAL;
491         /* return -ENOTTY; // correct return but upsets fdisk */
492 }
493
494 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
495                            struct sdebug_dev_info * devip)
496 {
497         int k;
498         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
499
500         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
501         if (k != SDEBUG_NUM_UAS) {
502                 const char *cp = NULL;
503
504                 switch (k) {
505                 case SDEBUG_UA_POR:
506                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
507                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
508                         if (debug)
509                                 cp = "power on reset";
510                         break;
511                 case SDEBUG_UA_BUS_RESET:
512                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
513                                         UA_RESET_ASC, BUS_RESET_ASCQ);
514                         if (debug)
515                                 cp = "bus reset";
516                         break;
517                 case SDEBUG_UA_MODE_CHANGED:
518                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
519                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
520                         if (debug)
521                                 cp = "mode parameters changed";
522                         break;
523                 default:
524                         pr_warn("%s: unexpected unit attention code=%d\n",
525                                 __func__, k);
526                         if (debug)
527                                 cp = "unknown";
528                         break;
529                 }
530                 clear_bit(k, devip->uas_bm);
531                 if (debug)
532                         sdev_printk(KERN_INFO, SCpnt->device,
533                                    "%s reports: Unit attention: %s\n",
534                                    my_name, cp);
535                 return check_condition_result;
536         }
537         if ((UAS_TUR == uas_only) && devip->stopped) {
538                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
539                                 0x2);
540                 if (debug)
541                         sdev_printk(KERN_INFO, SCpnt->device,
542                                     "%s reports: Not ready: %s\n", my_name,
543                                     "initializing command required");
544                 return check_condition_result;
545         }
546         return 0;
547 }
548
549 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
550 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
551                                 int arr_len)
552 {
553         int act_len;
554         struct scsi_data_buffer *sdb = scsi_in(scp);
555
556         if (!sdb->length)
557                 return 0;
558         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
559                 return (DID_ERROR << 16);
560
561         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
562                                       arr, arr_len);
563         sdb->resid = scsi_bufflen(scp) - act_len;
564
565         return 0;
566 }
567
568 /* Returns number of bytes fetched into 'arr' or -1 if error. */
569 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
570                                int arr_len)
571 {
572         if (!scsi_bufflen(scp))
573                 return 0;
574         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
575                 return -1;
576
577         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
578 }
579
580
581 static const char * inq_vendor_id = "Linux   ";
582 static const char * inq_product_id = "scsi_debug      ";
583 static const char *inq_product_rev = "0184";    /* version less '.' */
584
585 /* Device identification VPD page. Returns number of bytes placed in arr */
586 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
587                            int target_dev_id, int dev_id_num,
588                            const char * dev_id_str,
589                            int dev_id_str_len)
590 {
591         int num, port_a;
592         char b[32];
593
594         port_a = target_dev_id + 1;
595         /* T10 vendor identifier field format (faked) */
596         arr[0] = 0x2;   /* ASCII */
597         arr[1] = 0x1;
598         arr[2] = 0x0;
599         memcpy(&arr[4], inq_vendor_id, 8);
600         memcpy(&arr[12], inq_product_id, 16);
601         memcpy(&arr[28], dev_id_str, dev_id_str_len);
602         num = 8 + 16 + dev_id_str_len;
603         arr[3] = num;
604         num += 4;
605         if (dev_id_num >= 0) {
606                 /* NAA-5, Logical unit identifier (binary) */
607                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
608                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
609                 arr[num++] = 0x0;
610                 arr[num++] = 0x8;
611                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
612                 arr[num++] = 0x33;
613                 arr[num++] = 0x33;
614                 arr[num++] = 0x30;
615                 arr[num++] = (dev_id_num >> 24);
616                 arr[num++] = (dev_id_num >> 16) & 0xff;
617                 arr[num++] = (dev_id_num >> 8) & 0xff;
618                 arr[num++] = dev_id_num & 0xff;
619                 /* Target relative port number */
620                 arr[num++] = 0x61;      /* proto=sas, binary */
621                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
622                 arr[num++] = 0x0;       /* reserved */
623                 arr[num++] = 0x4;       /* length */
624                 arr[num++] = 0x0;       /* reserved */
625                 arr[num++] = 0x0;       /* reserved */
626                 arr[num++] = 0x0;
627                 arr[num++] = 0x1;       /* relative port A */
628         }
629         /* NAA-5, Target port identifier */
630         arr[num++] = 0x61;      /* proto=sas, binary */
631         arr[num++] = 0x93;      /* piv=1, target port, naa */
632         arr[num++] = 0x0;
633         arr[num++] = 0x8;
634         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
635         arr[num++] = 0x22;
636         arr[num++] = 0x22;
637         arr[num++] = 0x20;
638         arr[num++] = (port_a >> 24);
639         arr[num++] = (port_a >> 16) & 0xff;
640         arr[num++] = (port_a >> 8) & 0xff;
641         arr[num++] = port_a & 0xff;
642         /* NAA-5, Target port group identifier */
643         arr[num++] = 0x61;      /* proto=sas, binary */
644         arr[num++] = 0x95;      /* piv=1, target port group id */
645         arr[num++] = 0x0;
646         arr[num++] = 0x4;
647         arr[num++] = 0;
648         arr[num++] = 0;
649         arr[num++] = (port_group_id >> 8) & 0xff;
650         arr[num++] = port_group_id & 0xff;
651         /* NAA-5, Target device identifier */
652         arr[num++] = 0x61;      /* proto=sas, binary */
653         arr[num++] = 0xa3;      /* piv=1, target device, naa */
654         arr[num++] = 0x0;
655         arr[num++] = 0x8;
656         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
657         arr[num++] = 0x22;
658         arr[num++] = 0x22;
659         arr[num++] = 0x20;
660         arr[num++] = (target_dev_id >> 24);
661         arr[num++] = (target_dev_id >> 16) & 0xff;
662         arr[num++] = (target_dev_id >> 8) & 0xff;
663         arr[num++] = target_dev_id & 0xff;
664         /* SCSI name string: Target device identifier */
665         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
666         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
667         arr[num++] = 0x0;
668         arr[num++] = 24;
669         memcpy(arr + num, "naa.52222220", 12);
670         num += 12;
671         snprintf(b, sizeof(b), "%08X", target_dev_id);
672         memcpy(arr + num, b, 8);
673         num += 8;
674         memset(arr + num, 0, 4);
675         num += 4;
676         return num;
677 }
678
679
680 static unsigned char vpd84_data[] = {
681 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
682     0x22,0x22,0x22,0x0,0xbb,0x1,
683     0x22,0x22,0x22,0x0,0xbb,0x2,
684 };
685
686 /*  Software interface identification VPD page */
687 static int inquiry_evpd_84(unsigned char * arr)
688 {
689         memcpy(arr, vpd84_data, sizeof(vpd84_data));
690         return sizeof(vpd84_data);
691 }
692
693 /* Management network addresses VPD page */
694 static int inquiry_evpd_85(unsigned char * arr)
695 {
696         int num = 0;
697         const char * na1 = "https://www.kernel.org/config";
698         const char * na2 = "http://www.kernel.org/log";
699         int plen, olen;
700
701         arr[num++] = 0x1;       /* lu, storage config */
702         arr[num++] = 0x0;       /* reserved */
703         arr[num++] = 0x0;
704         olen = strlen(na1);
705         plen = olen + 1;
706         if (plen % 4)
707                 plen = ((plen / 4) + 1) * 4;
708         arr[num++] = plen;      /* length, null terminated, padded */
709         memcpy(arr + num, na1, olen);
710         memset(arr + num + olen, 0, plen - olen);
711         num += plen;
712
713         arr[num++] = 0x4;       /* lu, logging */
714         arr[num++] = 0x0;       /* reserved */
715         arr[num++] = 0x0;
716         olen = strlen(na2);
717         plen = olen + 1;
718         if (plen % 4)
719                 plen = ((plen / 4) + 1) * 4;
720         arr[num++] = plen;      /* length, null terminated, padded */
721         memcpy(arr + num, na2, olen);
722         memset(arr + num + olen, 0, plen - olen);
723         num += plen;
724
725         return num;
726 }
727
728 /* SCSI ports VPD page */
729 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
730 {
731         int num = 0;
732         int port_a, port_b;
733
734         port_a = target_dev_id + 1;
735         port_b = port_a + 1;
736         arr[num++] = 0x0;       /* reserved */
737         arr[num++] = 0x0;       /* reserved */
738         arr[num++] = 0x0;
739         arr[num++] = 0x1;       /* relative port 1 (primary) */
740         memset(arr + num, 0, 6);
741         num += 6;
742         arr[num++] = 0x0;
743         arr[num++] = 12;        /* length tp descriptor */
744         /* naa-5 target port identifier (A) */
745         arr[num++] = 0x61;      /* proto=sas, binary */
746         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
747         arr[num++] = 0x0;       /* reserved */
748         arr[num++] = 0x8;       /* length */
749         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
750         arr[num++] = 0x22;
751         arr[num++] = 0x22;
752         arr[num++] = 0x20;
753         arr[num++] = (port_a >> 24);
754         arr[num++] = (port_a >> 16) & 0xff;
755         arr[num++] = (port_a >> 8) & 0xff;
756         arr[num++] = port_a & 0xff;
757
758         arr[num++] = 0x0;       /* reserved */
759         arr[num++] = 0x0;       /* reserved */
760         arr[num++] = 0x0;
761         arr[num++] = 0x2;       /* relative port 2 (secondary) */
762         memset(arr + num, 0, 6);
763         num += 6;
764         arr[num++] = 0x0;
765         arr[num++] = 12;        /* length tp descriptor */
766         /* naa-5 target port identifier (B) */
767         arr[num++] = 0x61;      /* proto=sas, binary */
768         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
769         arr[num++] = 0x0;       /* reserved */
770         arr[num++] = 0x8;       /* length */
771         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
772         arr[num++] = 0x22;
773         arr[num++] = 0x22;
774         arr[num++] = 0x20;
775         arr[num++] = (port_b >> 24);
776         arr[num++] = (port_b >> 16) & 0xff;
777         arr[num++] = (port_b >> 8) & 0xff;
778         arr[num++] = port_b & 0xff;
779
780         return num;
781 }
782
783
784 static unsigned char vpd89_data[] = {
785 /* from 4th byte */ 0,0,0,0,
786 'l','i','n','u','x',' ',' ',' ',
787 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
788 '1','2','3','4',
789 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
790 0xec,0,0,0,
791 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
792 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
793 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
794 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
795 0x53,0x41,
796 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
797 0x20,0x20,
798 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
799 0x10,0x80,
800 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
801 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
802 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
803 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
804 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
805 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
806 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
807 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
808 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
809 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
810 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
811 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
812 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
813 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
814 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
815 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
816 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
817 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
818 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
819 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
820 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
821 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
822 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
823 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
824 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
825 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
826 };
827
828 /* ATA Information VPD page */
829 static int inquiry_evpd_89(unsigned char * arr)
830 {
831         memcpy(arr, vpd89_data, sizeof(vpd89_data));
832         return sizeof(vpd89_data);
833 }
834
835
836 static unsigned char vpdb0_data[] = {
837         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
838         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
839         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
840         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
841 };
842
843 /* Block limits VPD page (SBC-3) */
844 static int inquiry_evpd_b0(unsigned char * arr)
845 {
846         unsigned int gran;
847
848         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
849
850         /* Optimal transfer length granularity */
851         gran = 1 << scsi_debug_physblk_exp;
852         arr[2] = (gran >> 8) & 0xff;
853         arr[3] = gran & 0xff;
854
855         /* Maximum Transfer Length */
856         if (sdebug_store_sectors > 0x400) {
857                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
858                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
859                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
860                 arr[7] = sdebug_store_sectors & 0xff;
861         }
862
863         /* Optimal Transfer Length */
864         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
865
866         if (scsi_debug_lbpu) {
867                 /* Maximum Unmap LBA Count */
868                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
869
870                 /* Maximum Unmap Block Descriptor Count */
871                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
872         }
873
874         /* Unmap Granularity Alignment */
875         if (scsi_debug_unmap_alignment) {
876                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
877                 arr[28] |= 0x80; /* UGAVALID */
878         }
879
880         /* Optimal Unmap Granularity */
881         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
882
883         /* Maximum WRITE SAME Length */
884         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
885
886         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
889 }
890
891 /* Block device characteristics VPD page (SBC-3) */
892 static int inquiry_evpd_b1(unsigned char *arr)
893 {
894         memset(arr, 0, 0x3c);
895         arr[0] = 0;
896         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
897         arr[2] = 0;
898         arr[3] = 5;     /* less than 1.8" */
899
900         return 0x3c;
901 }
902
903 /* Logical block provisioning VPD page (SBC-3) */
904 static int inquiry_evpd_b2(unsigned char *arr)
905 {
906         memset(arr, 0, 0x4);
907         arr[0] = 0;                     /* threshold exponent */
908
909         if (scsi_debug_lbpu)
910                 arr[1] = 1 << 7;
911
912         if (scsi_debug_lbpws)
913                 arr[1] |= 1 << 6;
914
915         if (scsi_debug_lbpws10)
916                 arr[1] |= 1 << 5;
917
918         if (scsi_debug_lbprz)
919                 arr[1] |= 1 << 2;
920
921         return 0x4;
922 }
923
924 #define SDEBUG_LONG_INQ_SZ 96
925 #define SDEBUG_MAX_INQ_ARR_SZ 584
926
927 static int resp_inquiry(struct scsi_cmnd *scp, int target,
928                         struct sdebug_dev_info * devip)
929 {
930         unsigned char pq_pdt;
931         unsigned char * arr;
932         unsigned char *cmd = (unsigned char *)scp->cmnd;
933         int alloc_len, n, ret;
934
935         alloc_len = (cmd[3] << 8) + cmd[4];
936         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
937         if (! arr)
938                 return DID_REQUEUE << 16;
939         if (devip->wlun)
940                 pq_pdt = 0x1e;  /* present, wlun */
941         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
942                 pq_pdt = 0x7f;  /* not present, no device type */
943         else
944                 pq_pdt = (scsi_debug_ptype & 0x1f);
945         arr[0] = pq_pdt;
946         if (0x2 & cmd[1]) {  /* CMDDT bit set */
947                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
948                                 0);
949                 kfree(arr);
950                 return check_condition_result;
951         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
952                 int lu_id_num, port_group_id, target_dev_id, len;
953                 char lu_id_str[6];
954                 int host_no = devip->sdbg_host->shost->host_no;
955                 
956                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
957                     (devip->channel & 0x7f);
958                 if (0 == scsi_debug_vpd_use_hostno)
959                         host_no = 0;
960                 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
961                             (devip->target * 1000) + devip->lun);
962                 target_dev_id = ((host_no + 1) * 2000) +
963                                  (devip->target * 1000) - 3;
964                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
965                 if (0 == cmd[2]) { /* supported vital product data pages */
966                         arr[1] = cmd[2];        /*sanity */
967                         n = 4;
968                         arr[n++] = 0x0;   /* this page */
969                         arr[n++] = 0x80;  /* unit serial number */
970                         arr[n++] = 0x83;  /* device identification */
971                         arr[n++] = 0x84;  /* software interface ident. */
972                         arr[n++] = 0x85;  /* management network addresses */
973                         arr[n++] = 0x86;  /* extended inquiry */
974                         arr[n++] = 0x87;  /* mode page policy */
975                         arr[n++] = 0x88;  /* SCSI ports */
976                         arr[n++] = 0x89;  /* ATA information */
977                         arr[n++] = 0xb0;  /* Block limits (SBC) */
978                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
979                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
980                                 arr[n++] = 0xb2;
981                         arr[3] = n - 4;   /* number of supported VPD pages */
982                 } else if (0x80 == cmd[2]) { /* unit serial number */
983                         arr[1] = cmd[2];        /*sanity */
984                         arr[3] = len;
985                         memcpy(&arr[4], lu_id_str, len);
986                 } else if (0x83 == cmd[2]) { /* device identification */
987                         arr[1] = cmd[2];        /*sanity */
988                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
989                                                  target_dev_id, lu_id_num,
990                                                  lu_id_str, len);
991                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
992                         arr[1] = cmd[2];        /*sanity */
993                         arr[3] = inquiry_evpd_84(&arr[4]);
994                 } else if (0x85 == cmd[2]) { /* Management network addresses */
995                         arr[1] = cmd[2];        /*sanity */
996                         arr[3] = inquiry_evpd_85(&arr[4]);
997                 } else if (0x86 == cmd[2]) { /* extended inquiry */
998                         arr[1] = cmd[2];        /*sanity */
999                         arr[3] = 0x3c;  /* number of following entries */
1000                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1001                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1002                         else if (scsi_debug_dif)
1003                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1004                         else
1005                                 arr[4] = 0x0;   /* no protection stuff */
1006                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1007                 } else if (0x87 == cmd[2]) { /* mode page policy */
1008                         arr[1] = cmd[2];        /*sanity */
1009                         arr[3] = 0x8;   /* number of following entries */
1010                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1011                         arr[6] = 0x80;  /* mlus, shared */
1012                         arr[8] = 0x18;   /* protocol specific lu */
1013                         arr[10] = 0x82;  /* mlus, per initiator port */
1014                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1015                         arr[1] = cmd[2];        /*sanity */
1016                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1017                 } else if (0x89 == cmd[2]) { /* ATA information */
1018                         arr[1] = cmd[2];        /*sanity */
1019                         n = inquiry_evpd_89(&arr[4]);
1020                         arr[2] = (n >> 8);
1021                         arr[3] = (n & 0xff);
1022                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1023                         arr[1] = cmd[2];        /*sanity */
1024                         arr[3] = inquiry_evpd_b0(&arr[4]);
1025                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1026                         arr[1] = cmd[2];        /*sanity */
1027                         arr[3] = inquiry_evpd_b1(&arr[4]);
1028                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1029                         arr[1] = cmd[2];        /*sanity */
1030                         arr[3] = inquiry_evpd_b2(&arr[4]);
1031                 } else {
1032                         /* Illegal request, invalid field in cdb */
1033                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1034                                         INVALID_FIELD_IN_CDB, 0);
1035                         kfree(arr);
1036                         return check_condition_result;
1037                 }
1038                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1039                 ret = fill_from_dev_buffer(scp, arr,
1040                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1041                 kfree(arr);
1042                 return ret;
1043         }
1044         /* drops through here for a standard inquiry */
1045         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1046         arr[2] = scsi_debug_scsi_level;
1047         arr[3] = 2;    /* response_data_format==2 */
1048         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1049         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1050         if (0 == scsi_debug_vpd_use_hostno)
1051                 arr[5] = 0x10; /* claim: implicit TGPS */
1052         arr[6] = 0x10; /* claim: MultiP */
1053         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1054         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1055         memcpy(&arr[8], inq_vendor_id, 8);
1056         memcpy(&arr[16], inq_product_id, 16);
1057         memcpy(&arr[32], inq_product_rev, 4);
1058         /* version descriptors (2 bytes each) follow */
1059         arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
1060         arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
1061         n = 62;
1062         if (scsi_debug_ptype == 0) {
1063                 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
1064         } else if (scsi_debug_ptype == 1) {
1065                 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
1066         }
1067         arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
1068         ret = fill_from_dev_buffer(scp, arr,
1069                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1070         kfree(arr);
1071         return ret;
1072 }
1073
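/* REQUEST SENSE: if the Informational Exceptions mode page has TEST set
 * and MRIE == 6 (see iec_m_pg), a THRESHOLD EXCEEDED response is built;
 * otherwise the stored sense data is returned, converted to descriptor
 * format when the DESC bit is set, and the sense buffer is then reset to
 * NO SENSE.
 */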
1074 static int resp_requests(struct scsi_cmnd * scp,
1075                          struct sdebug_dev_info * devip)
1076 {
1077         unsigned char * sbuff;
1078         unsigned char *cmd = (unsigned char *)scp->cmnd;
1079         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1080         int want_dsense;
1081         int len = 18;
1082
1083         memset(arr, 0, sizeof(arr));
1084         want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
1085         sbuff = scp->sense_buffer;
1086         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1087                 if (want_dsense) {
1088                         arr[0] = 0x72;
1089                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1090                         arr[2] = THRESHOLD_EXCEEDED;
1091                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1092                 } else {
1093                         arr[0] = 0x70;
1094                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1095                         arr[7] = 0xa;           /* 18 byte sense buffer */
1096                         arr[12] = THRESHOLD_EXCEEDED;
1097                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1098                 }
1099         } else {
1100                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1101                 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
1102                         /* DESC bit set and sense_buff in fixed format */
1103                         memset(arr, 0, sizeof(arr));
1104                         arr[0] = 0x72;
1105                         arr[1] = sbuff[2];     /* sense key */
1106                         arr[2] = sbuff[12];    /* asc */
1107                         arr[3] = sbuff[13];    /* ascq */
1108                         len = 8;
1109                 }
1110         }
1111         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1112         return fill_from_dev_buffer(scp, arr, len);
1113 }
1114
1115 static int resp_start_stop(struct scsi_cmnd * scp,
1116                            struct sdebug_dev_info * devip)
1117 {
1118         unsigned char *cmd = (unsigned char *)scp->cmnd;
1119         int power_cond, errsts, start;
1120
1121         errsts = check_readiness(scp, UAS_ONLY, devip);
1122         if (errsts)
1123                 return errsts;
1124         power_cond = (cmd[4] & 0xf0) >> 4;
1125         if (power_cond) {
1126                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1127                                 0);
1128                 return check_condition_result;
1129         }
1130         start = cmd[4] & 1;
1131         if (start == devip->stopped)
1132                 devip->stopped = !start;
1133         return 0;
1134 }
1135
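/* Capacity advertised to the initiator, in sectors. When virtual_gb > 0
 * this may exceed the size of the backing ramdisk; accesses then wrap
 * modulo sdebug_store_sectors (see fake_store() above).
 */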
1136 static sector_t get_sdebug_capacity(void)
1137 {
1138         if (scsi_debug_virtual_gb > 0)
1139                 return (sector_t)scsi_debug_virtual_gb *
1140                         (1073741824 / scsi_debug_sector_size);
1141         else
1142                 return sdebug_store_sectors;
1143 }
1144
1145 #define SDEBUG_READCAP_ARR_SZ 8
1146 static int resp_readcap(struct scsi_cmnd * scp,
1147                         struct sdebug_dev_info * devip)
1148 {
1149         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1150         unsigned int capac;
1151         int errsts;
1152
1153         errsts = check_readiness(scp, UAS_ONLY, devip);
1154         if (errsts)
1155                 return errsts;
1156         /* following just in case virtual_gb changed */
1157         sdebug_capacity = get_sdebug_capacity();
1158         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1159         if (sdebug_capacity < 0xffffffff) {
1160                 capac = (unsigned int)sdebug_capacity - 1;
1161                 arr[0] = (capac >> 24);
1162                 arr[1] = (capac >> 16) & 0xff;
1163                 arr[2] = (capac >> 8) & 0xff;
1164                 arr[3] = capac & 0xff;
1165         } else {
1166                 arr[0] = 0xff;
1167                 arr[1] = 0xff;
1168                 arr[2] = 0xff;
1169                 arr[3] = 0xff;
1170         }
1171         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1172         arr[7] = scsi_debug_sector_size & 0xff;
1173         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1174 }
1175
1176 #define SDEBUG_READCAP16_ARR_SZ 32
1177 static int resp_readcap16(struct scsi_cmnd * scp,
1178                           struct sdebug_dev_info * devip)
1179 {
1180         unsigned char *cmd = (unsigned char *)scp->cmnd;
1181         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1182         unsigned long long capac;
1183         int errsts, k, alloc_len;
1184
1185         errsts = check_readiness(scp, UAS_ONLY, devip);
1186         if (errsts)
1187                 return errsts;
1188         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1189                      + cmd[13]);
1190         /* following just in case virtual_gb changed */
1191         sdebug_capacity = get_sdebug_capacity();
1192         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1193         capac = sdebug_capacity - 1;
1194         for (k = 0; k < 8; ++k, capac >>= 8)
1195                 arr[7 - k] = capac & 0xff;
1196         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1197         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1198         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1199         arr[11] = scsi_debug_sector_size & 0xff;
1200         arr[13] = scsi_debug_physblk_exp & 0xf;
1201         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1202
1203         if (scsi_debug_lbp()) {
1204                 arr[14] |= 0x80; /* LBPME */
1205                 if (scsi_debug_lbprz)
1206                         arr[14] |= 0x40; /* LBPRZ */
1207         }
1208
1209         arr[15] = scsi_debug_lowest_aligned & 0xff;
1210
1211         if (scsi_debug_dif) {
1212                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1213                 arr[12] |= 1; /* PROT_EN */
1214         }
1215
1216         return fill_from_dev_buffer(scp, arr,
1217                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1218 }
1219
1220 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1221
1222 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1223                               struct sdebug_dev_info * devip)
1224 {
1225         unsigned char *cmd = (unsigned char *)scp->cmnd;
1226         unsigned char * arr;
1227         int host_no = devip->sdbg_host->shost->host_no;
1228         int n, ret, alen, rlen;
1229         int port_group_a, port_group_b, port_a, port_b;
1230
1231         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1232                 + cmd[9]);
1233
1234         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1235         if (! arr)
1236                 return DID_REQUEUE << 16;
1237         /*
1238          * EVPD page 0x88 states we have two ports, one
1239          * real and a fake port with no device connected.
1240          * So we create two port groups with one port each
1241          * and set the group with port B to unavailable.
1242          */
1243         port_a = 0x1; /* relative port A */
1244         port_b = 0x2; /* relative port B */
1245         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1246             (devip->channel & 0x7f);
1247         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1248             (devip->channel & 0x7f) + 0x80;
1249
1250         /*
1251          * The asymmetric access state is cycled according to the host_id.
1252          */
1253         n = 4;
1254         if (0 == scsi_debug_vpd_use_hostno) {
1255             arr[n++] = host_no % 3; /* Asymm access state */
1256             arr[n++] = 0x0F; /* claim: all states are supported */
1257         } else {
1258             arr[n++] = 0x0; /* Active/Optimized path */
1259             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1260         }
1261         arr[n++] = (port_group_a >> 8) & 0xff;
1262         arr[n++] = port_group_a & 0xff;
1263         arr[n++] = 0;    /* Reserved */
1264         arr[n++] = 0;    /* Status code */
1265         arr[n++] = 0;    /* Vendor unique */
1266         arr[n++] = 0x1;  /* One port per group */
1267         arr[n++] = 0;    /* Reserved */
1268         arr[n++] = 0;    /* Reserved */
1269         arr[n++] = (port_a >> 8) & 0xff;
1270         arr[n++] = port_a & 0xff;
1271         arr[n++] = 3;    /* Port unavailable */
1272         arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1273         arr[n++] = (port_group_b >> 8) & 0xff;
1274         arr[n++] = port_group_b & 0xff;
1275         arr[n++] = 0;    /* Reserved */
1276         arr[n++] = 0;    /* Status code */
1277         arr[n++] = 0;    /* Vendor unique */
1278         arr[n++] = 0x1;  /* One port per group */
1279         arr[n++] = 0;    /* Reserved */
1280         arr[n++] = 0;    /* Reserved */
1281         arr[n++] = (port_b >> 8) & 0xff;
1282         arr[n++] = port_b & 0xff;
1283
1284         rlen = n - 4;
1285         arr[0] = (rlen >> 24) & 0xff;
1286         arr[1] = (rlen >> 16) & 0xff;
1287         arr[2] = (rlen >> 8) & 0xff;
1288         arr[3] = rlen & 0xff;
1289
1290         /*
1291          * Return the smallest value of either
1292          * - The allocated length
1293          * - The constructed command length
1294          * - The maximum array size
1295          */
1296         rlen = min(alen,n);
1297         ret = fill_from_dev_buffer(scp, arr,
1298                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1299         kfree(arr);
1300         return ret;
1301 }
1302
1303 /* <<Following mode page info copied from ST318451LW>> */
1304
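/* In the mode page builders below, pcontrol follows the MODE SENSE page
 * control field: 0 returns current values, 1 the changeable mask and 2
 * the defaults; each function returns the page length in bytes.
 */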
1305 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1306 {       /* Read-Write Error Recovery page for mode_sense */
1307         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1308                                         5, 0, 0xff, 0xff};
1309
1310         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1311         if (1 == pcontrol)
1312                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1313         return sizeof(err_recov_pg);
1314 }
1315
1316 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1317 {       /* Disconnect-Reconnect page for mode_sense */
1318         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1319                                          0, 0, 0, 0, 0, 0, 0, 0};
1320
1321         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1322         if (1 == pcontrol)
1323                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1324         return sizeof(disconnect_pg);
1325 }
1326
1327 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1328 {       /* Format device page for mode_sense */
1329         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1330                                      0, 0, 0, 0, 0, 0, 0, 0,
1331                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1332
1333         memcpy(p, format_pg, sizeof(format_pg));
1334         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1335         p[11] = sdebug_sectors_per & 0xff;
1336         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1337         p[13] = scsi_debug_sector_size & 0xff;
1338         if (scsi_debug_removable)
1339                 p[20] |= 0x20; /* should agree with INQUIRY */
1340         if (1 == pcontrol)
1341                 memset(p + 2, 0, sizeof(format_pg) - 2);
1342         return sizeof(format_pg);
1343 }
1344
1345 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1346 {       /* Caching page for mode_sense */
1347         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1348                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1349         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1350                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1351
1352         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1353                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1354         memcpy(p, caching_pg, sizeof(caching_pg));
1355         if (1 == pcontrol)
1356                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1357         else if (2 == pcontrol)
1358                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1359         return sizeof(caching_pg);
1360 }
1361
1362 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1363 {       /* Control mode page for mode_sense */
1364         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1365                                         0, 0, 0, 0};
1366         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1367                                      0, 0, 0x2, 0x4b};
1368
1369         if (scsi_debug_dsense)
1370                 ctrl_m_pg[2] |= 0x4;
1371         else
1372                 ctrl_m_pg[2] &= ~0x4;
1373
1374         if (scsi_debug_ato)
1375                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1376
1377         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1378         if (1 == pcontrol)
1379                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1380         else if (2 == pcontrol)
1381                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1382         return sizeof(ctrl_m_pg);
1383 }
1384
1385
1386 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1387 {       /* Informational Exceptions control mode page for mode_sense */
1388         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1389                                        0, 0, 0x0, 0x0};
1390         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1391                                       0, 0, 0x0, 0x0};
1392
1393         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1394         if (1 == pcontrol)
1395                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1396         else if (2 == pcontrol)
1397                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1398         return sizeof(iec_m_pg);
1399 }
1400
1401 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1402 {       /* SAS SSP mode page - short format for mode_sense */
1403         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1404                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1405
1406         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1407         if (1 == pcontrol)
1408                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1409         return sizeof(sas_sf_m_pg);
1410 }
1411
1412
1413 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1414                               int target_dev_id)
1415 {       /* SAS phy control and discover mode page for mode_sense */
1416         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1417                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1418                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1419                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1420                     0x2, 0, 0, 0, 0, 0, 0, 0,
1421                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1422                     0, 0, 0, 0, 0, 0, 0, 0,
1423                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1424                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1425                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1426                     0x3, 0, 0, 0, 0, 0, 0, 0,
1427                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1428                     0, 0, 0, 0, 0, 0, 0, 0,
1429                 };
1430         int port_a, port_b;
1431
1432         port_a = target_dev_id + 1;
1433         port_b = port_a + 1;
1434         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1435         p[20] = (port_a >> 24);
1436         p[21] = (port_a >> 16) & 0xff;
1437         p[22] = (port_a >> 8) & 0xff;
1438         p[23] = port_a & 0xff;
1439         p[48 + 20] = (port_b >> 24);
1440         p[48 + 21] = (port_b >> 16) & 0xff;
1441         p[48 + 22] = (port_b >> 8) & 0xff;
1442         p[48 + 23] = port_b & 0xff;
1443         if (1 == pcontrol)
1444                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1445         return sizeof(sas_pcd_m_pg);
1446 }
1447
1448 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1449 {       /* SAS SSP shared protocol specific port mode subpage */
1450         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1451                     0, 0, 0, 0, 0, 0, 0, 0,
1452                 };
1453
1454         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1455         if (1 == pcontrol)
1456                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1457         return sizeof(sas_sha_m_pg);
1458 }
1459
1460 #define SDEBUG_MAX_MSENSE_SZ 256
1461
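/*
 * Build a MODE SENSE (6 or 10 byte) response: a mode parameter header, an
 * optional short or long block descriptor (direct-access devices with DBD
 * clear only), then the requested mode page(s), truncated to the CDB's
 * allocation length.
 */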
1462 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1463                            struct sdebug_dev_info * devip)
1464 {
1465         unsigned char dbd, llbaa;
1466         int pcontrol, pcode, subpcode, bd_len;
1467         unsigned char dev_spec;
1468         int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1469         unsigned char * ap;
1470         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1471         unsigned char *cmd = (unsigned char *)scp->cmnd;
1472
1473         errsts = check_readiness(scp, UAS_ONLY, devip);
1474         if (errsts)
1475                 return errsts;
1476         dbd = !!(cmd[1] & 0x8);
1477         pcontrol = (cmd[2] & 0xc0) >> 6;
1478         pcode = cmd[2] & 0x3f;
1479         subpcode = cmd[3];
1480         msense_6 = (MODE_SENSE == cmd[0]);
1481         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1482         if ((0 == scsi_debug_ptype) && (0 == dbd))
1483                 bd_len = llbaa ? 16 : 8;
1484         else
1485                 bd_len = 0;
1486         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1487         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1488         if (0x3 == pcontrol) {  /* Saving values not supported */
1489                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1490                 return check_condition_result;
1491         }
1492         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1493                         (devip->target * 1000) - 3;
1494         /* set DPOFUA bit for disks */
1495         if (0 == scsi_debug_ptype)
1496                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1497         else
1498                 dev_spec = 0x0;
1499         if (msense_6) {
1500                 arr[2] = dev_spec;
1501                 arr[3] = bd_len;
1502                 offset = 4;
1503         } else {
1504                 arr[3] = dev_spec;
1505                 if (16 == bd_len)
1506                         arr[4] = 0x1;   /* set LONGLBA bit */
1507                 arr[7] = bd_len;        /* assume 255 or less */
1508                 offset = 8;
1509         }
1510         ap = arr + offset;
1511         if ((bd_len > 0) && (!sdebug_capacity))
1512                 sdebug_capacity = get_sdebug_capacity();
1513
1514         if (8 == bd_len) {
1515                 if (sdebug_capacity > 0xfffffffe) {
1516                         ap[0] = 0xff;
1517                         ap[1] = 0xff;
1518                         ap[2] = 0xff;
1519                         ap[3] = 0xff;
1520                 } else {
1521                         ap[0] = (sdebug_capacity >> 24) & 0xff;
1522                         ap[1] = (sdebug_capacity >> 16) & 0xff;
1523                         ap[2] = (sdebug_capacity >> 8) & 0xff;
1524                         ap[3] = sdebug_capacity & 0xff;
1525                 }
1526                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1527                 ap[7] = scsi_debug_sector_size & 0xff;
1528                 offset += bd_len;
1529                 ap = arr + offset;
1530         } else if (16 == bd_len) {
1531                 unsigned long long capac = sdebug_capacity;
1532
1533                 for (k = 0; k < 8; ++k, capac >>= 8)
1534                         ap[7 - k] = capac & 0xff;
1535                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1536                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1537                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1538                 ap[15] = scsi_debug_sector_size & 0xff;
1539                 offset += bd_len;
1540                 ap = arr + offset;
1541         }
1542
1543         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1544                 /* TODO: Control Extension page */
1545                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1546                                 0);
1547                 return check_condition_result;
1548         }
1549         switch (pcode) {
1550         case 0x1:       /* Read-Write error recovery page, direct access */
1551                 len = resp_err_recov_pg(ap, pcontrol, target);
1552                 offset += len;
1553                 break;
1554         case 0x2:       /* Disconnect-Reconnect page, all devices */
1555                 len = resp_disconnect_pg(ap, pcontrol, target);
1556                 offset += len;
1557                 break;
1558         case 0x3:       /* Format device page, direct access */
1559                 len = resp_format_pg(ap, pcontrol, target);
1560                 offset += len;
1561                 break;
1562         case 0x8:       /* Caching page, direct access */
1563                 len = resp_caching_pg(ap, pcontrol, target);
1564                 offset += len;
1565                 break;
1566         case 0xa:       /* Control Mode page, all devices */
1567                 len = resp_ctrl_m_pg(ap, pcontrol, target);
1568                 offset += len;
1569                 break;
1570         case 0x19:      /* if spc==1 then sas phy, control+discover */
1571                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1572                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1573                                         INVALID_FIELD_IN_CDB, 0);
1574                         return check_condition_result;
1575                 }
1576                 len = 0;
1577                 if ((0x0 == subpcode) || (0xff == subpcode))
1578                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1579                 if ((0x1 == subpcode) || (0xff == subpcode))
1580                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1581                                                   target_dev_id);
1582                 if ((0x2 == subpcode) || (0xff == subpcode))
1583                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
1584                 offset += len;
1585                 break;
1586         case 0x1c:      /* Informational Exceptions Mode page, all devices */
1587                 len = resp_iec_m_pg(ap, pcontrol, target);
1588                 offset += len;
1589                 break;
1590         case 0x3f:      /* Read all Mode pages */
1591                 if ((0 == subpcode) || (0xff == subpcode)) {
1592                         len = resp_err_recov_pg(ap, pcontrol, target);
1593                         len += resp_disconnect_pg(ap + len, pcontrol, target);
1594                         len += resp_format_pg(ap + len, pcontrol, target);
1595                         len += resp_caching_pg(ap + len, pcontrol, target);
1596                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1597                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1598                         if (0xff == subpcode) {
1599                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1600                                                   target, target_dev_id);
1601                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1602                         }
1603                         len += resp_iec_m_pg(ap + len, pcontrol, target);
1604                 } else {
1605                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1606                                         INVALID_FIELD_IN_CDB, 0);
1607                         return check_condition_result;
1608                 }
1609                 offset += len;
1610                 break;
1611         default:
1612                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1613                                 0);
1614                 return check_condition_result;
1615         }
1616         if (msense_6)
1617                 arr[0] = offset - 1;
1618         else {
1619                 arr[0] = ((offset - 2) >> 8) & 0xff;
1620                 arr[1] = (offset - 2) & 0xff;
1621         }
1622         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1623 }
1624
1625 #define SDEBUG_MAX_MSELECT_SZ 512
1626
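/*
 * Handle MODE SELECT (6 or 10 byte): fetch the parameter list, validate the
 * header and page lengths, then copy the changed bytes into the Caching,
 * Control or Informational Exceptions mode page. A successful change raises
 * a MODE PARAMETERS CHANGED unit attention.
 */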
1627 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1628                             struct sdebug_dev_info * devip)
1629 {
1630         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1631         int param_len, res, errsts, mpage;
1632         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1633         unsigned char *cmd = (unsigned char *)scp->cmnd;
1634
1635         errsts = check_readiness(scp, UAS_ONLY, devip);
1636         if (errsts)
1637                 return errsts;
1638         memset(arr, 0, sizeof(arr));
1639         pf = cmd[1] & 0x10;
1640         sp = cmd[1] & 0x1;
1641         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1642         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1643                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1644                                 INVALID_FIELD_IN_CDB, 0);
1645                 return check_condition_result;
1646         }
1647         res = fetch_to_dev_buffer(scp, arr, param_len);
1648         if (-1 == res)
1649                 return (DID_ERROR << 16);
1650         else if ((res < param_len) &&
1651                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1652                 sdev_printk(KERN_INFO, scp->device,
1653                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
1654                             __func__, param_len, res);
1655         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1656         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1657         if (md_len > 2) {
1658                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1659                                 INVALID_FIELD_IN_PARAM_LIST, 0);
1660                 return check_condition_result;
1661         }
1662         off = bd_len + (mselect6 ? 4 : 8);
1663         mpage = arr[off] & 0x3f;
1664         ps = !!(arr[off] & 0x80);
1665         if (ps) {
1666                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1667                                 INVALID_FIELD_IN_PARAM_LIST, 0);
1668                 return check_condition_result;
1669         }
1670         spf = !!(arr[off] & 0x40);
1671         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1672                        (arr[off + 1] + 2);
1673         if ((pg_len + off) > param_len) {
1674                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1675                                 PARAMETER_LIST_LENGTH_ERR, 0);
1676                 return check_condition_result;
1677         }
1678         switch (mpage) {
1679         case 0x8:      /* Caching Mode page */
1680                 if (caching_pg[1] == arr[off + 1]) {
1681                         memcpy(caching_pg + 2, arr + off + 2,
1682                                sizeof(caching_pg) - 2);
1683                         goto set_mode_changed_ua;
1684                 }
1685                 break;
1686         case 0xa:      /* Control Mode page */
1687                 if (ctrl_m_pg[1] == arr[off + 1]) {
1688                         memcpy(ctrl_m_pg + 2, arr + off + 2,
1689                                sizeof(ctrl_m_pg) - 2);
1690                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1691                         goto set_mode_changed_ua;
1692                 }
1693                 break;
1694         case 0x1c:      /* Informational Exceptions Mode page */
1695                 if (iec_m_pg[1] == arr[off + 1]) {
1696                         memcpy(iec_m_pg + 2, arr + off + 2,
1697                                sizeof(iec_m_pg) - 2);
1698                         goto set_mode_changed_ua;
1699                 }
1700                 break;
1701         default:
1702                 break;
1703         }
1704         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1705                         INVALID_FIELD_IN_PARAM_LIST, 0);
1706         return check_condition_result;
1707 set_mode_changed_ua:
1708         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
1709         return 0;
1710 }
1711
1712 static int resp_temp_l_pg(unsigned char * arr)
1713 {
1714         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1715                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
1716                 };
1717
1718         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1719         return sizeof(temp_l_pg);
1720 }
1721
1722 static int resp_ie_l_pg(unsigned char * arr)
1723 {
1724         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1725                 };
1726
1727         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1728         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
1729                 arr[4] = THRESHOLD_EXCEEDED;
1730                 arr[5] = 0xff;
1731         }
1732         return sizeof(ie_l_pg);
1733 }
1734
1735 #define SDEBUG_MAX_LSENSE_SZ 512
1736
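/*
 * LOG SENSE: supports the Supported Log Pages (0x0), Temperature (0xd) and
 * Informational Exceptions (0x2f) pages, plus the corresponding subpage
 * lists when the subpage code is 0xff.
 */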
1737 static int resp_log_sense(struct scsi_cmnd * scp,
1738                           struct sdebug_dev_info * devip)
1739 {
1740         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1741         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1742         unsigned char *cmd = (unsigned char *)scp->cmnd;
1743
1744         errsts = check_readiness(scp, UAS_ONLY, devip);
1745         if (errsts)
1746                 return errsts;
1747         memset(arr, 0, sizeof(arr));
1748         ppc = cmd[1] & 0x2;
1749         sp = cmd[1] & 0x1;
1750         if (ppc || sp) {
1751                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1752                                 INVALID_FIELD_IN_CDB, 0);
1753                 return check_condition_result;
1754         }
1755         pcontrol = (cmd[2] & 0xc0) >> 6;
1756         pcode = cmd[2] & 0x3f;
1757         subpcode = cmd[3] & 0xff;
1758         alloc_len = (cmd[7] << 8) + cmd[8];
1759         arr[0] = pcode;
1760         if (0 == subpcode) {
1761                 switch (pcode) {
1762                 case 0x0:       /* Supported log pages log page */
1763                         n = 4;
1764                         arr[n++] = 0x0;         /* this page */
1765                         arr[n++] = 0xd;         /* Temperature */
1766                         arr[n++] = 0x2f;        /* Informational exceptions */
1767                         arr[3] = n - 4;
1768                         break;
1769                 case 0xd:       /* Temperature log page */
1770                         arr[3] = resp_temp_l_pg(arr + 4);
1771                         break;
1772                 case 0x2f:      /* Informational exceptions log page */
1773                         arr[3] = resp_ie_l_pg(arr + 4);
1774                         break;
1775                 default:
1776                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1777                                         INVALID_FIELD_IN_CDB, 0);
1778                         return check_condition_result;
1779                 }
1780         } else if (0xff == subpcode) {
1781                 arr[0] |= 0x40;
1782                 arr[1] = subpcode;
1783                 switch (pcode) {
1784                 case 0x0:       /* Supported log pages and subpages log page */
1785                         n = 4;
1786                         arr[n++] = 0x0;
1787                         arr[n++] = 0x0;         /* 0,0 page */
1788                         arr[n++] = 0x0;
1789                         arr[n++] = 0xff;        /* this page */
1790                         arr[n++] = 0xd;
1791                         arr[n++] = 0x0;         /* Temperature */
1792                         arr[n++] = 0x2f;
1793                         arr[n++] = 0x0; /* Informational exceptions */
1794                         arr[3] = n - 4;
1795                         break;
1796                 case 0xd:       /* Temperature subpages */
1797                         n = 4;
1798                         arr[n++] = 0xd;
1799                         arr[n++] = 0x0;         /* Temperature */
1800                         arr[3] = n - 4;
1801                         break;
1802                 case 0x2f:      /* Informational exceptions subpages */
1803                         n = 4;
1804                         arr[n++] = 0x2f;
1805                         arr[n++] = 0x0;         /* Informational exceptions */
1806                         arr[3] = n - 4;
1807                         break;
1808                 default:
1809                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1810                                         INVALID_FIELD_IN_CDB, 0);
1811                         return check_condition_result;
1812                 }
1813         } else {
1814                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1815                                 INVALID_FIELD_IN_CDB, 0);
1816                 return check_condition_result;
1817         }
1818         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1819         return fill_from_dev_buffer(scp, arr,
1820                     min(len, SDEBUG_MAX_LSENSE_SZ));
1821 }
1822
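/*
 * Check that the LBA range lies within the advertised capacity and that the
 * transfer length does not exceed the size of the backing store.
 */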
1823 static int check_device_access_params(struct scsi_cmnd *scp,
1824                                       unsigned long long lba, unsigned int num)
1825 {
1826         if (lba + num > sdebug_capacity) {
1827                 mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1828                 return check_condition_result;
1829         }
1830         /* transfer length excessive (tie in to block limits VPD page) */
1831         if (num > sdebug_store_sectors) {
1832                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1833                 return check_condition_result;
1834         }
1835         return 0;
1836 }
1837
1838 /* Returns number of bytes copied or -1 if error. */
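/* The fake store may be smaller than the reported capacity; accesses wrap. */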
1839 static int do_device_access(struct scsi_cmnd *scmd,
1840                             unsigned long long lba, unsigned int num, int write)
1841 {
1842         int ret;
1843         unsigned long long block, rest = 0;
1844         struct scsi_data_buffer *sdb;
1845         enum dma_data_direction dir;
1846         size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1847                        off_t);
1848
1849         if (write) {
1850                 sdb = scsi_out(scmd);
1851                 dir = DMA_TO_DEVICE;
1852                 func = sg_pcopy_to_buffer;
1853         } else {
1854                 sdb = scsi_in(scmd);
1855                 dir = DMA_FROM_DEVICE;
1856                 func = sg_pcopy_from_buffer;
1857         }
1858
1859         if (!sdb->length)
1860                 return 0;
1861         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1862                 return -1;
1863
1864         block = do_div(lba, sdebug_store_sectors);
1865         if (block + num > sdebug_store_sectors)
1866                 rest = block + num - sdebug_store_sectors;
1867
1868         ret = func(sdb->table.sgl, sdb->table.nents,
1869                    fake_storep + (block * scsi_debug_sector_size),
1870                    (num - rest) * scsi_debug_sector_size, 0);
1871         if (ret != (num - rest) * scsi_debug_sector_size)
1872                 return ret;
1873
1874         if (rest) {
1875                 ret += func(sdb->table.sgl, sdb->table.nents,
1876                             fake_storep, rest * scsi_debug_sector_size,
1877                             (num - rest) * scsi_debug_sector_size);
1878         }
1879
1880         return ret;
1881 }
1882
1883 static __be16 dif_compute_csum(const void *buf, int len)
1884 {
1885         __be16 csum;
1886
1887         if (scsi_debug_guard)
1888                 csum = (__force __be16)ip_compute_csum(buf, len);
1889         else
1890                 csum = cpu_to_be16(crc_t10dif(buf, len));
1891
1892         return csum;
1893 }
1894
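/*
 * Verify one sector's protection tuple: the guard (CRC or IP checksum, per
 * scsi_debug_guard) and, for type 1/2 protection, the reference tag.
 * Returns 0 on success, 0x01 on a guard check failure and 0x03 on a
 * reference tag failure (callers pass this back as the ASCQ).
 */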
1895 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1896                       sector_t sector, u32 ei_lba)
1897 {
1898         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1899
1900         if (sdt->guard_tag != csum) {
1901                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1902                         __func__,
1903                         (unsigned long)sector,
1904                         be16_to_cpu(sdt->guard_tag),
1905                         be16_to_cpu(csum));
1906                 return 0x01;
1907         }
1908         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1909             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1910                 pr_err("%s: REF check failed on sector %lu\n",
1911                         __func__, (unsigned long)sector);
1912                 return 0x03;
1913         }
1914         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1915             be32_to_cpu(sdt->ref_tag) != ei_lba) {
1916                 pr_err("%s: REF check failed on sector %lu\n",
1917                         __func__, (unsigned long)sector);
1918                 return 0x03;
1919         }
1920         return 0;
1921 }
1922
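/*
 * Copy protection information between dif_storep and the command's
 * protection scatter-gather list: to the sgl on reads, from it on writes,
 * wrapping at the end of dif_storep.
 */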
1923 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1924                           unsigned int sectors, bool read)
1925 {
1926         size_t resid;
1927         void *paddr;
1928         const void *dif_store_end = dif_storep + sdebug_store_sectors;
1929         struct sg_mapping_iter miter;
1930
1931         /* Bytes of protection data to copy into sgl */
1932         resid = sectors * sizeof(*dif_storep);
1933
1934         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
1935                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
1936                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
1937
1938         while (sg_miter_next(&miter) && resid > 0) {
1939                 size_t len = min(miter.length, resid);
1940                 void *start = dif_store(sector);
1941                 size_t rest = 0;
1942
1943                 if (dif_store_end < start + len)
1944                         rest = start + len - dif_store_end;
1945
1946                 paddr = miter.addr;
1947
1948                 if (read)
1949                         memcpy(paddr, start, len - rest);
1950                 else
1951                         memcpy(start, paddr, len - rest);
1952
1953                 if (rest) {
1954                         if (read)
1955                                 memcpy(paddr + len - rest, dif_storep, rest);
1956                         else
1957                                 memcpy(dif_storep, paddr + len - rest, rest);
1958                 }
1959
1960                 sector += len / sizeof(*dif_storep);
1961                 resid -= len;
1962         }
1963         sg_miter_stop(&miter);
1964 }
1965
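/*
 * Verify the stored protection tuples for each sector about to be read
 * (sectors whose application tag is 0xffff are skipped), then copy the
 * protection data to the command's protection sgl.
 */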
1966 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1967                             unsigned int sectors, u32 ei_lba)
1968 {
1969         unsigned int i;
1970         struct sd_dif_tuple *sdt;
1971         sector_t sector;
1972
1973         for (i = 0; i < sectors; i++, ei_lba++) {
1974                 int ret;
1975
1976                 sector = start_sec + i;
1977                 sdt = dif_store(sector);
1978
1979                 if (sdt->app_tag == cpu_to_be16(0xffff))
1980                         continue;
1981
1982                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1983                 if (ret) {
1984                         dif_errors++;
1985                         return ret;
1986                 }
1987         }
1988
1989         dif_copy_prot(SCpnt, start_sec, sectors, true);
1990         dix_reads++;
1991
1992         return 0;
1993 }
1994
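/*
 * READ: optionally fake an unrecoverable read error around
 * OPT_MEDIUM_ERR_ADDR, verify DIX protection data when enabled, then copy
 * from the fake store under the atomic_rw read lock.
 */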
1995 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1996                      unsigned int num, u32 ei_lba)
1997 {
1998         unsigned long iflags;
1999         int ret;
2000
2001         ret = check_device_access_params(SCpnt, lba, num);
2002         if (ret)
2003                 return ret;
2004
2005         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2006             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2007             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2008                 /* claim unrecoverable read error */
2009                 mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2010                 /* set info field and valid bit for fixed descriptor */
2011                 if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) {
2012                         SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */
2013                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2014                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2015                         SCpnt->sense_buffer[3] = (ret >> 24) & 0xff;
2016                         SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
2017                         SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
2018                         SCpnt->sense_buffer[6] = ret & 0xff;
2019                 }
2020                 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
2021                 return check_condition_result;
2022         }
2023
2024         read_lock_irqsave(&atomic_rw, iflags);
2025
2026         /* DIX + T10 DIF */
2027         if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2028                 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
2029
2030                 if (prot_ret) {
2031                         read_unlock_irqrestore(&atomic_rw, iflags);
2032                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret);
2033                         return illegal_condition_result;
2034                 }
2035         }
2036
2037         ret = do_device_access(SCpnt, lba, num, 0);
2038         read_unlock_irqrestore(&atomic_rw, iflags);
2039         if (ret == -1)
2040                 return DID_ERROR << 16;
2041
2042         scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
2043
2044         return 0;
2045 }
2046
2047 static void dump_sector(unsigned char *buf, int len)
2048 {
2049         int i, j, n;
2050
2051         pr_err(">>> Sector Dump <<<\n");
2052         for (i = 0 ; i < len ; i += 16) {
2053                 char b[128];
2054
2055                 for (j = 0, n = 0; j < 16; j++) {
2056                         unsigned char c = buf[i+j];
2057
2058                         if (c >= 0x20 && c < 0x7e)
2059                                 n += scnprintf(b + n, sizeof(b) - n,
2060                                                " %c ", buf[i+j]);
2061                         else
2062                                 n += scnprintf(b + n, sizeof(b) - n,
2063                                                "%02x ", buf[i+j]);
2064                 }
2065                 pr_err("%04d: %s\n", i, b);
2066         }
2067 }
2068
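/*
 * Walk the data and protection scatter-gather lists in lockstep and verify
 * each sector's protection tuple before the write is applied; on success
 * the protection data is copied into dif_storep.
 */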
2069 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2070                              unsigned int sectors, u32 ei_lba)
2071 {
2072         int ret;
2073         struct sd_dif_tuple *sdt;
2074         void *daddr;
2075         sector_t sector = start_sec;
2076         int ppage_offset;
2077         int dpage_offset;
2078         struct sg_mapping_iter diter;
2079         struct sg_mapping_iter piter;
2080
2081         BUG_ON(scsi_sg_count(SCpnt) == 0);
2082         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2083
2084         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2085                         scsi_prot_sg_count(SCpnt),
2086                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2087         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2088                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2089
2090         /* For each protection page */
2091         while (sg_miter_next(&piter)) {
2092                 dpage_offset = 0;
2093                 if (WARN_ON(!sg_miter_next(&diter))) {
2094                         ret = 0x01;
2095                         goto out;
2096                 }
2097
2098                 for (ppage_offset = 0; ppage_offset < piter.length;
2099                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2100                         /* If we're at the end of the current
2101                          * data page, advance to the next one.
2102                          */
2103                         if (dpage_offset >= diter.length) {
2104                                 if (WARN_ON(!sg_miter_next(&diter))) {
2105                                         ret = 0x01;
2106                                         goto out;
2107                                 }
2108                                 dpage_offset = 0;
2109                         }
2110
2111                         sdt = piter.addr + ppage_offset;
2112                         daddr = diter.addr + dpage_offset;
2113
2114                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2115                         if (ret) {
2116                                 dump_sector(daddr, scsi_debug_sector_size);
2117                                 goto out;
2118                         }
2119
2120                         sector++;
2121                         ei_lba++;
2122                         dpage_offset += scsi_debug_sector_size;
2123                 }
2124                 diter.consumed = dpage_offset;
2125                 sg_miter_stop(&diter);
2126         }
2127         sg_miter_stop(&piter);
2128
2129         dif_copy_prot(SCpnt, start_sec, sectors, false);
2130         dix_writes++;
2131
2132         return 0;
2133
2134 out:
2135         dif_errors++;
2136         sg_miter_stop(&diter);
2137         sg_miter_stop(&piter);
2138         return ret;
2139 }
2140
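/*
 * The provisioning map (map_storep) holds one bit per unmap granule of
 * scsi_debug_unmap_granularity blocks, shifted by any unmap alignment.
 * For example, with granularity 8 and alignment 0, LBA 17 belongs to
 * granule (bit) 2, which starts at LBA 16.
 */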
2141 static unsigned long lba_to_map_index(sector_t lba)
2142 {
2143         if (scsi_debug_unmap_alignment) {
2144                 lba += scsi_debug_unmap_granularity -
2145                         scsi_debug_unmap_alignment;
2146         }
2147         do_div(lba, scsi_debug_unmap_granularity);
2148
2149         return lba;
2150 }
2151
2152 static sector_t map_index_to_lba(unsigned long index)
2153 {
2154         sector_t lba = index * scsi_debug_unmap_granularity;
2155
2156         if (scsi_debug_unmap_alignment) {
2157                 lba -= scsi_debug_unmap_granularity -
2158                         scsi_debug_unmap_alignment;
2159         }
2160
2161         return lba;
2162 }
2163
2164 static unsigned int map_state(sector_t lba, unsigned int *num)
2165 {
2166         sector_t end;
2167         unsigned int mapped;
2168         unsigned long index;
2169         unsigned long next;
2170
2171         index = lba_to_map_index(lba);
2172         mapped = test_bit(index, map_storep);
2173
2174         if (mapped)
2175                 next = find_next_zero_bit(map_storep, map_size, index);
2176         else
2177                 next = find_next_bit(map_storep, map_size, index);
2178
2179         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2180         *num = end - lba;
2181
2182         return mapped;
2183 }
2184
2185 static void map_region(sector_t lba, unsigned int len)
2186 {
2187         sector_t end = lba + len;
2188
2189         while (lba < end) {
2190                 unsigned long index = lba_to_map_index(lba);
2191
2192                 if (index < map_size)
2193                         set_bit(index, map_storep);
2194
2195                 lba = map_index_to_lba(index + 1);
2196         }
2197 }
2198
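/*
 * Clear the provisioning bit only for granules wholly contained in the
 * range; when scsi_debug_lbprz is set the backing data is zeroed and any
 * stored protection information is reset to 0xff.
 */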
2199 static void unmap_region(sector_t lba, unsigned int len)
2200 {
2201         sector_t end = lba + len;
2202
2203         while (lba < end) {
2204                 unsigned long index = lba_to_map_index(lba);
2205
2206                 if (lba == map_index_to_lba(index) &&
2207                     lba + scsi_debug_unmap_granularity <= end &&
2208                     index < map_size) {
2209                         clear_bit(index, map_storep);
2210                         if (scsi_debug_lbprz) {
2211                                 memset(fake_storep +
2212                                        lba * scsi_debug_sector_size, 0,
2213                                        scsi_debug_sector_size *
2214                                        scsi_debug_unmap_granularity);
2215                         }
2216                         if (dif_storep) {
2217                                 memset(dif_storep + lba, 0xff,
2218                                        sizeof(*dif_storep) *
2219                                        scsi_debug_unmap_granularity);
2220                         }
2221                 }
2222                 lba = map_index_to_lba(index + 1);
2223         }
2224 }
2225
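/*
 * WRITE: verify incoming DIX protection data when enabled, copy the data
 * into the fake store under the atomic_rw write lock, and mark the range as
 * mapped when logical block provisioning is active.
 */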
2226 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2227                       unsigned int num, u32 ei_lba)
2228 {
2229         unsigned long iflags;
2230         int ret;
2231
2232         ret = check_device_access_params(SCpnt, lba, num);
2233         if (ret)
2234                 return ret;
2235
2236         write_lock_irqsave(&atomic_rw, iflags);
2237
2238         /* DIX + T10 DIF */
2239         if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2240                 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2241
2242                 if (prot_ret) {
2243                         write_unlock_irqrestore(&atomic_rw, iflags);
2244                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10,
2245                                         prot_ret);
2246                         return illegal_condition_result;
2247                 }
2248         }
2249
2250         ret = do_device_access(SCpnt, lba, num, 1);
2251         if (scsi_debug_lbp())
2252                 map_region(lba, num);
2253         write_unlock_irqrestore(&atomic_rw, iflags);
2254         if (-1 == ret)
2255                 return (DID_ERROR << 16);
2256         else if ((ret < (num * scsi_debug_sector_size)) &&
2257                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2258                 sdev_printk(KERN_INFO, SCpnt->device,
2259                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2260                             my_name, num * scsi_debug_sector_size, ret);
2261
2262         return 0;
2263 }
2264
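/*
 * WRITE SAME: with the UNMAP bit set (and provisioning enabled) the range
 * is unmapped; otherwise one logical block is fetched from the initiator
 * and replicated across the range.
 */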
2265 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2266                       unsigned int num, u32 ei_lba, unsigned int unmap)
2267 {
2268         unsigned long iflags;
2269         unsigned long long i;
2270         int ret;
2271
2272         ret = check_device_access_params(scmd, lba, num);
2273         if (ret)
2274                 return ret;
2275
2276         if (num > scsi_debug_write_same_length) {
2277                 mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2278                                 0);
2279                 return check_condition_result;
2280         }
2281
2282         write_lock_irqsave(&atomic_rw, iflags);
2283
2284         if (unmap && scsi_debug_lbp()) {
2285                 unmap_region(lba, num);
2286                 goto out;
2287         }
2288
2289         /* Else fetch one logical block */
2290         ret = fetch_to_dev_buffer(scmd,
2291                                   fake_storep + (lba * scsi_debug_sector_size),
2292                                   scsi_debug_sector_size);
2293
2294         if (-1 == ret) {
2295                 write_unlock_irqrestore(&atomic_rw, iflags);
2296                 return (DID_ERROR << 16);
2297         } else if ((ret < (num * scsi_debug_sector_size)) &&
2298                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2299                 sdev_printk(KERN_INFO, scmd->device,
2300                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2301                             my_name, "write same",
2302                             num * scsi_debug_sector_size, ret);
2303
2304         /* Copy first sector to remaining blocks */
2305         for (i = 1 ; i < num ; i++)
2306                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2307                        fake_storep + (lba * scsi_debug_sector_size),
2308                        scsi_debug_sector_size);
2309
2310         if (scsi_debug_lbp())
2311                 map_region(lba, num);
2312 out:
2313         write_unlock_irqrestore(&atomic_rw, iflags);
2314
2315         return 0;
2316 }
2317
2318 struct unmap_block_desc {
2319         __be64  lba;
2320         __be32  blocks;
2321         __be32  __reserved;
2322 };
2323
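/*
 * UNMAP: parse the parameter list's block descriptors and unmap each
 * described extent while holding the atomic_rw write lock.
 */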
2324 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2325 {
2326         unsigned char *buf;
2327         struct unmap_block_desc *desc;
2328         unsigned int i, payload_len, descriptors;
2329         int ret;
2330         unsigned long iflags;
2331
2332         ret = check_readiness(scmd, UAS_ONLY, devip);
2333         if (ret)
2334                 return ret;
2335
2336         payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2337         BUG_ON(scsi_bufflen(scmd) != payload_len);
2338
2339         descriptors = (payload_len - 8) / 16;
2340
2341         buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2342         if (!buf)
2343                 return check_condition_result;
2344
2345         scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2346
2347         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2348         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2349
2350         desc = (void *)&buf[8];
2351
2352         write_lock_irqsave(&atomic_rw, iflags);
2353
2354         for (i = 0 ; i < descriptors ; i++) {
2355                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2356                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2357
2358                 ret = check_device_access_params(scmd, lba, num);
2359                 if (ret)
2360                         goto out;
2361
2362                 unmap_region(lba, num);
2363         }
2364
2365         ret = 0;
2366
2367 out:
2368         write_unlock_irqrestore(&atomic_rw, iflags);
2369         kfree(buf);
2370
2371         return ret;
2372 }
2373
2374 #define SDEBUG_GET_LBA_STATUS_LEN 32
2375
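/*
 * GET LBA STATUS: report whether the given LBA is mapped and how many
 * following blocks share that state (a single descriptor is returned).
 */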
2376 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2377                                struct sdebug_dev_info * devip)
2378 {
2379         unsigned long long lba;
2380         unsigned int alloc_len, mapped, num;
2381         unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2382         int ret;
2383
2384         ret = check_readiness(scmd, UAS_ONLY, devip);
2385         if (ret)
2386                 return ret;
2387
2388         lba = get_unaligned_be64(&scmd->cmnd[2]);
2389         alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2390
2391         if (alloc_len < 24)
2392                 return 0;
2393
2394         ret = check_device_access_params(scmd, lba, 1);
2395         if (ret)
2396                 return ret;
2397
2398         mapped = map_state(lba, &num);
2399
2400         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2401         put_unaligned_be32(20, &arr[0]);        /* Parameter Data Length */
2402         put_unaligned_be64(lba, &arr[8]);       /* LBA */
2403         put_unaligned_be32(num, &arr[16]);      /* Number of blocks */
2404         arr[20] = !mapped;                      /* mapped = 0, unmapped = 1 */
2405
2406         return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2407 }
2408
2409 #define SDEBUG_RLUN_ARR_SZ 256
2410
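/*
 * REPORT LUNS: list up to scsi_debug_max_luns LUNs (optionally skipping
 * LUN 0) and, for SELECT REPORT > 0, the REPORT LUNS well known LUN.
 */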
2411 static int resp_report_luns(struct scsi_cmnd * scp,
2412                             struct sdebug_dev_info * devip)
2413 {
2414         unsigned int alloc_len;
2415         int lun_cnt, i, upper, num, n;
2416         u64 wlun, lun;
2417         unsigned char *cmd = (unsigned char *)scp->cmnd;
2418         int select_report = (int)cmd[2];
2419         struct scsi_lun *one_lun;
2420         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2421         unsigned char * max_addr;
2422
2423         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2424         if ((alloc_len < 4) || (select_report > 2)) {
2425                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2426                                 0);
2427                 return check_condition_result;
2428         }
2429         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2430         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2431         lun_cnt = scsi_debug_max_luns;
2432         if (1 == select_report)
2433                 lun_cnt = 0;
2434         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2435                 --lun_cnt;
2436         wlun = (select_report > 0) ? 1 : 0;
2437         num = lun_cnt + wlun;
2438         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2439         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2440         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2441                             sizeof(struct scsi_lun)), num);
2442         if (n < num) {
2443                 wlun = 0;
2444                 lun_cnt = n;
2445         }
2446         one_lun = (struct scsi_lun *) &arr[8];
2447         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2448         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2449              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2450              i++, lun++) {
2451                 upper = (lun >> 8) & 0x3f;
2452                 if (upper)
2453                         one_lun[i].scsi_lun[0] =
2454                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2455                 one_lun[i].scsi_lun[1] = lun & 0xff;
2456         }
2457         if (wlun) {
2458                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2459                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2460                 i++;
2461         }
2462         alloc_len = (unsigned char *)(one_lun + i) - arr;
2463         return fill_from_dev_buffer(scp, arr,
2464                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2465 }
2466
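/*
 * XDWRITEREAD: XOR the command's data-out buffer into its data-in
 * (bidirectional) buffer in place.
 */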
2467 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2468                             unsigned int num, struct sdebug_dev_info *devip)
2469 {
2470         int j;
2471         unsigned char *kaddr, *buf;
2472         unsigned int offset;
2473         struct scsi_data_buffer *sdb = scsi_in(scp);
2474         struct sg_mapping_iter miter;
2475
2476         /* a temporary buffer is used here, though it would be better avoided */
2477         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2478         if (!buf) {
2479                 mk_sense_buffer(scp, NOT_READY,
2480                                 LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
2481                 return check_condition_result;
2482         }
2483
2484         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2485
2486         offset = 0;
2487         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2488                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
2489
2490         while (sg_miter_next(&miter)) {
2491                 kaddr = miter.addr;
2492                 for (j = 0; j < miter.length; j++)
2493                         *(kaddr + j) ^= *(buf + offset + j);
2494
2495                 offset += miter.length;
2496         }
2497         sg_miter_stop(&miter);
2498         kfree(buf);
2499
2500         return 0;
2501 }
2502
2503 /* Called when a queued command's delay timer or tasklet fires. */
2504 static void sdebug_q_cmd_complete(unsigned long indx)
2505 {
2506         int qa_indx;
2507         int retiring = 0;
2508         unsigned long iflags;
2509         struct sdebug_queued_cmd *sqcp;
2510         struct scsi_cmnd *scp;
2511         struct sdebug_dev_info *devip;
2512
2513         atomic_inc(&sdebug_completions);
2514         qa_indx = indx;
2515         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2516                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2517                 return;
2518         }
2519         spin_lock_irqsave(&queued_arr_lock, iflags);
2520         sqcp = &queued_arr[qa_indx];
2521         scp = sqcp->a_cmnd;
2522         if (NULL == scp) {
2523                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2524                 pr_err("%s: scp is NULL\n", __func__);
2525                 return;
2526         }
2527         devip = (struct sdebug_dev_info *)scp->device->hostdata;
2528         if (devip)
2529                 atomic_dec(&devip->num_in_q);
2530         else
2531                 pr_err("%s: devip=NULL\n", __func__);
2532         if (atomic_read(&retired_max_queue) > 0)
2533                 retiring = 1;
2534
2535         sqcp->a_cmnd = NULL;
2536         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2537                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2538                 pr_err("%s: Unexpected completion\n", __func__);
2539                 return;
2540         }
2541
2542         if (unlikely(retiring)) {       /* user has reduced max_queue */
2543                 int k, retval;
2544
2545                 retval = atomic_read(&retired_max_queue);
2546                 if (qa_indx >= retval) {
2547                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2548                         pr_err("%s: index %d too large\n", __func__, retval);
2549                         return;
2550                 }
2551                 k = find_last_bit(queued_in_use_bm, retval);
2552                 if ((k < scsi_debug_max_queue) || (k == retval))
2553                         atomic_set(&retired_max_queue, 0);
2554                 else
2555                         atomic_set(&retired_max_queue, k + 1);
2556         }
2557         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2558         scp->scsi_done(scp); /* callback to mid level */
2559 }
2560
2561 /* Called when a queued command's high-resolution timer fires. */
2562 static enum hrtimer_restart
2563 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
2564 {
2565         int qa_indx;
2566         int retiring = 0;
2567         unsigned long iflags;
2568         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
2569         struct sdebug_queued_cmd *sqcp;
2570         struct scsi_cmnd *scp;
2571         struct sdebug_dev_info *devip;
2572
2573         atomic_inc(&sdebug_completions);
2574         qa_indx = sd_hrtp->qa_indx;
2575         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2576                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2577                 goto the_end;
2578         }
2579         spin_lock_irqsave(&queued_arr_lock, iflags);
2580         sqcp = &queued_arr[qa_indx];
2581         scp = sqcp->a_cmnd;
2582         if (NULL == scp) {
2583                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2584                 pr_err("%s: scp is NULL\n", __func__);
2585                 goto the_end;
2586         }
2587         devip = (struct sdebug_dev_info *)scp->device->hostdata;
2588         if (devip)
2589                 atomic_dec(&devip->num_in_q);
2590         else
2591                 pr_err("%s: devip=NULL\n", __func__);
2592         if (atomic_read(&retired_max_queue) > 0)
2593                 retiring = 1;
2594
2595         sqcp->a_cmnd = NULL;
2596         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2597                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2598                 pr_err("%s: Unexpected completion\n", __func__);
2599                 goto the_end;
2600         }
2601
2602         if (unlikely(retiring)) {       /* user has reduced max_queue */
2603                 int k, retval;
2604
2605                 retval = atomic_read(&retired_max_queue);
2606                 if (qa_indx >= retval) {
2607                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2608                         pr_err("%s: index %d too large\n", __func__, retval);
2609                         goto the_end;
2610                 }
2611                 k = find_last_bit(queued_in_use_bm, retval);
2612                 if ((k < scsi_debug_max_queue) || (k == retval))
2613                         atomic_set(&retired_max_queue, 0);
2614                 else
2615                         atomic_set(&retired_max_queue, k + 1);
2616         }
2617         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2618         scp->scsi_done(scp); /* callback to mid level */
2619 the_end:
2620         return HRTIMER_NORESTART;
2621 }
2622
2623 static struct sdebug_dev_info *
2624 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2625 {
2626         struct sdebug_dev_info *devip;
2627
2628         devip = kzalloc(sizeof(*devip), flags);
2629         if (devip) {
2630                 devip->sdbg_host = sdbg_host;
2631                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2632         }
2633         return devip;
2634 }
2635
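/* Find the sdebug_dev_info for sdev, or claim/allocate an unused slot. */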
2636 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2637 {
2638         struct sdebug_host_info * sdbg_host;
2639         struct sdebug_dev_info * open_devip = NULL;
2640         struct sdebug_dev_info * devip =
2641                         (struct sdebug_dev_info *)sdev->hostdata;
2642
2643         if (devip)
2644                 return devip;
2645         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2646         if (!sdbg_host) {
2647                 pr_err("%s: Host info NULL\n", __func__);
2648                 return NULL;
2649         }
2650         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2651                 if ((devip->used) && (devip->channel == sdev->channel) &&
2652                     (devip->target == sdev->id) &&
2653                     (devip->lun == sdev->lun))
2654                         return devip;
2655                 else {
2656                         if ((!devip->used) && (!open_devip))
2657                                 open_devip = devip;
2658                 }
2659         }
2660         if (!open_devip) { /* try and make a new one */
2661                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2662                 if (!open_devip) {
2663                         printk(KERN_ERR "%s: out of memory at line %d\n",
2664                                 __func__, __LINE__);
2665                         return NULL;
2666                 }
2667         }
2668
2669         open_devip->channel = sdev->channel;
2670         open_devip->target = sdev->id;
2671         open_devip->lun = sdev->lun;
2672         open_devip->sdbg_host = sdbg_host;
2673         atomic_set(&open_devip->num_in_q, 0);
2674         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
2675         open_devip->used = 1;
2676         if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2677                 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2678
2679         return open_devip;
2680 }
2681
2682 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2683 {
2684         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2685                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
2686                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2687         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2688         return 0;
2689 }
2690
2691 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2692 {
2693         struct sdebug_dev_info *devip;
2694
2695         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2696                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
2697                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2698         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2699                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2700         devip = devInfoReg(sdp);
2701         if (NULL == devip)
2702                 return 1;       /* no resources, will be marked offline */
2703         sdp->hostdata = devip;
2704         sdp->tagged_supported = 1;
2705         if (sdp->host->cmd_per_lun)
2706                 scsi_adjust_queue_depth(sdp, DEF_TAGGED_QUEUING,
2707                                         DEF_CMD_PER_LUN);
2708         blk_queue_max_segment_size(sdp->request_queue, -1U);
2709         if (scsi_debug_no_uld)
2710                 sdp->no_uld_attach = 1;
2711         return 0;
2712 }
2713
2714 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2715 {
2716         struct sdebug_dev_info *devip =
2717                 (struct sdebug_dev_info *)sdp->hostdata;
2718
2719         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2720                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
2721                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2722         if (devip) {
2723                 /* make this slot available for re-use */
2724                 devip->used = 0;
2725                 sdp->hostdata = NULL;
2726         }
2727 }
2728
2729 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
2730 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2731 {
2732         unsigned long iflags;
2733         int k, qmax, r_qmax;
2734         struct sdebug_queued_cmd *sqcp;
2735         struct sdebug_dev_info *devip;
2736
2737         spin_lock_irqsave(&queued_arr_lock, iflags);
2738         qmax = scsi_debug_max_queue;
2739         r_qmax = atomic_read(&retired_max_queue);
2740         if (r_qmax > qmax)
2741                 qmax = r_qmax;
2742         for (k = 0; k < qmax; ++k) {
2743                 if (test_bit(k, queued_in_use_bm)) {
2744                         sqcp = &queued_arr[k];
2745                         if (cmnd == sqcp->a_cmnd) {
2746                                 if (scsi_debug_ndelay > 0) {
2747                                         if (sqcp->sd_hrtp)
2748                                                 hrtimer_cancel(
2749                                                         &sqcp->sd_hrtp->hrt);
2750                                 } else if (scsi_debug_delay > 0) {
2751                                         if (sqcp->cmnd_timerp)
2752                                                 del_timer_sync(
2753                                                         sqcp->cmnd_timerp);
2754                                 } else if (scsi_debug_delay < 0) {
2755                                         if (sqcp->tletp)
2756                                                 tasklet_kill(sqcp->tletp);
2757                                 }
2758                                 __clear_bit(k, queued_in_use_bm);
2759                                 devip = (struct sdebug_dev_info *)
2760                                         cmnd->device->hostdata;
2761                                 if (devip)
2762                                         atomic_dec(&devip->num_in_q);
2763                                 sqcp->a_cmnd = NULL;
2764                                 break;
2765                         }
2766                 }
2767         }
2768         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2769         return (k < qmax) ? 1 : 0;
2770 }
2771
2772 /* Deletes (stops) timers or tasklets of all queued commands */
2773 static void stop_all_queued(void)
2774 {
2775         unsigned long iflags;
2776         int k;
2777         struct sdebug_queued_cmd *sqcp;
2778         struct sdebug_dev_info *devip;
2779
2780         spin_lock_irqsave(&queued_arr_lock, iflags);
2781         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2782                 if (test_bit(k, queued_in_use_bm)) {
2783                         sqcp = &queued_arr[k];
2784                         if (sqcp->a_cmnd) {
2785                                 if (scsi_debug_ndelay > 0) {
2786                                         if (sqcp->sd_hrtp)
2787                                                 hrtimer_cancel(
2788                                                         &sqcp->sd_hrtp->hrt);
2789                                 } else if (scsi_debug_delay > 0) {
2790                                         if (sqcp->cmnd_timerp)
2791                                                 del_timer_sync(
2792                                                         sqcp->cmnd_timerp);
2793                                 } else if (scsi_debug_delay < 0) {
2794                                         if (sqcp->tletp)
2795                                                 tasklet_kill(sqcp->tletp);
2796                                 }
2797                                 __clear_bit(k, queued_in_use_bm);
2798                                 devip = (struct sdebug_dev_info *)
2799                                         sqcp->a_cmnd->device->hostdata;
2800                                 if (devip)
2801                                         atomic_dec(&devip->num_in_q);
2802                                 sqcp->a_cmnd = NULL;
2803                         }
2804                 }
2805         }
2806         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2807 }
2808
2809 /* Free queued command memory on heap */
2810 static void free_all_queued(void)
2811 {
2812         unsigned long iflags;
2813         int k;
2814         struct sdebug_queued_cmd *sqcp;
2815
2816         spin_lock_irqsave(&queued_arr_lock, iflags);
2817         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2818                 sqcp = &queued_arr[k];
2819                 kfree(sqcp->cmnd_timerp);
2820                 sqcp->cmnd_timerp = NULL;
2821                 kfree(sqcp->tletp);
2822                 sqcp->tletp = NULL;
2823                 kfree(sqcp->sd_hrtp);
2824                 sqcp->sd_hrtp = NULL;
2825         }
2826         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2827 }
2828
2829 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
2830 {
2831         ++num_aborts;
2832         if (SCpnt) {
2833                 if (SCpnt->device &&
2834                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2835                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
2836                                     __func__);
2837                 stop_queued_cmnd(SCpnt);
2838         }
2839         return SUCCESS;
2840 }
2841
2842 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2843 {
2844         struct sdebug_dev_info * devip;
2845
2846         ++num_dev_resets;
2847         if (SCpnt && SCpnt->device) {
2848                 struct scsi_device *sdp = SCpnt->device;
2849
2850                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2851                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2852                 devip = devInfoReg(sdp);
2853                 if (devip)
2854                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
2855         }
2856         return SUCCESS;
2857 }
2858
2859 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
2860 {
2861         struct sdebug_host_info *sdbg_host;
2862         struct sdebug_dev_info *devip;
2863         struct scsi_device *sdp;
2864         struct Scsi_Host *hp;
2865         int k = 0;
2866
2867         ++num_target_resets;
2868         if (!SCpnt)
2869                 goto lie;
2870         sdp = SCpnt->device;
2871         if (!sdp)
2872                 goto lie;
2873         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2874                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2875         hp = sdp->host;
2876         if (!hp)
2877                 goto lie;
2878         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2879         if (sdbg_host) {
2880                 list_for_each_entry(devip,
2881                                     &sdbg_host->dev_info_list,
2882                                     dev_list)
2883                         if (devip->target == sdp->id) {
2884                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2885                                 ++k;
2886                         }
2887         }
2888         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2889                 sdev_printk(KERN_INFO, sdp,
2890                             "%s: %d device(s) found in target\n", __func__, k);
2891 lie:
2892         return SUCCESS;
2893 }
2894
2895 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2896 {
2897         struct sdebug_host_info *sdbg_host;
2898         struct sdebug_dev_info *devip;
2899         struct scsi_device * sdp;
2900         struct Scsi_Host * hp;
2901         int k = 0;
2902
2903         ++num_bus_resets;
2904         if (!(SCpnt && SCpnt->device))
2905                 goto lie;
2906         sdp = SCpnt->device;
2907         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2908                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2909         hp = sdp->host;
2910         if (hp) {
2911                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2912                 if (sdbg_host) {
2913                         list_for_each_entry(devip,
2914                                             &sdbg_host->dev_info_list,
2915                                             dev_list) {
2916                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2917                                 ++k;
2918                         }
2919                 }
2920         }
2921         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2922                 sdev_printk(KERN_INFO, sdp,
2923                             "%s: %d device(s) found in host\n", __func__, k);
2924 lie:
2925         return SUCCESS;
2926 }
2927
2928 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2929 {
2930         struct sdebug_host_info * sdbg_host;
2931         struct sdebug_dev_info *devip;
2932         int k = 0;
2933
2934         ++num_host_resets;
2935         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2936                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
2937         spin_lock(&sdebug_host_list_lock);
2938         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2939                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
2940                                     dev_list) {
2941                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2942                         ++k;
2943                 }
2944         }
2945         spin_unlock(&sdebug_host_list_lock);
2946         stop_all_queued();
2947         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2948                 sdev_printk(KERN_INFO, SCpnt->device,
2949                             "%s: %d device(s) found\n", __func__, k);
2950         return SUCCESS;
2951 }
2952
2953 static void __init sdebug_build_parts(unsigned char *ramp,
2954                                       unsigned long store_size)
2955 {
2956         struct partition * pp;
2957         int starts[SDEBUG_MAX_PARTS + 2];
2958         int sectors_per_part, num_sectors, k;
2959         int heads_by_sects, start_sec, end_sec;
2960
2961         /* assume partition table already zeroed */
2962         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2963                 return;
2964         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2965                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2966                 pr_warn("%s: reducing partitions to %d\n", __func__,
2967                         SDEBUG_MAX_PARTS);
2968         }
2969         num_sectors = (int)sdebug_store_sectors;
2970         sectors_per_part = (num_sectors - sdebug_sectors_per)
2971                            / scsi_debug_num_parts;
2972         heads_by_sects = sdebug_heads * sdebug_sectors_per;
2973         starts[0] = sdebug_sectors_per;
2974         for (k = 1; k < scsi_debug_num_parts; ++k)
2975                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2976                             * heads_by_sects;
2977         starts[scsi_debug_num_parts] = num_sectors;
2978         starts[scsi_debug_num_parts + 1] = 0;
2979
2980         ramp[510] = 0x55;       /* magic partition markings */
2981         ramp[511] = 0xAA;
2982         pp = (struct partition *)(ramp + 0x1be);
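        /*
         * Write one MBR entry per partition, converting each absolute
         * start/end sector to CHS.  E.g. with 8 heads and 32 sectors per
         * track (heads_by_sects = 256), absolute sector 1000 maps to
         * cylinder 3 (1000 / 256), head 7 ((1000 - 768) / 32) and
         * sector 9 (1000 % 32 + 1; the sector field is 1-based).
         */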
2983         for (k = 0; starts[k + 1]; ++k, ++pp) {
2984                 start_sec = starts[k];
2985                 end_sec = starts[k + 1] - 1;
2986                 pp->boot_ind = 0;
2987
2988                 pp->cyl = start_sec / heads_by_sects;
2989                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2990                            / sdebug_sectors_per;
2991                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2992
2993                 pp->end_cyl = end_sec / heads_by_sects;
2994                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2995                                / sdebug_sectors_per;
2996                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2997
2998                 pp->start_sect = cpu_to_le32(start_sec);
2999                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3000                 pp->sys_ind = 0x83;     /* plain Linux partition */
3001         }
3002 }
3003
3004 static int
3005 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3006               int scsi_result, int delta_jiff)
3007 {
3008         unsigned long iflags;
3009         int k, num_in_q, tsf, qdepth, inject;
3010         struct sdebug_queued_cmd *sqcp = NULL;
3011         struct scsi_device *sdp;
3012
3013         if (NULL == cmnd || NULL == devip) {
3014                 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3015                         __func__);
3016                 return SCSI_MLQUEUE_HOST_BUSY; /* no good error to report */
3017         }
3018         sdp = cmnd->device;
3019         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3020                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3021                             __func__, scsi_result);
3022         if (delta_jiff == 0) {
3023                 /* using same thread to call back mid-layer */
3024                 cmnd->result = scsi_result;
3025                 cmnd->scsi_done(cmnd);
3026                 return 0;
3027         }
3028
3029         /* deferred response cases */
3030         spin_lock_irqsave(&queued_arr_lock, iflags);
3031         num_in_q = atomic_read(&devip->num_in_q);
3032         qdepth = cmnd->device->queue_depth;
3033         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3034         tsf = 0;
3035         inject = 0;
3036         if ((qdepth > 0) && (num_in_q >= qdepth))
3037                 tsf = 1;
3038         else if ((scsi_debug_every_nth != 0) &&
3039                  (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) {
3040                 if ((num_in_q == (qdepth - 1)) &&
3041                     (atomic_inc_return(&sdebug_a_tsf) >=
3042                      abs(scsi_debug_every_nth))) {
3043                         atomic_set(&sdebug_a_tsf, 0);
3044                         inject = 1;
3045                         tsf = 1;
3046                 }
3047         }
3048
3049         /* if (tsf) simulate device reporting SCSI status of TASK SET FULL.
3050          * Might override existing CHECK CONDITION. */
3051         if (tsf)
3052                 scsi_result = device_qfull_result;
3053         if (k >= scsi_debug_max_queue) {
3054                 if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3055                         tsf = 1;
3056                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3057                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3058                         sdev_printk(KERN_INFO, sdp,
3059                                     "%s: num_in_q=%d, bypass q, %s%s\n",
3060                                     __func__, num_in_q,
3061                                     (inject ? "<inject> " : ""),
3062                                     (tsf ?  "status: TASK SET FULL" :
3063                                             "report: host busy"));
3064                 if (tsf) {
3065                         /* queued_arr full so respond in same thread */
3066                         cmnd->result = scsi_result;
3067                         cmnd->scsi_done(cmnd);
3068                         /* As scsi_done() is called "inline" must return 0 */
3069                         return 0;
3070                 } else
3071                         return SCSI_MLQUEUE_HOST_BUSY;
3072         }
3073         __set_bit(k, queued_in_use_bm);
3074         atomic_inc(&devip->num_in_q);
3075         sqcp = &queued_arr[k];
3076         sqcp->a_cmnd = cmnd;
3077         cmnd->result = scsi_result;
3078         spin_unlock_irqrestore(&queued_arr_lock, iflags);
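        /*
         * Three deferral mechanisms: delay > 0 uses a timer_list (jiffies
         * resolution), ndelay > 0 uses an hrtimer (nanosecond resolution)
         * and delay < 0 schedules a tasklet (-1 selects the high priority
         * tasklet queue).  Each calls back into the mid layer later via
         * sdebug_q_cmd_complete() or sdebug_q_cmd_hrt_complete().
         */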
3079         if (delta_jiff > 0) {
3080                 if (NULL == sqcp->cmnd_timerp) {
3081                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3082                                                     GFP_ATOMIC);
3083                         if (NULL == sqcp->cmnd_timerp)
3084                                 return SCSI_MLQUEUE_HOST_BUSY;
3085                         init_timer(sqcp->cmnd_timerp);
3086                 }
3087                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3088                 sqcp->cmnd_timerp->data = k;
3089                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3090                 add_timer(sqcp->cmnd_timerp);
3091         } else if (scsi_debug_ndelay > 0) {
3092                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3093                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3094
3095                 if (NULL == sd_hp) {
3096                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3097                         if (NULL == sd_hp)
3098                                 return SCSI_MLQUEUE_HOST_BUSY;
3099                         sqcp->sd_hrtp = sd_hp;
3100                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3101                                      HRTIMER_MODE_REL);
3102                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3103                         sd_hp->qa_indx = k;
3104                 }
3105                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3106         } else {        /* delay < 0 */
3107                 if (NULL == sqcp->tletp) {
3108                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3109                                               GFP_ATOMIC);
3110                         if (NULL == sqcp->tletp)
3111                                 return SCSI_MLQUEUE_HOST_BUSY;
3112                         tasklet_init(sqcp->tletp,
3113                                      sdebug_q_cmd_complete, k);
3114                 }
3115                 if (-1 == delta_jiff)
3116                         tasklet_hi_schedule(sqcp->tletp);
3117                 else
3118                         tasklet_schedule(sqcp->tletp);
3119         }
3120         if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts))
3121                 sdev_printk(KERN_INFO, sdp,
3122                             "%s: num_in_q=%d +1, %s%s\n", __func__,
3123                             num_in_q, (inject ? "<inject> " : ""),
3124                             "status: TASK SET FULL");
3125         return 0;
3126 }
3127
3128 /* Note: The following macros create attribute files in the
3129    /sys/module/scsi_debug/parameters directory. Unfortunately this
3130    driver is not notified when one of those files changes, so it cannot
3131    trigger auxiliary actions as it can when the corresponding attribute
3132    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3133  */
3134 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3135 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3136 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3137 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3138 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3139 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3140 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3141 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3142 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3143 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3144 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3145 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3146 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3147 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3148 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3149 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3150 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3151 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3152 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3153 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3154 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3155 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3156 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3157 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3158 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3159 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3160 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3161 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3162 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3163 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3164 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3165 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3166 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3167 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
3168 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
3169 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
3170 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
3171                    S_IRUGO | S_IWUSR);
3172 module_param_named(write_same_length, scsi_debug_write_same_length, int,
3173                    S_IRUGO | S_IWUSR);
3174
3175 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
3176 MODULE_DESCRIPTION("SCSI debug adapter driver");
3177 MODULE_LICENSE("GPL");
3178 MODULE_VERSION(SCSI_DEBUG_VERSION);
3179
3180 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3181 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3182 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3183 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3184 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
3185 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3186 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3187 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
3188 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
3189 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
3190 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
3191 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
3192 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
3193 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
3194 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
3195 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
3196 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
3197 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
3198 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
3199 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
3200 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
3201 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
3202 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
3203 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
3204 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
3205 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
3206 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
3207 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3208 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3209 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
3210 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
3211 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3212 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3213 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
3214 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3215 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
3216 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3217 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
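/*
 * Illustrative usage of the above (parameter values are arbitrary examples):
 *
 *     modprobe scsi_debug dev_size_mb=256 max_luns=2
 *     cat /sys/module/scsi_debug/parameters/opts
 *     echo 1 > /sys/module/scsi_debug/parameters/every_nth
 *
 * Only parameters declared with S_IWUSR above are writable after load and,
 * per the earlier note, writes via this directory do not trigger any
 * auxiliary actions.
 */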
3218
3219 static char sdebug_info[256];
3220
3221 static const char * scsi_debug_info(struct Scsi_Host * shp)
3222 {
3223         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
3224                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
3225                 scsi_debug_version_date, scsi_debug_dev_size_mb,
3226                 scsi_debug_opts);
3227         return sdebug_info;
3228 }
3229
3230 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
3231 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
3232 {
3233         char arr[16];
3234         int opts;
3235         int minLen = length > 15 ? 15 : length;
3236
3237         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3238                 return -EACCES;
3239         memcpy(arr, buffer, minLen);
3240         arr[minLen] = '\0';
3241         if (1 != sscanf(arr, "%d", &opts))
3242                 return -EINVAL;
3243         scsi_debug_opts = opts;
3244         if (scsi_debug_every_nth != 0)
3245                 atomic_set(&sdebug_cmnd_count, 0);
3246         return length;
3247 }
3248
3249 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
3250  * same for each scsi_debug host (if more than one). Some of the counters
3251  * shown are not atomic and may be inaccurate on a busy system. */
3252 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
3253 {
3254         int f, l;
3255         char b[32];
3256
3257         if (scsi_debug_every_nth > 0)
3258                 snprintf(b, sizeof(b), " (curr:%d)",
3259                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
3260                                 atomic_read(&sdebug_a_tsf) :
3261                                 atomic_read(&sdebug_cmnd_count)));
3262         else
3263                 b[0] = '\0';
3264
3265         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
3266                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
3267                 "every_nth=%d%s\n"
3268                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
3269                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
3270                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
3271                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
3272                 "usec_in_jiffy=%lu\n",
3273                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
3274                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
3275                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
3276                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
3277                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
3278                 sdebug_sectors_per, num_aborts, num_dev_resets,
3279                 num_target_resets, num_bus_resets, num_host_resets,
3280                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
3281
3282         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
3283         if (f != scsi_debug_max_queue) {
3284                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
3285                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
3286                            "queued_in_use_bm", f, l);
3287         }
3288         return 0;
3289 }
3290
3291 static ssize_t delay_show(struct device_driver *ddp, char *buf)
3292 {
3293         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
3294 }
3295 /* Returns -EBUSY if delay is being changed and commands are queued */
3296 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
3297                            size_t count)
3298 {
3299         int delay, res;
3300
3301         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
3302                 res = count;
3303                 if (scsi_debug_delay != delay) {
3304                         unsigned long iflags;
3305                         int k;
3306
3307                         spin_lock_irqsave(&queued_arr_lock, iflags);
3308                         k = find_first_bit(queued_in_use_bm,
3309                                            scsi_debug_max_queue);
3310                         if (k != scsi_debug_max_queue)
3311                                 res = -EBUSY;   /* have queued commands */
3312                         else {
3313                                 scsi_debug_delay = delay;
3314                                 scsi_debug_ndelay = 0;
3315                         }
3316                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3317                 }
3318                 return res;
3319         }
3320         return -EINVAL;
3321 }
3322 static DRIVER_ATTR_RW(delay);
3323
3324 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
3325 {
3326         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
3327 }
3328 /* Returns -EBUSY if ndelay is being changed and commands are queued */
3329 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
3330 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
3331                            size_t count)
3332 {
3333         unsigned long iflags;
3334         int ndelay, res, k;
3335
3336         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
3337             (ndelay >= 0) && (ndelay < 1000000000)) {
3338                 res = count;
3339                 if (scsi_debug_ndelay != ndelay) {
3340                         spin_lock_irqsave(&queued_arr_lock, iflags);
3341                         k = find_first_bit(queued_in_use_bm,
3342                                            scsi_debug_max_queue);
3343                         if (k != scsi_debug_max_queue)
3344                                 res = -EBUSY;   /* have queued commands */
3345                         else {
3346                                 scsi_debug_ndelay = ndelay;
3347                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
3348                                                           : DEF_DELAY;
3349                         }
3350                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3351                 }
3352                 return res;
3353         }
3354         return -EINVAL;
3355 }
3356 static DRIVER_ATTR_RW(ndelay);
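/* delay (jiffies) and ndelay (nanoseconds) are alternatives: storing a new
 * delay clears ndelay, while storing a non-zero ndelay marks delay as
 * overridden (see the two _store functions above). */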
3357
3358 static ssize_t opts_show(struct device_driver *ddp, char *buf)
3359 {
3360         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
3361 }
3362
3363 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3364                           size_t count)
3365 {
3366         int opts;
3367         char work[20];
3368
3369         if (1 == sscanf(buf, "%10s", work)) {
3370                 if (0 == strnicmp(work,"0x", 2)) {
3371                         if (1 == sscanf(&work[2], "%x", &opts))
3372                                 goto opts_done;
3373                 } else {
3374                         if (1 == sscanf(work, "%d", &opts))
3375                                 goto opts_done;
3376                 }
3377         }
3378         return -EINVAL;
3379 opts_done:
3380         scsi_debug_opts = opts;
3381         atomic_set(&sdebug_cmnd_count, 0);
3382         atomic_set(&sdebug_a_tsf, 0);
3383         return count;
3384 }
3385 static DRIVER_ATTR_RW(opts);
3386
3387 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
3388 {
3389         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
3390 }
3391 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
3392                            size_t count)
3393 {
3394         int n;
3395
3396         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3397                 scsi_debug_ptype = n;
3398                 return count;
3399         }
3400         return -EINVAL;
3401 }
3402 static DRIVER_ATTR_RW(ptype);
3403
3404 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
3405 {
3406         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
3407 }
3408 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
3409                             size_t count)
3410 {
3411         int n;
3412
3413         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3414                 scsi_debug_dsense = n;
3415                 return count;
3416         }
3417         return -EINVAL;
3418 }
3419 static DRIVER_ATTR_RW(dsense);
3420
3421 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
3422 {
3423         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
3424 }
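/* Clearing fake_rw may need to allocate the ramdisk backing store: if the
 * module was loaded with fake_rw=1, fake_storep was never allocated. */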
3425 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
3426                              size_t count)
3427 {
3428         int n;
3429
3430         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3431                 n = (n > 0);
3432                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
3433                 if (scsi_debug_fake_rw != n) {
3434                         if ((0 == n) && (NULL == fake_storep)) {
3435                                 unsigned long sz =
3436                                         (unsigned long)scsi_debug_dev_size_mb *
3437                                         1048576;
3438
3439                                 fake_storep = vmalloc(sz);
3440                                 if (NULL == fake_storep) {
3441                                         pr_err("%s: out of memory, 9\n",
3442                                                __func__);
3443                                         return -ENOMEM;
3444                                 }
3445                                 memset(fake_storep, 0, sz);
3446                         }
3447                         scsi_debug_fake_rw = n;
3448                 }
3449                 return count;
3450         }
3451         return -EINVAL;
3452 }
3453 static DRIVER_ATTR_RW(fake_rw);
3454
3455 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
3456 {
3457         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3458 }
3459 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3460                               size_t count)
3461 {
3462         int n;
3463
3464         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3465                 scsi_debug_no_lun_0 = n;
3466                 return count;
3467         }
3468         return -EINVAL;
3469 }
3470 static DRIVER_ATTR_RW(no_lun_0);
3471
3472 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3473 {
3474         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3475 }
3476 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3477                               size_t count)
3478 {
3479         int n;
3480
3481         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3482                 scsi_debug_num_tgts = n;
3483                 sdebug_max_tgts_luns();
3484                 return count;
3485         }
3486         return -EINVAL;
3487 }
3488 static DRIVER_ATTR_RW(num_tgts);
3489
3490 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3491 {
3492         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3493 }
3494 static DRIVER_ATTR_RO(dev_size_mb);
3495
3496 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3497 {
3498         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3499 }
3500 static DRIVER_ATTR_RO(num_parts);
3501
3502 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3503 {
3504         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3505 }
3506 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3507                                size_t count)
3508 {
3509         int nth;
3510
3511         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3512                 scsi_debug_every_nth = nth;
3513                 atomic_set(&sdebug_cmnd_count, 0);
3514                 return count;
3515         }
3516         return -EINVAL;
3517 }
3518 static DRIVER_ATTR_RW(every_nth);
3519
3520 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3521 {
3522         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3523 }
3524 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3525                               size_t count)
3526 {
3527         int n;
3528
3529         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3530                 scsi_debug_max_luns = n;
3531                 sdebug_max_tgts_luns();
3532                 return count;
3533         }
3534         return -EINVAL;
3535 }
3536 static DRIVER_ATTR_RW(max_luns);
3537
3538 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3539 {
3540         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3541 }
3542 /* N.B. max_queue can be changed while there are queued commands. In flight
3543  * commands beyond the new max_queue will be completed. */
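/* retired_max_queue holds (highest still-queued slot index) + 1 whenever
 * that exceeds the new max_queue; the abort path scans up to it and the
 * completion path drops it back to 0 once those higher slots drain. */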
3544 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3545                                size_t count)
3546 {
3547         unsigned long iflags;
3548         int n, k;
3549
3550         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3551             (n <= SCSI_DEBUG_CANQUEUE)) {
3552                 spin_lock_irqsave(&queued_arr_lock, iflags);
3553                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
3554                 scsi_debug_max_queue = n;
3555                 if (SCSI_DEBUG_CANQUEUE == k)
3556                         atomic_set(&retired_max_queue, 0);
3557                 else if (k >= n)
3558                         atomic_set(&retired_max_queue, k + 1);
3559                 else
3560                         atomic_set(&retired_max_queue, 0);
3561                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3562                 return count;
3563         }
3564         return -EINVAL;
3565 }
3566 static DRIVER_ATTR_RW(max_queue);
3567
3568 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3569 {
3570         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3571 }
3572 static DRIVER_ATTR_RO(no_uld);
3573
3574 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3575 {
3576         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3577 }
3578 static DRIVER_ATTR_RO(scsi_level);
3579
3580 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3581 {
3582         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3583 }
3584 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3585                                 size_t count)
3586 {
3587         int n;
3588
3589         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3590                 scsi_debug_virtual_gb = n;
3591
3592                 sdebug_capacity = get_sdebug_capacity();
3593
3594                 return count;
3595         }
3596         return -EINVAL;
3597 }
3598 static DRIVER_ATTR_RW(virtual_gb);
3599
3600 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3601 {
3602         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3603 }
3604
3605 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3606                               size_t count)
3607 {
3608         int delta_hosts;
3609
3610         if (sscanf(buf, "%d", &delta_hosts) != 1)
3611                 return -EINVAL;
3612         if (delta_hosts > 0) {
3613                 do {
3614                         sdebug_add_adapter();
3615                 } while (--delta_hosts);
3616         } else if (delta_hosts < 0) {
3617                 do {
3618                         sdebug_remove_adapter();
3619                 } while (++delta_hosts);
3620         }
3621         return count;
3622 }
3623 static DRIVER_ATTR_RW(add_host);
3624
3625 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3626 {
3627         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3628 }
3629 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3630                                     size_t count)
3631 {
3632         int n;
3633
3634         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3635                 scsi_debug_vpd_use_hostno = n;
3636                 return count;
3637         }
3638         return -EINVAL;
3639 }
3640 static DRIVER_ATTR_RW(vpd_use_hostno);
3641
3642 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3643 {
3644         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3645 }
3646 static DRIVER_ATTR_RO(sector_size);
3647
3648 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3649 {
3650         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3651 }
3652 static DRIVER_ATTR_RO(dix);
3653
3654 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3655 {
3656         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3657 }
3658 static DRIVER_ATTR_RO(dif);
3659
3660 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3661 {
3662         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3663 }
3664 static DRIVER_ATTR_RO(guard);
3665
3666 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3667 {
3668         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3669 }
3670 static DRIVER_ATTR_RO(ato);
3671
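/* Show which provisioning blocks are currently mapped, in bitmap list
 * format (e.g. "0-1,32"); if LBP is not enabled, report the whole
 * backing store range instead. */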
3672 static ssize_t map_show(struct device_driver *ddp, char *buf)
3673 {
3674         ssize_t count;
3675
3676         if (!scsi_debug_lbp())
3677                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3678                                  sdebug_store_sectors);
3679
3680         count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3681
3682         buf[count++] = '\n';
3683         buf[count++] = 0;
3684
3685         return count;
3686 }
3687 static DRIVER_ATTR_RO(map);
3688
3689 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3690 {
3691         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3692 }
3693 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3694                                size_t count)
3695 {
3696         int n;
3697
3698         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3699                 scsi_debug_removable = (n > 0);
3700                 return count;
3701         }
3702         return -EINVAL;
3703 }
3704 static DRIVER_ATTR_RW(removable);
3705
3706 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
3707 {
3708         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
3709 }
3710 /* Returns -EBUSY if host_lock is being changed and commands are queued */
3711 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
3712                                size_t count)
3713 {
3714         int n, res;
3715
3716         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3717                 bool new_host_lock = (n > 0);
3718
3719                 res = count;
3720                 if (new_host_lock != scsi_debug_host_lock) {
3721                         unsigned long iflags;
3722                         int k;
3723
3724                         spin_lock_irqsave(&queued_arr_lock, iflags);
3725                         k = find_first_bit(queued_in_use_bm,
3726                                            scsi_debug_max_queue);
3727                         if (k != scsi_debug_max_queue)
3728                                 res = -EBUSY;   /* have queued commands */
3729                         else
3730                                 scsi_debug_host_lock = new_host_lock;
3731                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3732                 }
3733                 return res;
3734         }
3735         return -EINVAL;
3736 }
3737 static DRIVER_ATTR_RW(host_lock);
3738
3739
3740 /* Note: The following array creates attribute files in the
3741    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3742    files (over those found in the /sys/module/scsi_debug/parameters
3743    directory) is that auxiliary actions can be triggered when an attribute
3744    is changed. For example see: add_host_store() above.
3745  */
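/*
 * Illustrative usage (values are arbitrary examples):
 *
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host    (add a host)
 *     echo -1 > /sys/bus/pseudo/drivers/scsi_debug/add_host   (remove one)
 *     echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts      (noise on)
 */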
3746
3747 static struct attribute *sdebug_drv_attrs[] = {
3748         &driver_attr_delay.attr,
3749         &driver_attr_opts.attr,
3750         &driver_attr_ptype.attr,
3751         &driver_attr_dsense.attr,
3752         &driver_attr_fake_rw.attr,
3753         &driver_attr_no_lun_0.attr,
3754         &driver_attr_num_tgts.attr,
3755         &driver_attr_dev_size_mb.attr,
3756         &driver_attr_num_parts.attr,
3757         &driver_attr_every_nth.attr,
3758         &driver_attr_max_luns.attr,
3759         &driver_attr_max_queue.attr,
3760         &driver_attr_no_uld.attr,
3761         &driver_attr_scsi_level.attr,
3762         &driver_attr_virtual_gb.attr,
3763         &driver_attr_add_host.attr,
3764         &driver_attr_vpd_use_hostno.attr,
3765         &driver_attr_sector_size.attr,
3766         &driver_attr_dix.attr,
3767         &driver_attr_dif.attr,
3768         &driver_attr_guard.attr,
3769         &driver_attr_ato.attr,
3770         &driver_attr_map.attr,
3771         &driver_attr_removable.attr,
3772         &driver_attr_host_lock.attr,
3773         &driver_attr_ndelay.attr,
3774         NULL,
3775 };
3776 ATTRIBUTE_GROUPS(sdebug_drv);
3777
3778 static struct device *pseudo_primary;
3779
3780 static int __init scsi_debug_init(void)
3781 {
3782         unsigned long sz;
3783         int host_to_add;
3784         int k;
3785         int ret;
3786
3787         atomic_set(&sdebug_cmnd_count, 0);
3788         atomic_set(&sdebug_completions, 0);
3789         atomic_set(&retired_max_queue, 0);
3790
3791         if (scsi_debug_ndelay >= 1000000000) {
3792                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
3793                         __func__);
3794                 scsi_debug_ndelay = 0;
3795         } else if (scsi_debug_ndelay > 0)
3796                 scsi_debug_delay = DELAY_OVERRIDDEN;
3797
3798         switch (scsi_debug_sector_size) {
3799         case  512:
3800         case 1024:
3801         case 2048:
3802         case 4096:
3803                 break;
3804         default:
3805                 pr_err("%s: invalid sector_size %d\n", __func__,
3806                        scsi_debug_sector_size);
3807                 return -EINVAL;
3808         }
3809
3810         switch (scsi_debug_dif) {
3811
3812         case SD_DIF_TYPE0_PROTECTION:
3813         case SD_DIF_TYPE1_PROTECTION:
3814         case SD_DIF_TYPE2_PROTECTION:
3815         case SD_DIF_TYPE3_PROTECTION:
3816                 break;
3817
3818         default:
3819                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
3820                 return -EINVAL;
3821         }
3822
3823         if (scsi_debug_guard > 1) {
3824                 pr_err("%s: guard must be 0 or 1\n", __func__);
3825                 return -EINVAL;
3826         }
3827
3828         if (scsi_debug_ato > 1) {
3829                 pr_err("%s: ato must be 0 or 1\n", __func__);
3830                 return -EINVAL;
3831         }
3832
3833         if (scsi_debug_physblk_exp > 15) {
3834                 pr_err("%s: invalid physblk_exp %u\n", __func__,
3835                        scsi_debug_physblk_exp);
3836                 return -EINVAL;
3837         }
3838
3839         if (scsi_debug_lowest_aligned > 0x3fff) {
3840                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
3841                        scsi_debug_lowest_aligned);
3842                 return -EINVAL;
3843         }
3844
3845         if (scsi_debug_dev_size_mb < 1)
3846                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3847         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3848         sdebug_store_sectors = sz / scsi_debug_sector_size;
3849         sdebug_capacity = get_sdebug_capacity();
3850
3851         /* play around with geometry, don't waste too much on track 0 */
3852         sdebug_heads = 8;
3853         sdebug_sectors_per = 32;
3854         if (scsi_debug_dev_size_mb >= 16)
3855                 sdebug_heads = 32;
3856         else if (scsi_debug_dev_size_mb >= 256)
3857                 sdebug_heads = 64;
3858         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3859                                (sdebug_sectors_per * sdebug_heads);
3860         if (sdebug_cylinders_per >= 1024) {
3861                 /* other LLDs do this; implies >= 1GB ram disk ... */
3862                 sdebug_heads = 255;
3863                 sdebug_sectors_per = 63;
3864                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3865                                (sdebug_sectors_per * sdebug_heads);
3866         }
3867
3868         if (0 == scsi_debug_fake_rw) {
3869                 fake_storep = vmalloc(sz);
3870                 if (NULL == fake_storep) {
3871                         pr_err("%s: out of memory, 1\n", __func__);
3872                         return -ENOMEM;
3873                 }
3874                 memset(fake_storep, 0, sz);
3875                 if (scsi_debug_num_parts > 0)
3876                         sdebug_build_parts(fake_storep, sz);
3877         }
3878
3879         if (scsi_debug_dix) {
3880                 int dif_size;
3881
3882                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3883                 dif_storep = vmalloc(dif_size);
3884
3885                 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
3886                         dif_storep);
3887
3888                 if (dif_storep == NULL) {
3889                         pr_err("%s: out of mem. (DIX)\n", __func__);
3890                         ret = -ENOMEM;
3891                         goto free_vm;
3892                 }
3893
3894                 memset(dif_storep, 0xff, dif_size);
3895         }
3896
3897         /* Logical Block Provisioning */
3898         if (scsi_debug_lbp()) {
3899                 scsi_debug_unmap_max_blocks =
3900                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3901
3902                 scsi_debug_unmap_max_desc =
3903                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3904
3905                 scsi_debug_unmap_granularity =
3906                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3907
3908                 if (scsi_debug_unmap_alignment &&
3909                     scsi_debug_unmap_granularity <=
3910                     scsi_debug_unmap_alignment) {
3911                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
3912                                __func__);
3913                         return -EINVAL;
3914                 }
3915
3916                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3917                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3918
3919                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
3920
3921                 if (map_storep == NULL) {
3922                         pr_err("%s: out of mem. (MAP)\n", __func__);
3923                         ret = -ENOMEM;
3924                         goto free_vm;
3925                 }
3926
3927                 bitmap_zero(map_storep, map_size);
3928
3929                 /* Map the sectors holding the partition table (LBA 0 and 1) */
3930                 if (scsi_debug_num_parts)
3931                         map_region(0, 2);
3932         }
3933
3934         pseudo_primary = root_device_register("pseudo_0");
3935         if (IS_ERR(pseudo_primary)) {
3936                 pr_warn("%s: root_device_register() error\n", __func__);
3937                 ret = PTR_ERR(pseudo_primary);
3938                 goto free_vm;
3939         }
3940         ret = bus_register(&pseudo_lld_bus);
3941         if (ret < 0) {
3942                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
3943                 goto dev_unreg;
3944         }
3945         ret = driver_register(&sdebug_driverfs_driver);
3946         if (ret < 0) {
3947                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
3948                 goto bus_unreg;
3949         }
3950
3951         host_to_add = scsi_debug_add_host;
3952         scsi_debug_add_host = 0;
3953
3954         for (k = 0; k < host_to_add; k++) {
3955                 if (sdebug_add_adapter()) {
3956                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
3957                                 __func__, k);
3958                         break;
3959                 }
3960         }
3961
3962         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3963                 pr_info("%s: built %d host(s)\n", __func__,
3964                         scsi_debug_add_host);
3965         }
3966         return 0;
3967
3968 bus_unreg:
3969         bus_unregister(&pseudo_lld_bus);
3970 dev_unreg:
3971         root_device_unregister(pseudo_primary);
3972 free_vm:
3973         if (map_storep)
3974                 vfree(map_storep);
3975         if (dif_storep)
3976                 vfree(dif_storep);
3977         vfree(fake_storep);
3978
3979         return ret;
3980 }
3981
3982 static void __exit scsi_debug_exit(void)
3983 {
3984         int k = scsi_debug_add_host;
3985
3986         stop_all_queued();
3987         free_all_queued();
3988         for (; k; k--)
3989                 sdebug_remove_adapter();
3990         driver_unregister(&sdebug_driverfs_driver);
3991         bus_unregister(&pseudo_lld_bus);
3992         root_device_unregister(pseudo_primary);
3993
3994         if (dif_storep)
3995                 vfree(dif_storep);
3996
3997         vfree(fake_storep);
3998 }
3999
4000 device_initcall(scsi_debug_init);
4001 module_exit(scsi_debug_exit);
4002
4003 static void sdebug_release_adapter(struct device * dev)
4004 {
4005         struct sdebug_host_info *sdbg_host;
4006
4007         sdbg_host = to_sdebug_host(dev);
4008         kfree(sdbg_host);
4009 }
4010
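/* Allocate a pseudo adapter with num_tgts * max_luns device slots and
 * register it on pseudo_lld_bus; binding it to the scsi_debug driver is
 * what creates the corresponding Scsi_Host. */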
4011 static int sdebug_add_adapter(void)
4012 {
4013         int k, devs_per_host;
4014         int error = 0;
4015         struct sdebug_host_info *sdbg_host;
4016         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4017
4018         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
4019         if (NULL == sdbg_host) {
4020                 printk(KERN_ERR "%s: out of memory at line %d\n",
4021                        __func__, __LINE__);
4022                 return -ENOMEM;
4023         }
4024
4025         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4026
4027         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4028         for (k = 0; k < devs_per_host; k++) {
4029                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4030                 if (!sdbg_devinfo) {
4031                         printk(KERN_ERR "%s: out of memory at line %d\n",
4032                                __func__, __LINE__);
4033                         error = -ENOMEM;
4034                         goto clean;
4035                 }
4036         }
4037
4038         spin_lock(&sdebug_host_list_lock);
4039         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4040         spin_unlock(&sdebug_host_list_lock);
4041
4042         sdbg_host->dev.bus = &pseudo_lld_bus;
4043         sdbg_host->dev.parent = pseudo_primary;
4044         sdbg_host->dev.release = &sdebug_release_adapter;
4045         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4046
4047         error = device_register(&sdbg_host->dev);
4048
4049         if (error)
4050                 goto clean;
4051
4052         ++scsi_debug_add_host;
4053         return error;
4054
4055 clean:
4056         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4057                                  dev_list) {
4058                 list_del(&sdbg_devinfo->dev_list);
4059                 kfree(sdbg_devinfo);
4060         }
4061
4062         kfree(sdbg_host);
4063         return error;
4064 }
4065
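/*
 * Remove the most recently added simulated adapter from the host list
 * and unregister its device; the release callback frees the host info.
 */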
4066 static void sdebug_remove_adapter(void)
4067 {
4068         struct sdebug_host_info *sdbg_host = NULL;
4069
4070         spin_lock(&sdebug_host_list_lock);
4071         if (!list_empty(&sdebug_host_list)) {
4072                 sdbg_host = list_entry(sdebug_host_list.prev,
4073                                        struct sdebug_host_info, host_list);
4074                 list_del(&sdbg_host->host_list);
4075         }
4076         spin_unlock(&sdebug_host_list_lock);
4077
4078         if (!sdbg_host)
4079                 return;
4080
4081         device_unregister(&sdbg_host->dev);
4082         --scsi_debug_add_host;
4083 }
4084
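/*
 * Main command dispatcher: optionally log the CDB, validate the LUN and
 * device info, apply every_nth error injection, restrict the well known
 * LUN to its allowed opcodes, then act on the opcode and schedule the
 * response with the configured delay.
 */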
4085 static int
4086 scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
4087 {
4088         unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
4089         int len, k;
4090         unsigned int num;
4091         unsigned long long lba;
4092         u32 ei_lba;
4093         int errsts = 0;
4094         int target = SCpnt->device->id;
4095         struct sdebug_dev_info *devip = NULL;
4096         int inj_recovered = 0;
4097         int inj_transport = 0;
4098         int inj_dif = 0;
4099         int inj_dix = 0;
4100         int inj_short = 0;
4101         int delay_override = 0;
4102         int unmap = 0;
4103
4104         scsi_set_resid(SCpnt, 0);
4105         if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
4106             !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) {
4107                 char b[120];
4108                 int n;
4109
4110                 len = SCpnt->cmd_len;
4111                 if (len > 32)
4112                         strcpy(b, "too long, over 32 bytes");
4113                 else {
4114                         for (k = 0, n = 0; k < len; ++k)
4115                                 n += scnprintf(b + n, sizeof(b) - n, "%02x ",
4116                                                (unsigned int)cmd[k]);
4117                 }
4118                 sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
4119                             b);
4120         }
4121
4122         if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
4123             (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
4124                 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4125         devip = devInfoReg(SCpnt->device);
4126         if (NULL == devip)
4127                 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4128
4129         if ((scsi_debug_every_nth != 0) &&
4130             (atomic_inc_return(&sdebug_cmnd_count) >=
4131              abs(scsi_debug_every_nth))) {
4132                 atomic_set(&sdebug_cmnd_count, 0);
4133                 if (scsi_debug_every_nth < -1)
4134                         scsi_debug_every_nth = -1;
4135                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
4136                         return 0; /* ignore command causing timeout */
4137                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
4138                          scsi_medium_access_command(SCpnt))
4139                         return 0; /* time out reads and writes */
4140                 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
4141                         inj_recovered = 1; /* to reads and writes below */
4142                 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
4143                         inj_transport = 1; /* to reads and writes below */
4144                 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
4145                         inj_dif = 1; /* to reads and writes below */
4146                 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
4147                         inj_dix = 1; /* to reads and writes below */
4148                 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
4149                         inj_short = 1;
4150         }
4151
4152         if (devip->wlun) {
4153                 switch (*cmd) {
4154                 case INQUIRY:
4155                 case REQUEST_SENSE:
4156                 case TEST_UNIT_READY:
4157                 case REPORT_LUNS:
4158                         break;  /* only allowable wlun commands */
4159                 default:
4160                         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4161                                 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
4162                                        "not supported for wlun\n", *cmd);
4163                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4164                                         INVALID_OPCODE, 0);
4165                         errsts = check_condition_result;
4166                         return schedule_resp(SCpnt, devip, errsts, 0);
4167                 }
4168         }
4169
4170         switch (*cmd) {
4171         case INQUIRY:     /* mandatory, ignore unit attention */
4172                 delay_override = 1;
4173                 errsts = resp_inquiry(SCpnt, target, devip);
4174                 break;
4175         case REQUEST_SENSE:     /* mandatory, ignore unit attention */
4176                 delay_override = 1;
4177                 errsts = resp_requests(SCpnt, devip);
4178                 break;
4179         case REZERO_UNIT:       /* actually this is REWIND for SSC */
4180         case START_STOP:
4181                 errsts = resp_start_stop(SCpnt, devip);
4182                 break;
4183         case ALLOW_MEDIUM_REMOVAL:
4184                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4185                 if (errsts)
4186                         break;
4187                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4188                         printk(KERN_INFO "scsi_debug: Medium removal %s\n",
4189                                cmd[4] ? "inhibited" : "enabled");
4190                 break;
4191         case SEND_DIAGNOSTIC:     /* mandatory */
4192                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4193                 break;
4194         case TEST_UNIT_READY:     /* mandatory */
4195                 /* delay_override = 1; */
4196                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4197                 break;
4198         case RESERVE:
4199                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4200                 break;
4201         case RESERVE_10:
4202                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4203                 break;
4204         case RELEASE:
4205                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4206                 break;
4207         case RELEASE_10:
4208                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4209                 break;
4210         case READ_CAPACITY:
4211                 errsts = resp_readcap(SCpnt, devip);
4212                 break;
4213         case SERVICE_ACTION_IN:
4214                 if (cmd[1] == SAI_READ_CAPACITY_16)
4215                         errsts = resp_readcap16(SCpnt, devip);
4216                 else if (cmd[1] == SAI_GET_LBA_STATUS) {
4217
4218                         if (scsi_debug_lbp() == 0) {
4219                                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4220                                                 INVALID_COMMAND_OPCODE, 0);
4221                                 errsts = check_condition_result;
4222                         } else
4223                                 errsts = resp_get_lba_status(SCpnt, devip);
4224                 } else {
4225                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4226                                         INVALID_OPCODE, 0);
4227                         errsts = check_condition_result;
4228                 }
4229                 break;
4230         case MAINTENANCE_IN:
4231                 if (MI_REPORT_TARGET_PGS != cmd[1]) {
4232                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4233                                         INVALID_OPCODE, 0);
4234                         errsts = check_condition_result;
4235                         break;
4236                 }
4237                 errsts = resp_report_tgtpgs(SCpnt, devip);
4238                 break;
4239         case READ_16:
4240         case READ_12:
4241         case READ_10:
4242                 /* READ{10,12,16} and DIF Type 2 are natural enemies */
4243                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4244                     cmd[1] & 0xe0) {
4245                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4246                                         INVALID_COMMAND_OPCODE, 0);
4247                         errsts = check_condition_result;
4248                         break;
4249                 }
4250
4251                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4252                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4253                     (cmd[1] & 0xe0) == 0)
4254                         printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4255
4256                 /* fall through */
4257         case READ_6:
4258 read:
4259                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4260                 if (errsts)
4261                         break;
4262                 if (scsi_debug_fake_rw)
4263                         break;
4264                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4265
4266                 if (inj_short)
4267                         num /= 2;
4268
4269                 errsts = resp_read(SCpnt, lba, num, ei_lba);
4270                 if (inj_recovered && (0 == errsts)) {
4271                         mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4272                                         THRESHOLD_EXCEEDED, 0);
4273                         errsts = check_condition_result;
4274                 } else if (inj_transport && (0 == errsts)) {
4275                         mk_sense_buffer(SCpnt, ABORTED_COMMAND,
4276                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
4277                         errsts = check_condition_result;
4278                 } else if (inj_dif && (0 == errsts)) {
4279                         /* Logical block guard check failed */
4280                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4281                         errsts = illegal_condition_result;
4282                 } else if (inj_dix && (0 == errsts)) {
4283                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4284                         errsts = illegal_condition_result;
4285                 }
4286                 break;
4287         case REPORT_LUNS:       /* mandatory, ignore unit attention */
4288                 delay_override = 1;
4289                 errsts = resp_report_luns(SCpnt, devip);
4290                 break;
4291         case VERIFY:            /* 10 byte SBC-2 command */
4292                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4293                 break;
4294         case WRITE_16:
4295         case WRITE_12:
4296         case WRITE_10:
4297                 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
4298                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4299                     cmd[1] & 0xe0) {
4300                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4301                                         INVALID_COMMAND_OPCODE, 0);
4302                         errsts = check_condition_result;
4303                         break;
4304                 }
4305
4306                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4307                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4308                     (cmd[1] & 0xe0) == 0)
4309                         printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4310
4311                 /* fall through */
4312         case WRITE_6:
4313 write:
4314                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4315                 if (errsts)
4316                         break;
4317                 if (scsi_debug_fake_rw)
4318                         break;
4319                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4320                 errsts = resp_write(SCpnt, lba, num, ei_lba);
4321                 if (inj_recovered && (0 == errsts)) {
4322                         mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4323                                         THRESHOLD_EXCEEDED, 0);
4324                         errsts = check_condition_result;
4325                 } else if (inj_dif && (0 == errsts)) {
4326                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4327                         errsts = illegal_condition_result;
4328                 } else if (inj_dix && (0 == errsts)) {
4329                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4330                         errsts = illegal_condition_result;
4331                 }
4332                 break;
4333         case WRITE_SAME_16:
4334         case WRITE_SAME:
4335                 if (cmd[1] & 0x8) {
4336                         if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
4337                             (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
4338                                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4339                                                 INVALID_FIELD_IN_CDB, 0);
4340                                 errsts = check_condition_result;
4341                         } else
4342                                 unmap = 1;
4343                 }
4344                 if (errsts)
4345                         break;
4346                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4347                 if (errsts)
4348                         break;
4349                 if (scsi_debug_fake_rw)
4350                         break;
4351                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4352                 errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
4353                 break;
4354         case UNMAP:
4355                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4356                 if (errsts)
4357                         break;
4358                 if (scsi_debug_fake_rw)
4359                         break;
4360
4361                 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
4362                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4363                                         INVALID_COMMAND_OPCODE, 0);
4364                         errsts = check_condition_result;
4365                 } else
4366                         errsts = resp_unmap(SCpnt, devip);
4367                 break;
4368         case MODE_SENSE:
4369         case MODE_SENSE_10:
4370                 errsts = resp_mode_sense(SCpnt, target, devip);
4371                 break;
4372         case MODE_SELECT:
4373                 errsts = resp_mode_select(SCpnt, 1, devip);
4374                 break;
4375         case MODE_SELECT_10:
4376                 errsts = resp_mode_select(SCpnt, 0, devip);
4377                 break;
4378         case LOG_SENSE:
4379                 errsts = resp_log_sense(SCpnt, devip);
4380                 break;
4381         case SYNCHRONIZE_CACHE:
4382                 delay_override = 1;
4383                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4384                 break;
4385         case WRITE_BUFFER:
4386                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4387                 break;
4388         case XDWRITEREAD_10:
4389                 if (!scsi_bidi_cmnd(SCpnt)) {
4390                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4391                                         INVALID_FIELD_IN_CDB, 0);
4392                         errsts = check_condition_result;
4393                         break;
4394                 }
4395
4396                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4397                 if (errsts)
4398                         break;
4399                 if (scsi_debug_fake_rw)
4400                         break;
4401                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4402                 errsts = resp_read(SCpnt, lba, num, ei_lba);
4403                 if (errsts)
4404                         break;
4405                 errsts = resp_write(SCpnt, lba, num, ei_lba);
4406                 if (errsts)
4407                         break;
4408                 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
4409                 break;
4410         case VARIABLE_LENGTH_CMD:
4411                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
4412
4413                         if ((cmd[10] & 0xe0) == 0)
4414                                 printk(KERN_ERR
4415                                        "Unprotected RD/WR to DIF device\n");
4416
4417                         if (cmd[9] == READ_32) {
4418                                 BUG_ON(SCpnt->cmd_len < 32);
4419                                 goto read;
4420                         }
4421
4422                         if (cmd[9] == WRITE_32) {
4423                                 BUG_ON(SCpnt->cmd_len < 32);
4424                                 goto write;
4425                         }
4426                 }
4427
4428                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4429                                 INVALID_FIELD_IN_CDB, 0);
4430                 errsts = check_condition_result;
4431                 break;
4432         case 0x85:      /* ATA PASS-THROUGH(16) */
4433                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4434                         sdev_printk(KERN_INFO, SCpnt->device,
4435                         "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
4436                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4437                                 INVALID_OPCODE, 0);
4438                 errsts = check_condition_result;
4439                 break;
4440         default:
4441                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4442                         sdev_printk(KERN_INFO, SCpnt->device,
4443                                     "%s: Opcode: 0x%x not supported\n",
4444                                     my_name, *cmd);
4445                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4446                 if (errsts)
4447                         break;  /* Unit attention takes precedence */
4448                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
4449                 errsts = check_condition_result;
4450                 break;
4451         }
4452         return schedule_resp(SCpnt, devip, errsts,
4453                              (delay_override ? 0 : scsi_debug_delay));
4454 }
4455
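/*
 * queuecommand entry point: optionally serialise on the Scsi_Host lock
 * (scsi_debug_host_lock) around the dispatcher above.
 */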
4456 static int
4457 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4458 {
4459         if (scsi_debug_host_lock) {
4460                 unsigned long iflags;
4461                 int rc;
4462
4463                 spin_lock_irqsave(shost->host_lock, iflags);
4464                 rc = scsi_debug_queuecommand(cmd);
4465                 spin_unlock_irqrestore(shost->host_lock, iflags);
4466                 return rc;
4467         } else
4468                 return scsi_debug_queuecommand(cmd);
4469 }
4470
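/*
 * change_queue_depth callback: clamp the requested depth (allowed to
 * exceed can_queue slightly for testing) or track queue-full events,
 * and report the resulting queue depth.
 */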
4471 static int
4472 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
4473 {
4474         int num_in_q = 0;
4475         int bad = 0;
4476         unsigned long iflags;
4477         struct sdebug_dev_info *devip;
4478
4479         spin_lock_irqsave(&queued_arr_lock, iflags);
4480         devip = (struct sdebug_dev_info *)sdev->hostdata;
4481         if (NULL == devip) {
4482                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4483                 return  -ENODEV;
4484         }
4485         num_in_q = atomic_read(&devip->num_in_q);
4486         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4487         if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
4488                 if (qdepth < 1)
4489                         qdepth = 1;
4490                 /* allow the depth to exceed the host's queued_arr size for testing */
4491                 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4492                         qdepth = SCSI_DEBUG_CANQUEUE + 10;
4493                 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4494         } else if (reason == SCSI_QDEPTH_QFULL)
4495                 scsi_track_queue_full(sdev, qdepth);
4496         else
4497                 bad = 1;
4498         if (bad)
4499                 sdev_printk(KERN_WARNING, sdev,
4500                             "%s: unknown reason=0x%x\n", __func__, reason);
4501         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4502                 if (SCSI_QDEPTH_QFULL == reason)
4503                         sdev_printk(KERN_INFO, sdev,
4504                             "%s: -> %d, num_in_q=%d, reason: queue full\n",
4505                                     __func__, qdepth, num_in_q);
4506                 else {
4507                         const char *cp;
4508
4509                         switch (reason) {
4510                         case SCSI_QDEPTH_DEFAULT:
4511                                 cp = "default (sysfs ?)";
4512                                 break;
4513                         case SCSI_QDEPTH_RAMP_UP:
4514                                 cp = "ramp up";
4515                                 break;
4516                         default:
4517                                 cp = "unknown";
4518                                 break;
4519                         }
4520                         sdev_printk(KERN_INFO, sdev,
4521                                     "%s: qdepth=%d, num_in_q=%d, reason: %s\n",
4522                                     __func__, qdepth, num_in_q, cp);
4523                 }
4524         }
4525         return sdev->queue_depth;
4526 }
4527
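/*
 * change_queue_type callback: switch between untagged and tagged
 * queueing when the device supports it.
 */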
4528 static int
4529 sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4530 {
4531         if (sdev->tagged_supported) {
4532                 scsi_set_tag_type(sdev, qtype);
4533                 if (qtype)
4534                         scsi_activate_tcq(sdev, sdev->queue_depth);
4535                 else
4536                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
4537         } else
4538                 qtype = 0;
4539         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4540                 const char *cp;
4541
4542                 switch (qtype) {
4543                 case 0:
4544                         cp = "untagged";
4545                         break;
4546                 case MSG_SIMPLE_TAG:
4547                         cp = "simple tags";
4548                         break;
4549                 case MSG_ORDERED_TAG:
4550                         cp = "ordered tags";
4551                         break;
4552                 default:
4553                         cp = "unknown";
4554                         break;
4555                 }
4556                 sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
4557         }
4558         return qtype;
4559 }
4560
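/* Host template shared by every simulated adapter on the pseudo bus. */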
4561 static struct scsi_host_template sdebug_driver_template = {
4562         .show_info =            scsi_debug_show_info,
4563         .write_info =           scsi_debug_write_info,
4564         .proc_name =            sdebug_proc_name,
4565         .name =                 "SCSI DEBUG",
4566         .info =                 scsi_debug_info,
4567         .slave_alloc =          scsi_debug_slave_alloc,
4568         .slave_configure =      scsi_debug_slave_configure,
4569         .slave_destroy =        scsi_debug_slave_destroy,
4570         .ioctl =                scsi_debug_ioctl,
4571         .queuecommand =         sdebug_queuecommand_lock_or_not,
4572         .change_queue_depth =   sdebug_change_qdepth,
4573         .change_queue_type =    sdebug_change_qtype,
4574         .eh_abort_handler =     scsi_debug_abort,
4575         .eh_device_reset_handler = scsi_debug_device_reset,
4576         .eh_target_reset_handler = scsi_debug_target_reset,
4577         .eh_bus_reset_handler = scsi_debug_bus_reset,
4578         .eh_host_reset_handler = scsi_debug_host_reset,
4579         .can_queue =            SCSI_DEBUG_CANQUEUE,
4580         .this_id =              7,
4581         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
4582         .cmd_per_lun =          DEF_CMD_PER_LUN,
4583         .max_sectors =          -1U,
4584         .use_clustering =       DISABLE_CLUSTERING,
4585         .module =               THIS_MODULE,
4586 };
4587
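/*
 * Pseudo bus probe: allocate and configure a Scsi_Host (queue depth,
 * clustering, T10 DIF/DIX capability and guard type), add it and scan
 * for the simulated devices.
 */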
4588 static int sdebug_driver_probe(struct device *dev)
4589 {
4590         int error = 0;
4591         struct sdebug_host_info *sdbg_host;
4592         struct Scsi_Host *hpnt;
4593         int host_prot;
4594
4595         sdbg_host = to_sdebug_host(dev);
4596
4597         sdebug_driver_template.can_queue = scsi_debug_max_queue;
4598         if (scsi_debug_clustering)
4599                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
4600         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
4601         if (NULL == hpnt) {
4602                 printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
4603                 error = -ENODEV;
4604                 return error;
4605         }
4606
4607         sdbg_host->shost = hpnt;
4608         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
4609         if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
4610                 hpnt->max_id = scsi_debug_num_tgts + 1;
4611         else
4612                 hpnt->max_id = scsi_debug_num_tgts;
4613         hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* = scsi_debug_max_luns; */
4614
4615         host_prot = 0;
4616
4617         switch (scsi_debug_dif) {
4618
4619         case SD_DIF_TYPE1_PROTECTION:
4620                 host_prot = SHOST_DIF_TYPE1_PROTECTION;
4621                 if (scsi_debug_dix)
4622                         host_prot |= SHOST_DIX_TYPE1_PROTECTION;
4623                 break;
4624
4625         case SD_DIF_TYPE2_PROTECTION:
4626                 host_prot = SHOST_DIF_TYPE2_PROTECTION;
4627                 if (scsi_debug_dix)
4628                         host_prot |= SHOST_DIX_TYPE2_PROTECTION;
4629                 break;
4630
4631         case SD_DIF_TYPE3_PROTECTION:
4632                 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4633                 if (scsi_debug_dix)
4634                         host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4635                 break;
4636
4637         default:
4638                 if (scsi_debug_dix)
4639                         host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4640                 break;
4641         }
4642
4643         scsi_host_set_prot(hpnt, host_prot);
4644
4645         printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4646                (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4647                (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4648                (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4649                (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4650                (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4651                (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4652                (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4653
4654         if (scsi_debug_guard == 1)
4655                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4656         else
4657                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4658
4659         error = scsi_add_host(hpnt, &sdbg_host->dev);
4660         if (error) {
4661                 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4662                 error = -ENODEV;
4663                 scsi_host_put(hpnt);
4664         } else
4665                 scsi_scan_host(hpnt);
4666
4667         return error;
4668 }
4669
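/*
 * Pseudo bus remove: take the Scsi_Host offline, free the per-device
 * info list and drop the host reference.
 */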
4670 static int sdebug_driver_remove(struct device *dev)
4671 {
4672         struct sdebug_host_info *sdbg_host;
4673         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4674
4675         sdbg_host = to_sdebug_host(dev);
4676
4677         if (!sdbg_host) {
4678                 printk(KERN_ERR "%s: Unable to locate host info\n",
4679                        __func__);
4680                 return -ENODEV;
4681         }
4682
4683         scsi_remove_host(sdbg_host->shost);
4684
4685         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4686                                  dev_list) {
4687                 list_del(&sdbg_devinfo->dev_list);
4688                 kfree(sdbg_devinfo);
4689         }
4690
4691         scsi_host_put(sdbg_host->shost);
4692         return 0;
4693 }
4694
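/* The pseudo bus matches every device to its only driver. */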
4695 static int pseudo_lld_bus_match(struct device *dev,
4696                                 struct device_driver *dev_driver)
4697 {
4698         return 1;
4699 }
4700
4701 static struct bus_type pseudo_lld_bus = {
4702         .name = "pseudo",
4703         .match = pseudo_lld_bus_match,
4704         .probe = sdebug_driver_probe,
4705         .remove = sdebug_driver_remove,
4706         .drv_groups = sdebug_drv_groups,
4707 };