1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.84"
67 static const char *scsi_debug_version_date = "20140706";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define ADDR_OUT_OF_RANGE 0x21
79 #define INVALID_COMMAND_OPCODE 0x20
80 #define INVALID_FIELD_IN_CDB 0x24
81 #define INVALID_FIELD_IN_PARAM_LIST 0x26
82 #define UA_RESET_ASC 0x29
83 #define UA_CHANGED_ASC 0x2a
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
87 #define SAVING_PARAMS_UNSUP 0x39
88 #define TRANSPORT_PROBLEM 0x4b
89 #define THRESHOLD_EXCEEDED 0x5d
90 #define LOW_POWER_COND_ON 0x5e
91
92 /* Additional Sense Code Qualifier (ASCQ) */
93 #define ACK_NAK_TO 0x3
94
95
96 /* Default values for driver parameters */
97 #define DEF_NUM_HOST   1
98 #define DEF_NUM_TGTS   1
99 #define DEF_MAX_LUNS   1
100 /* With these defaults, this driver will make 1 host with 1 target
101  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
102  */
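/* Illustrative example (not part of the original source; parameter names are
 * the module options declared further down): loading the driver as
 * "modprobe scsi_debug add_host=2 num_tgts=3 max_luns=2" would present
 * 2 * 3 * 2 = 12 pseudo devices, all backed by the one shared ramdisk.
 */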
103 #define DEF_ATO 1
104 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
105 #define DEF_DEV_SIZE_MB   8
106 #define DEF_DIF 0
107 #define DEF_DIX 0
108 #define DEF_D_SENSE   0
109 #define DEF_EVERY_NTH   0
110 #define DEF_FAKE_RW     0
111 #define DEF_GUARD 0
112 #define DEF_HOST_LOCK 0
113 #define DEF_LBPU 0
114 #define DEF_LBPWS 0
115 #define DEF_LBPWS10 0
116 #define DEF_LBPRZ 1
117 #define DEF_LOWEST_ALIGNED 0
118 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
119 #define DEF_NO_LUN_0   0
120 #define DEF_NUM_PARTS   0
121 #define DEF_OPTS   0
122 #define DEF_OPT_BLKS 64
123 #define DEF_PHYSBLK_EXP 0
124 #define DEF_PTYPE   0
125 #define DEF_REMOVABLE false
126 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
127 #define DEF_SECTOR_SIZE 512
128 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
129 #define DEF_UNMAP_ALIGNMENT 0
130 #define DEF_UNMAP_GRANULARITY 1
131 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
132 #define DEF_UNMAP_MAX_DESC 256
133 #define DEF_VIRTUAL_GB   0
134 #define DEF_VPD_USE_HOSTNO 1
135 #define DEF_WRITESAME_LENGTH 0xFFFF
136 #define DELAY_OVERRIDDEN -9999
137
138 /* bit mask values for scsi_debug_opts */
139 #define SCSI_DEBUG_OPT_NOISE   1
140 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
141 #define SCSI_DEBUG_OPT_TIMEOUT   4
142 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
143 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
144 #define SCSI_DEBUG_OPT_DIF_ERR   32
145 #define SCSI_DEBUG_OPT_DIX_ERR   64
146 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
147 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
148 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
149 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
150 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
151 #define SCSI_DEBUG_OPT_N_WCE    0x1000
152 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
153 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
154 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
155 /* When "every_nth" > 0 then modulo "every_nth" commands:
156  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
157  *   - a RECOVERED_ERROR is simulated on successful read and write
158  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
159  *   - a TRANSPORT_ERROR is simulated on successful read and write
160  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
161  *
162  * When "every_nth" < 0 then after "- every_nth" commands:
163  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
164  *   - a RECOVERED_ERROR is simulated on successful read and write
165  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
166  *   - a TRANSPORT_ERROR is simulated on successful read and write
167  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
168  * This will continue until some other action occurs (e.g. the user
169  * writing a new value (other than -1 or 1) to every_nth via sysfs).
170  */
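/* Worked example (illustrative only): loading with
 * "modprobe scsi_debug every_nth=100 opts=0x8" selects
 * SCSI_DEBUG_OPT_RECOVERED_ERR, so every 100th READ or WRITE completes with
 * RECOVERED ERROR sense; "opts=0x4" (SCSI_DEBUG_OPT_TIMEOUT) would instead
 * swallow every 100th command so the mid-layer sees a timeout.
 */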
171
172 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
173  * priority order. In the subset implemented here lower numbers have higher
174  * priority. The UA numbers should be a sequence starting from 0 with
175  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
176 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
177 #define SDEBUG_UA_BUS_RESET 1
178 #define SDEBUG_UA_MODE_CHANGED 2
179 #define SDEBUG_NUM_UAS 3
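/* Sketch of how uas_bm is meant to be used (inferred from check_readiness()
 * below): a reset handler does set_bit(SDEBUG_UA_POR, devip->uas_bm); the
 * next command then picks the lowest set bit with
 * find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS), reports that unit attention
 * and clears it, so lower numbered (higher priority) UAs are delivered first.
 */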
180
181 /* for check_readiness() */
182 #define UAS_ONLY 1
183 #define UAS_TUR 0
184
185 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
186  * sector on read commands: */
187 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
188 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
189
190 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
191  * or "peripheral device" addressing (value 0) */
192 #define SAM2_LUN_ADDRESS_METHOD 0
193 #define SAM2_WLUN_REPORT_LUNS 0xc101
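/* Hedged worked example (my reading of SAM-2 LUN addressing, not stated in
 * the original source): with "flat space" addressing LUN 258 appears in the
 * REPORT LUNS response as bytes 0x41 0x02 (01b method plus a 14 bit LUN),
 * while "peripheral device" addressing encodes LUN 5 as 0x00 0x05 and only
 * covers LUNs 0..255. 0xc101 above is the REPORT LUNS well-known LUN
 * (11b addressing method).
 */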
194
195 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
196  * (for response) at one time. Can be reduced by max_queue option. Command
197  * responses are not queued when delay=0 and ndelay=0. The per-device
198  * DEF_CMD_PER_LUN can be changed via sysfs:
199  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
200  * SCSI_DEBUG_CANQUEUE. */
201 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* each 'word' covers BITS_PER_LONG commands */
202 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
203 #define DEF_CMD_PER_LUN  255
204
205 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
206 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
207 #endif
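/* Worked example (illustrative): on a 64 bit build BITS_PER_LONG is 64, so
 * SCSI_DEBUG_CANQUEUE = 9 * 64 = 576 commands can be queued at once and
 * queued_in_use_bm[] below needs 9 longs to track them; on a 32 bit build
 * the limit drops to 9 * 32 = 288.
 */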
208
209 static int scsi_debug_add_host = DEF_NUM_HOST;
210 static int scsi_debug_ato = DEF_ATO;
211 static int scsi_debug_delay = DEF_DELAY;
212 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
213 static int scsi_debug_dif = DEF_DIF;
214 static int scsi_debug_dix = DEF_DIX;
215 static int scsi_debug_dsense = DEF_D_SENSE;
216 static int scsi_debug_every_nth = DEF_EVERY_NTH;
217 static int scsi_debug_fake_rw = DEF_FAKE_RW;
218 static unsigned int scsi_debug_guard = DEF_GUARD;
219 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
220 static int scsi_debug_max_luns = DEF_MAX_LUNS;
221 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
222 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
223 static int scsi_debug_ndelay = DEF_NDELAY;
224 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
225 static int scsi_debug_no_uld = 0;
226 static int scsi_debug_num_parts = DEF_NUM_PARTS;
227 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
228 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
229 static int scsi_debug_opts = DEF_OPTS;
230 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
231 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
232 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
233 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
234 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
235 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
236 static unsigned int scsi_debug_lbpu = DEF_LBPU;
237 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
238 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
239 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
240 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
241 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
242 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
243 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
244 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
245 static bool scsi_debug_removable = DEF_REMOVABLE;
246 static bool scsi_debug_clustering;
247 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
248
249 static atomic_t sdebug_cmnd_count;
250 static atomic_t sdebug_completions;
251 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
252
253 #define DEV_READONLY(TGT)      (0)
254
255 static unsigned int sdebug_store_sectors;
256 static sector_t sdebug_capacity;        /* in sectors */
257
258 /* old BIOS stuff; the kernel may get rid of these, but some mode sense
259    pages may still need them */
260 static int sdebug_heads;                /* heads per disk */
261 static int sdebug_cylinders_per;        /* cylinders per surface */
262 static int sdebug_sectors_per;          /* sectors per cylinder */
263
264 #define SDEBUG_MAX_PARTS 4
265
266 #define SCSI_DEBUG_MAX_CMD_LEN 32
267
268 static unsigned int scsi_debug_lbp(void)
269 {
270         return ((0 == scsi_debug_fake_rw) &&
271                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
272 }
273
274 struct sdebug_dev_info {
275         struct list_head dev_list;
276         unsigned int channel;
277         unsigned int target;
278         u64 lun;
279         struct sdebug_host_info *sdbg_host;
280         u64 wlun;
281         unsigned long uas_bm[1];
282         atomic_t num_in_q;
283         char stopped;
284         char used;
285 };
286
287 struct sdebug_host_info {
288         struct list_head host_list;
289         struct Scsi_Host *shost;
290         struct device dev;
291         struct list_head dev_info_list;
292 };
293
294 #define to_sdebug_host(d)       \
295         container_of(d, struct sdebug_host_info, dev)
296
297 static LIST_HEAD(sdebug_host_list);
298 static DEFINE_SPINLOCK(sdebug_host_list_lock);
299
300
301 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
302         struct hrtimer hrt;     /* must be first element */
303         int qa_indx;
304 };
305
306 struct sdebug_queued_cmd {
307         /* in_use flagged by a bit in queued_in_use_bm[] */
308         struct timer_list *cmnd_timerp;
309         struct tasklet_struct *tletp;
310         struct sdebug_hrtimer *sd_hrtp;
311         struct scsi_cmnd * a_cmnd;
312 };
313 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
314 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
315
316
317 static unsigned char * fake_storep;     /* ramdisk storage */
318 static struct sd_dif_tuple *dif_storep; /* protection info */
319 static void *map_storep;                /* provisioning map */
320
321 static unsigned long map_size;
322 static int num_aborts;
323 static int num_dev_resets;
324 static int num_target_resets;
325 static int num_bus_resets;
326 static int num_host_resets;
327 static int dix_writes;
328 static int dix_reads;
329 static int dif_errors;
330
331 static DEFINE_SPINLOCK(queued_arr_lock);
332 static DEFINE_RWLOCK(atomic_rw);
333
334 static char sdebug_proc_name[] = MY_NAME;
335 static const char *my_name = MY_NAME;
336
337 static struct bus_type pseudo_lld_bus;
338
339 static struct device_driver sdebug_driverfs_driver = {
340         .name           = sdebug_proc_name,
341         .bus            = &pseudo_lld_bus,
342 };
343
344 static const int check_condition_result =
345                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
346
347 static const int illegal_condition_result =
348         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
349
350 static const int device_qfull_result =
351         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
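/* Layout reminder (assumed from the standard scsi_cmnd result convention,
 * not spelled out in the original): the 32 bit result packs
 * driver_byte << 24 | host_byte << 16 | msg_byte << 8 | status_byte, so
 * check_condition_result above means "driver noticed sense data"
 * (DRIVER_SENSE) plus the SAM CHECK CONDITION status, and
 * device_qfull_result is a clean host/message path ending in TASK SET FULL.
 */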
352
353 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
354                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
355                                      0, 0, 0, 0};
356 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
357                                     0, 0, 0x2, 0x4b};
358 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
359                                    0, 0, 0x0, 0x0};
360
361 static void *fake_store(unsigned long long lba)
362 {
363         lba = do_div(lba, sdebug_store_sectors);
364
365         return fake_storep + lba * scsi_debug_sector_size;
366 }
367
368 static struct sd_dif_tuple *dif_store(sector_t sector)
369 {
370         sector = do_div(sector, sdebug_store_sectors);
371
372         return dif_storep + sector;
373 }
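/* Illustrative arithmetic using the defaults above: dev_size_mb=8 and
 * sector_size=512 give 8 * 1048576 / 512 = 16384 backing sectors, so an
 * access to LBA 20000 (possible when virtual_gb enlarges the reported
 * capacity) wraps to 20000 % 16384 = 3616 and fake_store() returns
 * fake_storep + 3616 * 512.
 */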
374
375 static int sdebug_add_adapter(void);
376 static void sdebug_remove_adapter(void);
377
378 static void sdebug_max_tgts_luns(void)
379 {
380         struct sdebug_host_info *sdbg_host;
381         struct Scsi_Host *hpnt;
382
383         spin_lock(&sdebug_host_list_lock);
384         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
385                 hpnt = sdbg_host->shost;
386                 if ((hpnt->this_id >= 0) &&
387                     (scsi_debug_num_tgts > hpnt->this_id))
388                         hpnt->max_id = scsi_debug_num_tgts + 1;
389                 else
390                         hpnt->max_id = scsi_debug_num_tgts;
391                 /* scsi_debug_max_luns; */
392                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
393         }
394         spin_unlock(&sdebug_host_list_lock);
395 }
396
397 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
398 {
399         unsigned char *sbuff;
400
401         sbuff = scp->sense_buffer;
402         if (!sbuff) {
403                 sdev_printk(KERN_ERR, scp->device,
404                             "%s: sense_buffer is NULL\n", __func__);
405                 return;
406         }
407         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
408
409         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
410
411         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
412                 sdev_printk(KERN_INFO, scp->device,
413                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
414                             my_name, key, asc, asq);
415 }
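/* Sense format note (my summary of the SPC fixed vs descriptor formats,
 * kept here because resp_requests() below relies on these offsets): with
 * scsi_debug_dsense=0, scsi_build_sense_buffer() emits fixed format sense
 * (response code 0x70, key at byte 2, ASC/ASCQ at bytes 12/13); with
 * dsense=1 it emits descriptor format (0x72, key/ASC/ASCQ at bytes 1..3).
 */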
416
417 static void get_data_transfer_info(unsigned char *cmd,
418                                    unsigned long long *lba, unsigned int *num,
419                                    u32 *ei_lba)
420 {
421         *ei_lba = 0;
422
423         switch (*cmd) {
424         case VARIABLE_LENGTH_CMD:
425                 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
426                         (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
427                         (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
428                         (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
429
430                 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
431                         (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
432
433                 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
434                         (u32)cmd[28] << 24;
435                 break;
436
437         case WRITE_SAME_16:
438         case WRITE_16:
439         case READ_16:
440                 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
441                         (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
442                         (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
443                         (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
444
445                 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
446                         (u32)cmd[10] << 24;
447                 break;
448         case WRITE_12:
449         case READ_12:
450                 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
451                         (u32)cmd[2] << 24;
452
453                 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
454                         (u32)cmd[6] << 24;
455                 break;
456         case WRITE_SAME:
457         case WRITE_10:
458         case READ_10:
459         case XDWRITEREAD_10:
460                 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
461                         (u32)cmd[2] << 24;
462
463                 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
464                 break;
465         case WRITE_6:
466         case READ_6:
467                 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
468                         (u32)(cmd[1] & 0x1f) << 16;
469                 *num = (0 == cmd[4]) ? 256 : cmd[4];
470                 break;
471         default:
472                 break;
473         }
474 }
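/* Decode example (illustrative CDB, not from the original source): a
 * READ(10) of 8 blocks at LBA 0x1234 arrives as
 *   28 00 00 00 12 34 00 00 08 00
 * so the big-endian folds above yield *lba = 0x1234 (4660, the
 * OPT_MEDIUM_ERR_ADDR sector) and *num = 8, with *ei_lba left at 0.
 */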
475
476 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
477 {
478         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
479                 if (0x1261 == cmd)
480                         sdev_printk(KERN_INFO, dev,
481                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
482                 else if (0x5331 == cmd)
483                         sdev_printk(KERN_INFO, dev,
484                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
485                                     __func__);
486                 else
487                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
488                                     __func__, cmd);
489         }
490         return -EINVAL;
491         /* return -ENOTTY; // correct return but upsets fdisk */
492 }
493
494 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
495                            struct sdebug_dev_info * devip)
496 {
497         int k;
498         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
499
500         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
501         if (k != SDEBUG_NUM_UAS) {
502                 const char *cp = NULL;
503
504                 switch (k) {
505                 case SDEBUG_UA_POR:
506                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
507                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
508                         if (debug)
509                                 cp = "power on reset";
510                         break;
511                 case SDEBUG_UA_BUS_RESET:
512                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
513                                         UA_RESET_ASC, BUS_RESET_ASCQ);
514                         if (debug)
515                                 cp = "bus reset";
516                         break;
517                 case SDEBUG_UA_MODE_CHANGED:
518                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
519                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
520                         if (debug)
521                                 cp = "mode parameters changed";
522                         break;
523                 default:
524                         pr_warn("%s: unexpected unit attention code=%d\n",
525                                 __func__, k);
526                         if (debug)
527                                 cp = "unknown";
528                         break;
529                 }
530                 clear_bit(k, devip->uas_bm);
531                 if (debug)
532                         sdev_printk(KERN_INFO, SCpnt->device,
533                                    "%s reports: Unit attention: %s\n",
534                                    my_name, cp);
535                 return check_condition_result;
536         }
537         if ((UAS_TUR == uas_only) && devip->stopped) {
538                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
539                                 0x2);
540                 if (debug)
541                         sdev_printk(KERN_INFO, SCpnt->device,
542                                     "%s reports: Not ready: %s\n", my_name,
543                                     "initializing command required");
544                 return check_condition_result;
545         }
546         return 0;
547 }
548
549 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
550 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
551                                 int arr_len)
552 {
553         int act_len;
554         struct scsi_data_buffer *sdb = scsi_in(scp);
555
556         if (!sdb->length)
557                 return 0;
558         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
559                 return (DID_ERROR << 16);
560
561         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
562                                       arr, arr_len);
563         sdb->resid = scsi_bufflen(scp) - act_len;
564
565         return 0;
566 }
567
568 /* Returns number of bytes fetched into 'arr' or -1 if error. */
569 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
570                                int arr_len)
571 {
572         if (!scsi_bufflen(scp))
573                 return 0;
574         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
575                 return -1;
576
577         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
578 }
579
580
581 static const char * inq_vendor_id = "Linux   ";
582 static const char * inq_product_id = "scsi_debug      ";
583 static const char *inq_product_rev = "0184";    /* version less '.' */
584
585 /* Device identification VPD page. Returns number of bytes placed in arr */
586 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
587                            int target_dev_id, int dev_id_num,
588                            const char * dev_id_str,
589                            int dev_id_str_len)
590 {
591         int num, port_a;
592         char b[32];
593
594         port_a = target_dev_id + 1;
595         /* T10 vendor identifier field format (faked) */
596         arr[0] = 0x2;   /* ASCII */
597         arr[1] = 0x1;
598         arr[2] = 0x0;
599         memcpy(&arr[4], inq_vendor_id, 8);
600         memcpy(&arr[12], inq_product_id, 16);
601         memcpy(&arr[28], dev_id_str, dev_id_str_len);
602         num = 8 + 16 + dev_id_str_len;
603         arr[3] = num;
604         num += 4;
605         if (dev_id_num >= 0) {
606                 /* NAA-5, Logical unit identifier (binary) */
607                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
608                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
609                 arr[num++] = 0x0;
610                 arr[num++] = 0x8;
611                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
612                 arr[num++] = 0x33;
613                 arr[num++] = 0x33;
614                 arr[num++] = 0x30;
615                 arr[num++] = (dev_id_num >> 24);
616                 arr[num++] = (dev_id_num >> 16) & 0xff;
617                 arr[num++] = (dev_id_num >> 8) & 0xff;
618                 arr[num++] = dev_id_num & 0xff;
619                 /* Target relative port number */
620                 arr[num++] = 0x61;      /* proto=sas, binary */
621                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
622                 arr[num++] = 0x0;       /* reserved */
623                 arr[num++] = 0x4;       /* length */
624                 arr[num++] = 0x0;       /* reserved */
625                 arr[num++] = 0x0;       /* reserved */
626                 arr[num++] = 0x0;
627                 arr[num++] = 0x1;       /* relative port A */
628         }
629         /* NAA-5, Target port identifier */
630         arr[num++] = 0x61;      /* proto=sas, binary */
631         arr[num++] = 0x93;      /* piv=1, target port, naa */
632         arr[num++] = 0x0;
633         arr[num++] = 0x8;
634         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
635         arr[num++] = 0x22;
636         arr[num++] = 0x22;
637         arr[num++] = 0x20;
638         arr[num++] = (port_a >> 24);
639         arr[num++] = (port_a >> 16) & 0xff;
640         arr[num++] = (port_a >> 8) & 0xff;
641         arr[num++] = port_a & 0xff;
642         /* NAA-5, Target port group identifier */
643         arr[num++] = 0x61;      /* proto=sas, binary */
644         arr[num++] = 0x95;      /* piv=1, target port group id */
645         arr[num++] = 0x0;
646         arr[num++] = 0x4;
647         arr[num++] = 0;
648         arr[num++] = 0;
649         arr[num++] = (port_group_id >> 8) & 0xff;
650         arr[num++] = port_group_id & 0xff;
651         /* NAA-5, Target device identifier */
652         arr[num++] = 0x61;      /* proto=sas, binary */
653         arr[num++] = 0xa3;      /* piv=1, target device, naa */
654         arr[num++] = 0x0;
655         arr[num++] = 0x8;
656         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
657         arr[num++] = 0x22;
658         arr[num++] = 0x22;
659         arr[num++] = 0x20;
660         arr[num++] = (target_dev_id >> 24);
661         arr[num++] = (target_dev_id >> 16) & 0xff;
662         arr[num++] = (target_dev_id >> 8) & 0xff;
663         arr[num++] = target_dev_id & 0xff;
664         /* SCSI name string: Target device identifier */
665         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
666         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
667         arr[num++] = 0x0;
668         arr[num++] = 24;
669         memcpy(arr + num, "naa.52222220", 12);
670         num += 12;
671         snprintf(b, sizeof(b), "%08X", target_dev_id);
672         memcpy(arr + num, b, 8);
673         num += 8;
674         memset(arr + num, 0, 4);
675         num += 4;
676         return num;
677 }
678
679
680 static unsigned char vpd84_data[] = {
681 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
682     0x22,0x22,0x22,0x0,0xbb,0x1,
683     0x22,0x22,0x22,0x0,0xbb,0x2,
684 };
685
686 /*  Software interface identification VPD page */
687 static int inquiry_evpd_84(unsigned char * arr)
688 {
689         memcpy(arr, vpd84_data, sizeof(vpd84_data));
690         return sizeof(vpd84_data);
691 }
692
693 /* Management network addresses VPD page */
694 static int inquiry_evpd_85(unsigned char * arr)
695 {
696         int num = 0;
697         const char * na1 = "https://www.kernel.org/config";
698         const char * na2 = "http://www.kernel.org/log";
699         int plen, olen;
700
701         arr[num++] = 0x1;       /* lu, storage config */
702         arr[num++] = 0x0;       /* reserved */
703         arr[num++] = 0x0;
704         olen = strlen(na1);
705         plen = olen + 1;
706         if (plen % 4)
707                 plen = ((plen / 4) + 1) * 4;
708         arr[num++] = plen;      /* length, null terminated, padded */
709         memcpy(arr + num, na1, olen);
710         memset(arr + num + olen, 0, plen - olen);
711         num += plen;
712
713         arr[num++] = 0x4;       /* lu, logging */
714         arr[num++] = 0x0;       /* reserved */
715         arr[num++] = 0x0;
716         olen = strlen(na2);
717         plen = olen + 1;
718         if (plen % 4)
719                 plen = ((plen / 4) + 1) * 4;
720         arr[num++] = plen;      /* length, null terminated, padded */
721         memcpy(arr + num, na2, olen);
722         memset(arr + num + olen, 0, plen - olen);
723         num += plen;
724
725         return num;
726 }
727
728 /* SCSI ports VPD page */
729 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
730 {
731         int num = 0;
732         int port_a, port_b;
733
734         port_a = target_dev_id + 1;
735         port_b = port_a + 1;
736         arr[num++] = 0x0;       /* reserved */
737         arr[num++] = 0x0;       /* reserved */
738         arr[num++] = 0x0;
739         arr[num++] = 0x1;       /* relative port 1 (primary) */
740         memset(arr + num, 0, 6);
741         num += 6;
742         arr[num++] = 0x0;
743         arr[num++] = 12;        /* length tp descriptor */
744         /* naa-5 target port identifier (A) */
745         arr[num++] = 0x61;      /* proto=sas, binary */
746         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
747         arr[num++] = 0x0;       /* reserved */
748         arr[num++] = 0x8;       /* length */
749         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
750         arr[num++] = 0x22;
751         arr[num++] = 0x22;
752         arr[num++] = 0x20;
753         arr[num++] = (port_a >> 24);
754         arr[num++] = (port_a >> 16) & 0xff;
755         arr[num++] = (port_a >> 8) & 0xff;
756         arr[num++] = port_a & 0xff;
757
758         arr[num++] = 0x0;       /* reserved */
759         arr[num++] = 0x0;       /* reserved */
760         arr[num++] = 0x0;
761         arr[num++] = 0x2;       /* relative port 2 (secondary) */
762         memset(arr + num, 0, 6);
763         num += 6;
764         arr[num++] = 0x0;
765         arr[num++] = 12;        /* length tp descriptor */
766         /* naa-5 target port identifier (B) */
767         arr[num++] = 0x61;      /* proto=sas, binary */
768         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
769         arr[num++] = 0x0;       /* reserved */
770         arr[num++] = 0x8;       /* length */
771         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
772         arr[num++] = 0x22;
773         arr[num++] = 0x22;
774         arr[num++] = 0x20;
775         arr[num++] = (port_b >> 24);
776         arr[num++] = (port_b >> 16) & 0xff;
777         arr[num++] = (port_b >> 8) & 0xff;
778         arr[num++] = port_b & 0xff;
779
780         return num;
781 }
782
783
784 static unsigned char vpd89_data[] = {
785 /* from 4th byte */ 0,0,0,0,
786 'l','i','n','u','x',' ',' ',' ',
787 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
788 '1','2','3','4',
789 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
790 0xec,0,0,0,
791 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
792 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
793 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
794 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
795 0x53,0x41,
796 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
797 0x20,0x20,
798 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
799 0x10,0x80,
800 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
801 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
802 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
803 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
804 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
805 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
806 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
807 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
808 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
809 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
810 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
811 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
812 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
813 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
814 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
815 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
816 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
817 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
818 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
819 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
820 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
821 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
822 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
823 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
824 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
825 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
826 };
827
828 /* ATA Information VPD page */
829 static int inquiry_evpd_89(unsigned char * arr)
830 {
831         memcpy(arr, vpd89_data, sizeof(vpd89_data));
832         return sizeof(vpd89_data);
833 }
834
835
836 static unsigned char vpdb0_data[] = {
837         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
838         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
839         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
840         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
841 };
842
843 /* Block limits VPD page (SBC-3) */
844 static int inquiry_evpd_b0(unsigned char * arr)
845 {
846         unsigned int gran;
847
848         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
849
850         /* Optimal transfer length granularity */
851         gran = 1 << scsi_debug_physblk_exp;
852         arr[2] = (gran >> 8) & 0xff;
853         arr[3] = gran & 0xff;
854
855         /* Maximum Transfer Length */
856         if (sdebug_store_sectors > 0x400) {
857                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
858                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
859                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
860                 arr[7] = sdebug_store_sectors & 0xff;
861         }
862
863         /* Optimal Transfer Length */
864         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
865
866         if (scsi_debug_lbpu) {
867                 /* Maximum Unmap LBA Count */
868                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
869
870                 /* Maximum Unmap Block Descriptor Count */
871                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
872         }
873
874         /* Unmap Granularity Alignment */
875         if (scsi_debug_unmap_alignment) {
876                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
877                 arr[28] |= 0x80; /* UGAVALID */
878         }
879
880         /* Optimal Unmap Granularity */
881         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
882
883         /* Maximum WRITE SAME Length */
884         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
885
886         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
889 }
890
891 /* Block device characteristics VPD page (SBC-3) */
892 static int inquiry_evpd_b1(unsigned char *arr)
893 {
894         memset(arr, 0, 0x3c);
895         arr[0] = 0;
896         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
897         arr[2] = 0;
898         arr[3] = 5;     /* less than 1.8" */
899
900         return 0x3c;
901 }
902
903 /* Logical block provisioning VPD page (SBC-3) */
904 static int inquiry_evpd_b2(unsigned char *arr)
905 {
906         memset(arr, 0, 0x4);
907         arr[0] = 0;                     /* threshold exponent */
908
909         if (scsi_debug_lbpu)
910                 arr[1] = 1 << 7;
911
912         if (scsi_debug_lbpws)
913                 arr[1] |= 1 << 6;
914
915         if (scsi_debug_lbpws10)
916                 arr[1] |= 1 << 5;
917
918         if (scsi_debug_lbprz)
919                 arr[1] |= 1 << 2;
920
921         return 0x4;
922 }
923
924 #define SDEBUG_LONG_INQ_SZ 96
925 #define SDEBUG_MAX_INQ_ARR_SZ 584
926
927 static int resp_inquiry(struct scsi_cmnd *scp, int target,
928                         struct sdebug_dev_info * devip)
929 {
930         unsigned char pq_pdt;
931         unsigned char * arr;
932         unsigned char *cmd = scp->cmnd;
933         int alloc_len, n, ret;
934
935         alloc_len = (cmd[3] << 8) + cmd[4];
936         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
937         if (! arr)
938                 return DID_REQUEUE << 16;
939         if (devip->wlun)
940                 pq_pdt = 0x1e;  /* present, wlun */
941         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
942                 pq_pdt = 0x7f;  /* not present, no device type */
943         else
944                 pq_pdt = (scsi_debug_ptype & 0x1f);
945         arr[0] = pq_pdt;
946         if (0x2 & cmd[1]) {  /* CMDDT bit set */
947                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
948                                 0);
949                 kfree(arr);
950                 return check_condition_result;
951         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
952                 int lu_id_num, port_group_id, target_dev_id, len;
953                 char lu_id_str[6];
954                 int host_no = devip->sdbg_host->shost->host_no;
955
956                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
957                     (devip->channel & 0x7f);
958                 if (0 == scsi_debug_vpd_use_hostno)
959                         host_no = 0;
960                 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
961                             (devip->target * 1000) + devip->lun);
962                 target_dev_id = ((host_no + 1) * 2000) +
963                                  (devip->target * 1000) - 3;
964                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
965                 if (0 == cmd[2]) { /* supported vital product data pages */
966                         arr[1] = cmd[2];        /*sanity */
967                         n = 4;
968                         arr[n++] = 0x0;   /* this page */
969                         arr[n++] = 0x80;  /* unit serial number */
970                         arr[n++] = 0x83;  /* device identification */
971                         arr[n++] = 0x84;  /* software interface ident. */
972                         arr[n++] = 0x85;  /* management network addresses */
973                         arr[n++] = 0x86;  /* extended inquiry */
974                         arr[n++] = 0x87;  /* mode page policy */
975                         arr[n++] = 0x88;  /* SCSI ports */
976                         arr[n++] = 0x89;  /* ATA information */
977                         arr[n++] = 0xb0;  /* Block limits (SBC) */
978                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
979                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
980                                 arr[n++] = 0xb2;
981                         arr[3] = n - 4;   /* number of supported VPD pages */
982                 } else if (0x80 == cmd[2]) { /* unit serial number */
983                         arr[1] = cmd[2];        /*sanity */
984                         arr[3] = len;
985                         memcpy(&arr[4], lu_id_str, len);
986                 } else if (0x83 == cmd[2]) { /* device identification */
987                         arr[1] = cmd[2];        /*sanity */
988                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
989                                                  target_dev_id, lu_id_num,
990                                                  lu_id_str, len);
991                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
992                         arr[1] = cmd[2];        /*sanity */
993                         arr[3] = inquiry_evpd_84(&arr[4]);
994                 } else if (0x85 == cmd[2]) { /* Management network addresses */
995                         arr[1] = cmd[2];        /*sanity */
996                         arr[3] = inquiry_evpd_85(&arr[4]);
997                 } else if (0x86 == cmd[2]) { /* extended inquiry */
998                         arr[1] = cmd[2];        /*sanity */
999                         arr[3] = 0x3c;  /* number of following entries */
1000                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1001                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1002                         else if (scsi_debug_dif)
1003                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1004                         else
1005                                 arr[4] = 0x0;   /* no protection stuff */
1006                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1007                 } else if (0x87 == cmd[2]) { /* mode page policy */
1008                         arr[1] = cmd[2];        /*sanity */
1009                         arr[3] = 0x8;   /* number of following entries */
1010                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1011                         arr[6] = 0x80;  /* mlus, shared */
1012                         arr[8] = 0x18;   /* protocol specific lu */
1013                         arr[10] = 0x82;  /* mlus, per initiator port */
1014                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1015                         arr[1] = cmd[2];        /*sanity */
1016                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1017                 } else if (0x89 == cmd[2]) { /* ATA information */
1018                         arr[1] = cmd[2];        /*sanity */
1019                         n = inquiry_evpd_89(&arr[4]);
1020                         arr[2] = (n >> 8);
1021                         arr[3] = (n & 0xff);
1022                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1023                         arr[1] = cmd[2];        /*sanity */
1024                         arr[3] = inquiry_evpd_b0(&arr[4]);
1025                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1026                         arr[1] = cmd[2];        /*sanity */
1027                         arr[3] = inquiry_evpd_b1(&arr[4]);
1028                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1029                         arr[1] = cmd[2];        /*sanity */
1030                         arr[3] = inquiry_evpd_b2(&arr[4]);
1031                 } else {
1032                         /* Illegal request, invalid field in cdb */
1033                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1034                                         INVALID_FIELD_IN_CDB, 0);
1035                         kfree(arr);
1036                         return check_condition_result;
1037                 }
1038                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1039                 ret = fill_from_dev_buffer(scp, arr,
1040                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1041                 kfree(arr);
1042                 return ret;
1043         }
1044         /* drops through here for a standard inquiry */
1045         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1046         arr[2] = scsi_debug_scsi_level;
1047         arr[3] = 2;    /* response_data_format==2 */
1048         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1049         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1050         if (0 == scsi_debug_vpd_use_hostno)
1051                 arr[5] |= 0x10; /* claim: implicit TGPS */
1052         arr[6] = 0x10; /* claim: MultiP */
1053         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1054         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1055         memcpy(&arr[8], inq_vendor_id, 8);
1056         memcpy(&arr[16], inq_product_id, 16);
1057         memcpy(&arr[32], inq_product_rev, 4);
1058         /* version descriptors (2 bytes each) follow */
1059         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1060         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1061         n = 62;
1062         if (scsi_debug_ptype == 0) {
1063                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1064         } else if (scsi_debug_ptype == 1) {
1065                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1066         }
1067         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1068         ret = fill_from_dev_buffer(scp, arr,
1069                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1070         kfree(arr);
1071         return ret;
1072 }
1073
1074 static int resp_requests(struct scsi_cmnd * scp,
1075                          struct sdebug_dev_info * devip)
1076 {
1077         unsigned char * sbuff;
1078         unsigned char *cmd = scp->cmnd;
1079         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1080         int want_dsense;
1081         int len = 18;
1082
1083         memset(arr, 0, sizeof(arr));
1084         want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
1085         sbuff = scp->sense_buffer;
1086         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1087                 if (want_dsense) {
1088                         arr[0] = 0x72;
1089                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1090                         arr[2] = THRESHOLD_EXCEEDED;
1091                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1092                 } else {
1093                         arr[0] = 0x70;
1094                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1095                         arr[7] = 0xa;           /* 18 byte sense buffer */
1096                         arr[12] = THRESHOLD_EXCEEDED;
1097                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1098                 }
1099         } else {
1100                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1101                 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
1102                         /* DESC bit set and sense_buff in fixed format */
1103                         memset(arr, 0, sizeof(arr));
1104                         arr[0] = 0x72;
1105                         arr[1] = sbuff[2];     /* sense key */
1106                         arr[2] = sbuff[12];    /* asc */
1107                         arr[3] = sbuff[13];    /* ascq */
1108                         len = 8;
1109                 }
1110         }
1111         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1112         return fill_from_dev_buffer(scp, arr, len);
1113 }
1114
1115 static int resp_start_stop(struct scsi_cmnd * scp,
1116                            struct sdebug_dev_info * devip)
1117 {
1118         unsigned char *cmd = scp->cmnd;
1119         int power_cond, errsts, start;
1120
1121         errsts = check_readiness(scp, UAS_ONLY, devip);
1122         if (errsts)
1123                 return errsts;
1124         power_cond = (cmd[4] & 0xf0) >> 4;
1125         if (power_cond) {
1126                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1127                                 0);
1128                 return check_condition_result;
1129         }
1130         start = cmd[4] & 1;
1131         if (start == devip->stopped)
1132                 devip->stopped = !start;
1133         return 0;
1134 }
1135
1136 static sector_t get_sdebug_capacity(void)
1137 {
1138         if (scsi_debug_virtual_gb > 0)
1139                 return (sector_t)scsi_debug_virtual_gb *
1140                         (1073741824 / scsi_debug_sector_size);
1141         else
1142                 return sdebug_store_sectors;
1143 }
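/* Worked example (illustrative): with virtual_gb=4 and the default 512 byte
 * sectors this reports 4 * (1073741824 / 512) = 8388608 sectors, i.e. a
 * 4 GiB capacity, even though the backing ramdisk stays at dev_size_mb and
 * accesses beyond it wrap via fake_store().
 */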
1144
1145 #define SDEBUG_READCAP_ARR_SZ 8
1146 static int resp_readcap(struct scsi_cmnd * scp,
1147                         struct sdebug_dev_info * devip)
1148 {
1149         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1150         unsigned int capac;
1151         int errsts;
1152
1153         errsts = check_readiness(scp, UAS_ONLY, devip);
1154         if (errsts)
1155                 return errsts;
1156         /* following just in case virtual_gb changed */
1157         sdebug_capacity = get_sdebug_capacity();
1158         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1159         if (sdebug_capacity < 0xffffffff) {
1160                 capac = (unsigned int)sdebug_capacity - 1;
1161                 arr[0] = (capac >> 24);
1162                 arr[1] = (capac >> 16) & 0xff;
1163                 arr[2] = (capac >> 8) & 0xff;
1164                 arr[3] = capac & 0xff;
1165         } else {
1166                 arr[0] = 0xff;
1167                 arr[1] = 0xff;
1168                 arr[2] = 0xff;
1169                 arr[3] = 0xff;
1170         }
1171         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1172         arr[7] = scsi_debug_sector_size & 0xff;
1173         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1174 }
1175
1176 #define SDEBUG_READCAP16_ARR_SZ 32
1177 static int resp_readcap16(struct scsi_cmnd * scp,
1178                           struct sdebug_dev_info * devip)
1179 {
1180         unsigned char *cmd = scp->cmnd;
1181         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1182         unsigned long long capac;
1183         int errsts, k, alloc_len;
1184
1185         errsts = check_readiness(scp, UAS_ONLY, devip);
1186         if (errsts)
1187                 return errsts;
1188         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1189                      + cmd[13]);
1190         /* following just in case virtual_gb changed */
1191         sdebug_capacity = get_sdebug_capacity();
1192         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1193         capac = sdebug_capacity - 1;
1194         for (k = 0; k < 8; ++k, capac >>= 8)
1195                 arr[7 - k] = capac & 0xff;
1196         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1197         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1198         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1199         arr[11] = scsi_debug_sector_size & 0xff;
1200         arr[13] = scsi_debug_physblk_exp & 0xf;
1201         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1202
1203         if (scsi_debug_lbp()) {
1204                 arr[14] |= 0x80; /* LBPME */
1205                 if (scsi_debug_lbprz)
1206                         arr[14] |= 0x40; /* LBPRZ */
1207         }
1208
1209         arr[15] = scsi_debug_lowest_aligned & 0xff;
1210
1211         if (scsi_debug_dif) {
1212                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1213                 arr[12] |= 1; /* PROT_EN */
1214         }
1215
1216         return fill_from_dev_buffer(scp, arr,
1217                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1218 }
1219
1220 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1221
1222 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1223                               struct sdebug_dev_info * devip)
1224 {
1225         unsigned char *cmd = scp->cmnd;
1226         unsigned char * arr;
1227         int host_no = devip->sdbg_host->shost->host_no;
1228         int n, ret, alen, rlen;
1229         int port_group_a, port_group_b, port_a, port_b;
1230
1231         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1232                 + cmd[9]);
1233
1234         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1235         if (! arr)
1236                 return DID_REQUEUE << 16;
1237         /*
1238          * EVPD page 0x88 states we have two ports, one
1239          * real and a fake port with no device connected.
1240          * So we create two port groups with one port each
1241          * and set the group with port B to unavailable.
1242          */
1243         port_a = 0x1; /* relative port A */
1244         port_b = 0x2; /* relative port B */
1245         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1246             (devip->channel & 0x7f);
1247         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1248             (devip->channel & 0x7f) + 0x80;
1249
1250         /*
1251          * The asymmetric access state is cycled according to the host_id.
1252          */
1253         n = 4;
1254         if (0 == scsi_debug_vpd_use_hostno) {
1255             arr[n++] = host_no % 3; /* Asymm access state */
1256             arr[n++] = 0x0F; /* claim: all states are supported */
1257         } else {
1258             arr[n++] = 0x0; /* Active/Optimized path */
1259             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1260         }
1261         arr[n++] = (port_group_a >> 8) & 0xff;
1262         arr[n++] = port_group_a & 0xff;
1263         arr[n++] = 0;    /* Reserved */
1264         arr[n++] = 0;    /* Status code */
1265         arr[n++] = 0;    /* Vendor unique */
1266         arr[n++] = 0x1;  /* One port per group */
1267         arr[n++] = 0;    /* Reserved */
1268         arr[n++] = 0;    /* Reserved */
1269         arr[n++] = (port_a >> 8) & 0xff;
1270         arr[n++] = port_a & 0xff;
1271         arr[n++] = 3;    /* Port unavailable */
1272                 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1273         arr[n++] = (port_group_b >> 8) & 0xff;
1274         arr[n++] = port_group_b & 0xff;
1275         arr[n++] = 0;    /* Reserved */
1276         arr[n++] = 0;    /* Status code */
1277         arr[n++] = 0;    /* Vendor unique */
1278         arr[n++] = 0x1;  /* One port per group */
1279         arr[n++] = 0;    /* Reserved */
1280         arr[n++] = 0;    /* Reserved */
1281         arr[n++] = (port_b >> 8) & 0xff;
1282         arr[n++] = port_b & 0xff;
1283
1284         rlen = n - 4;
1285         arr[0] = (rlen >> 24) & 0xff;
1286         arr[1] = (rlen >> 16) & 0xff;
1287         arr[2] = (rlen >> 8) & 0xff;
1288         arr[3] = rlen & 0xff;
1289
1290         /*
1291          * Return the smallest of:
1292          * - The allocated length
1293          * - The constructed command length
1294          * - The maximum array size
1295          */
1296         rlen = min(alen, n);
1297         ret = fill_from_dev_buffer(scp, arr,
1298                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1299         kfree(arr);
1300         return ret;
1301 }
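/* Illustrative values (assumed host_no=1, channel=0, vpd_use_hostno=1):
 * port_group_a = ((1 + 1) & 0x7f) << 8 = 0x0200 and
 * port_group_b = 0x0200 + 0x80 = 0x0280, with relative ports 0x1 and 0x2;
 * group A is reported active/optimized and group B unavailable, matching
 * the two-port story told by VPD page 0x88 above.
 */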
1302
1303 /* <<Following mode page info copied from ST318451LW>> */
1304
1305 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1306 {       /* Read-Write Error Recovery page for mode_sense */
1307         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1308                                         5, 0, 0xff, 0xff};
1309
1310         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1311         if (1 == pcontrol)
1312                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1313         return sizeof(err_recov_pg);
1314 }
1315
1316 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1317 {       /* Disconnect-Reconnect page for mode_sense */
1318         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1319                                          0, 0, 0, 0, 0, 0, 0, 0};
1320
1321         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1322         if (1 == pcontrol)
1323                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1324         return sizeof(disconnect_pg);
1325 }
1326
1327 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1328 {       /* Format device page for mode_sense */
1329         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1330                                      0, 0, 0, 0, 0, 0, 0, 0,
1331                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1332
1333         memcpy(p, format_pg, sizeof(format_pg));
1334         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1335         p[11] = sdebug_sectors_per & 0xff;
1336         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1337         p[13] = scsi_debug_sector_size & 0xff;
1338         if (scsi_debug_removable)
1339                 p[20] |= 0x20; /* should agree with INQUIRY */
1340         if (1 == pcontrol)
1341                 memset(p + 2, 0, sizeof(format_pg) - 2);
1342         return sizeof(format_pg);
1343 }
1344
1345 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1346 {       /* Caching page for mode_sense */
1347         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1348                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1349         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1350                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1351
1352         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1353                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1354         memcpy(p, caching_pg, sizeof(caching_pg));
1355         if (1 == pcontrol)
1356                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1357         else if (2 == pcontrol)
1358                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1359         return sizeof(caching_pg);
1360 }
1361
1362 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1363 {       /* Control mode page for mode_sense */
1364         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1365                                         0, 0, 0, 0};
1366         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1367                                      0, 0, 0x2, 0x4b};
1368
1369         if (scsi_debug_dsense)
1370                 ctrl_m_pg[2] |= 0x4;
1371         else
1372                 ctrl_m_pg[2] &= ~0x4;
1373
1374         if (scsi_debug_ato)
1375                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1376
1377         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1378         if (1 == pcontrol)
1379                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1380         else if (2 == pcontrol)
1381                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1382         return sizeof(ctrl_m_pg);
1383 }
1384
1385
1386 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1387 {       /* Informational Exceptions control mode page for mode_sense */
1388         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1389                                        0, 0, 0x0, 0x0};
1390         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1391                                       0, 0, 0x0, 0x0};
1392
1393         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1394         if (1 == pcontrol)
1395                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1396         else if (2 == pcontrol)
1397                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1398         return sizeof(iec_m_pg);
1399 }
1400
1401 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1402 {       /* SAS SSP mode page - short format for mode_sense */
1403         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1404                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1405
1406         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1407         if (1 == pcontrol)
1408                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1409         return sizeof(sas_sf_m_pg);
1410 }
1411
1412
1413 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1414                               int target_dev_id)
1415 {       /* SAS phy control and discover mode page for mode_sense */
1416         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1417                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1418                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1419                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1420                     0x2, 0, 0, 0, 0, 0, 0, 0,
1421                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1422                     0, 0, 0, 0, 0, 0, 0, 0,
1423                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1424                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1425                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1426                     0x3, 0, 0, 0, 0, 0, 0, 0,
1427                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1428                     0, 0, 0, 0, 0, 0, 0, 0,
1429                 };
1430         int port_a, port_b;
1431
1432         port_a = target_dev_id + 1;
1433         port_b = port_a + 1;
1434         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1435         p[20] = (port_a >> 24);
1436         p[21] = (port_a >> 16) & 0xff;
1437         p[22] = (port_a >> 8) & 0xff;
1438         p[23] = port_a & 0xff;
1439         p[48 + 20] = (port_b >> 24);
1440         p[48 + 21] = (port_b >> 16) & 0xff;
1441         p[48 + 22] = (port_b >> 8) & 0xff;
1442         p[48 + 23] = port_b & 0xff;
1443         if (1 == pcontrol)
1444                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1445         return sizeof(sas_pcd_m_pg);
1446 }
1447
1448 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1449 {       /* SAS SSP shared protocol specific port mode subpage */
1450         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1451                     0, 0, 0, 0, 0, 0, 0, 0,
1452                 };
1453
1454         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1455         if (1 == pcontrol)
1456                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1457         return sizeof(sas_sha_m_pg);
1458 }
1459
1460 #define SDEBUG_MAX_MSENSE_SZ 256
1461
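/* MODE SENSE(6) and MODE SENSE(10): build the mode parameter header, an
 * optional block descriptor and the requested mode page(s). */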
1462 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1463                            struct sdebug_dev_info * devip)
1464 {
1465         unsigned char dbd, llbaa;
1466         int pcontrol, pcode, subpcode, bd_len;
1467         unsigned char dev_spec;
1468         int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1469         unsigned char * ap;
1470         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1471         unsigned char *cmd = scp->cmnd;
1472
1473         errsts = check_readiness(scp, UAS_ONLY, devip);
1474         if (errsts)
1475                 return errsts;
1476         dbd = !!(cmd[1] & 0x8);
1477         pcontrol = (cmd[2] & 0xc0) >> 6;
1478         pcode = cmd[2] & 0x3f;
1479         subpcode = cmd[3];
1480         msense_6 = (MODE_SENSE == cmd[0]);
1481         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1482         if ((0 == scsi_debug_ptype) && (0 == dbd))
1483                 bd_len = llbaa ? 16 : 8;
1484         else
1485                 bd_len = 0;
1486         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1487         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1488         if (0x3 == pcontrol) {  /* Saving values not supported */
1489                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1490                 return check_condition_result;
1491         }
1492         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1493                         (devip->target * 1000) - 3;
1494         /* set DPOFUA bit for disks */
1495         if (0 == scsi_debug_ptype)
1496                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1497         else
1498                 dev_spec = 0x0;
1499         if (msense_6) {
1500                 arr[2] = dev_spec;
1501                 arr[3] = bd_len;
1502                 offset = 4;
1503         } else {
1504                 arr[3] = dev_spec;
1505                 if (16 == bd_len)
1506                         arr[4] = 0x1;   /* set LONGLBA bit */
1507                 arr[7] = bd_len;        /* assume 255 or less */
1508                 offset = 8;
1509         }
1510         ap = arr + offset;
1511         if ((bd_len > 0) && (!sdebug_capacity))
1512                 sdebug_capacity = get_sdebug_capacity();
1513
1514         if (8 == bd_len) {
1515                 if (sdebug_capacity > 0xfffffffe) {
1516                         ap[0] = 0xff;
1517                         ap[1] = 0xff;
1518                         ap[2] = 0xff;
1519                         ap[3] = 0xff;
1520                 } else {
1521                         ap[0] = (sdebug_capacity >> 24) & 0xff;
1522                         ap[1] = (sdebug_capacity >> 16) & 0xff;
1523                         ap[2] = (sdebug_capacity >> 8) & 0xff;
1524                         ap[3] = sdebug_capacity & 0xff;
1525                 }
1526                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1527                 ap[7] = scsi_debug_sector_size & 0xff;
1528                 offset += bd_len;
1529                 ap = arr + offset;
1530         } else if (16 == bd_len) {
1531                 unsigned long long capac = sdebug_capacity;
1532
1533                 for (k = 0; k < 8; ++k, capac >>= 8)
1534                         ap[7 - k] = capac & 0xff;
1535                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1536                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1537                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1538                 ap[15] = scsi_debug_sector_size & 0xff;
1539                 offset += bd_len;
1540                 ap = arr + offset;
1541         }
1542
1543         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1544                 /* TODO: Control Extension page */
1545                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1546                                 0);
1547                 return check_condition_result;
1548         }
1549         switch (pcode) {
1550         case 0x1:       /* Read-Write error recovery page, direct access */
1551                 len = resp_err_recov_pg(ap, pcontrol, target);
1552                 offset += len;
1553                 break;
1554         case 0x2:       /* Disconnect-Reconnect page, all devices */
1555                 len = resp_disconnect_pg(ap, pcontrol, target);
1556                 offset += len;
1557                 break;
1558         case 0x3:       /* Format device page, direct access */
1559                 len = resp_format_pg(ap, pcontrol, target);
1560                 offset += len;
1561                 break;
1562         case 0x8:       /* Caching page, direct access */
1563                 len = resp_caching_pg(ap, pcontrol, target);
1564                 offset += len;
1565                 break;
1566         case 0xa:       /* Control Mode page, all devices */
1567                 len = resp_ctrl_m_pg(ap, pcontrol, target);
1568                 offset += len;
1569                 break;
1570         case 0x19:      /* Protocol specific port (SAS); subpage 1 is phy control and discover */
1571                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1572                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1573                                         INVALID_FIELD_IN_CDB, 0);
1574                         return check_condition_result;
1575                 }
1576                 len = 0;
1577                 if ((0x0 == subpcode) || (0xff == subpcode))
1578                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1579                 if ((0x1 == subpcode) || (0xff == subpcode))
1580                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1581                                                   target_dev_id);
1582                 if ((0x2 == subpcode) || (0xff == subpcode))
1583                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
1584                 offset += len;
1585                 break;
1586         case 0x1c:      /* Informational Exceptions Mode page, all devices */
1587                 len = resp_iec_m_pg(ap, pcontrol, target);
1588                 offset += len;
1589                 break;
1590         case 0x3f:      /* Read all Mode pages */
1591                 if ((0 == subpcode) || (0xff == subpcode)) {
1592                         len = resp_err_recov_pg(ap, pcontrol, target);
1593                         len += resp_disconnect_pg(ap + len, pcontrol, target);
1594                         len += resp_format_pg(ap + len, pcontrol, target);
1595                         len += resp_caching_pg(ap + len, pcontrol, target);
1596                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1597                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1598                         if (0xff == subpcode) {
1599                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1600                                                   target, target_dev_id);
1601                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1602                         }
1603                         len += resp_iec_m_pg(ap + len, pcontrol, target);
1604                 } else {
1605                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1606                                         INVALID_FIELD_IN_CDB, 0);
1607                         return check_condition_result;
1608                 }
1609                 offset += len;
1610                 break;
1611         default:
1612                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1613                                 0);
1614                 return check_condition_result;
1615         }
1616         if (msense_6)
1617                 arr[0] = offset - 1;
1618         else {
1619                 arr[0] = ((offset - 2) >> 8) & 0xff;
1620                 arr[1] = (offset - 2) & 0xff;
1621         }
1622         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1623 }
1624
1625 #define SDEBUG_MAX_MSELECT_SZ 512
1626
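/* MODE SELECT(6) and MODE SELECT(10): only the current values of the
 * Caching, Control and Informational Exceptions mode pages may be changed;
 * a successful change raises a MODE PARAMETERS CHANGED unit attention. */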
1627 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1628                             struct sdebug_dev_info * devip)
1629 {
1630         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1631         int param_len, res, errsts, mpage;
1632         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1633         unsigned char *cmd = scp->cmnd;
1634
1635         errsts = check_readiness(scp, UAS_ONLY, devip);
1636         if (errsts)
1637                 return errsts;
1638         memset(arr, 0, sizeof(arr));
1639         pf = cmd[1] & 0x10;
1640         sp = cmd[1] & 0x1;
1641         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1642         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1643                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1644                                 INVALID_FIELD_IN_CDB, 0);
1645                 return check_condition_result;
1646         }
1647         res = fetch_to_dev_buffer(scp, arr, param_len);
1648         if (-1 == res)
1649                 return (DID_ERROR << 16);
1650         else if ((res < param_len) &&
1651                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1652                 sdev_printk(KERN_INFO, scp->device,
1653                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
1654                             __func__, param_len, res);
1655         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1656         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1657         if (md_len > 2) {
1658                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1659                                 INVALID_FIELD_IN_PARAM_LIST, 0);
1660                 return check_condition_result;
1661         }
1662         off = bd_len + (mselect6 ? 4 : 8);
1663         mpage = arr[off] & 0x3f;
1664         ps = !!(arr[off] & 0x80);
1665         if (ps) {
1666                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1667                                 INVALID_FIELD_IN_PARAM_LIST, 0);
1668                 return check_condition_result;
1669         }
1670         spf = !!(arr[off] & 0x40);
1671         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1672                        (arr[off + 1] + 2);
1673         if ((pg_len + off) > param_len) {
1674                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1675                                 PARAMETER_LIST_LENGTH_ERR, 0);
1676                 return check_condition_result;
1677         }
1678         switch (mpage) {
1679         case 0x8:      /* Caching Mode page */
1680                 if (caching_pg[1] == arr[off + 1]) {
1681                         memcpy(caching_pg + 2, arr + off + 2,
1682                                sizeof(caching_pg) - 2);
1683                         goto set_mode_changed_ua;
1684                 }
1685                 break;
1686         case 0xa:      /* Control Mode page */
1687                 if (ctrl_m_pg[1] == arr[off + 1]) {
1688                         memcpy(ctrl_m_pg + 2, arr + off + 2,
1689                                sizeof(ctrl_m_pg) - 2);
1690                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1691                         goto set_mode_changed_ua;
1692                 }
1693                 break;
1694         case 0x1c:      /* Informational Exceptions Mode page */
1695                 if (iec_m_pg[1] == arr[off + 1]) {
1696                         memcpy(iec_m_pg + 2, arr + off + 2,
1697                                sizeof(iec_m_pg) - 2);
1698                         goto set_mode_changed_ua;
1699                 }
1700                 break;
1701         default:
1702                 break;
1703         }
1704         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1705                         INVALID_FIELD_IN_PARAM_LIST, 0);
1706         return check_condition_result;
1707 set_mode_changed_ua:
1708         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
1709         return 0;
1710 }
1711
1712 static int resp_temp_l_pg(unsigned char * arr)
1713 {
1714         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1715                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
1716                 };
1717
1718         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1719         return sizeof(temp_l_pg);
1720 }
1721
1722 static int resp_ie_l_pg(unsigned char * arr)
1723 {
1724         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1725                 };
1726
1727         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1728         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
1729                 arr[4] = THRESHOLD_EXCEEDED;
1730                 arr[5] = 0xff;
1731         }
1732         return sizeof(ie_l_pg);
1733 }
1734
1735 #define SDEBUG_MAX_LSENSE_SZ 512
1736
1737 static int resp_log_sense(struct scsi_cmnd * scp,
1738                           struct sdebug_dev_info * devip)
1739 {
1740         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1741         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1742         unsigned char *cmd = scp->cmnd;
1743
1744         errsts = check_readiness(scp, UAS_ONLY, devip);
1745         if (errsts)
1746                 return errsts;
1747         memset(arr, 0, sizeof(arr));
1748         ppc = cmd[1] & 0x2;
1749         sp = cmd[1] & 0x1;
1750         if (ppc || sp) {
1751                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1752                                 INVALID_FIELD_IN_CDB, 0);
1753                 return check_condition_result;
1754         }
1755         pcontrol = (cmd[2] & 0xc0) >> 6;
1756         pcode = cmd[2] & 0x3f;
1757         subpcode = cmd[3] & 0xff;
1758         alloc_len = (cmd[7] << 8) + cmd[8];
1759         arr[0] = pcode;
1760         if (0 == subpcode) {
1761                 switch (pcode) {
1762                 case 0x0:       /* Supported log pages log page */
1763                         n = 4;
1764                         arr[n++] = 0x0;         /* this page */
1765                         arr[n++] = 0xd;         /* Temperature */
1766                         arr[n++] = 0x2f;        /* Informational exceptions */
1767                         arr[3] = n - 4;
1768                         break;
1769                 case 0xd:       /* Temperature log page */
1770                         arr[3] = resp_temp_l_pg(arr + 4);
1771                         break;
1772                 case 0x2f:      /* Informational exceptions log page */
1773                         arr[3] = resp_ie_l_pg(arr + 4);
1774                         break;
1775                 default:
1776                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1777                                         INVALID_FIELD_IN_CDB, 0);
1778                         return check_condition_result;
1779                 }
1780         } else if (0xff == subpcode) {
1781                 arr[0] |= 0x40;
1782                 arr[1] = subpcode;
1783                 switch (pcode) {
1784                 case 0x0:       /* Supported log pages and subpages log page */
1785                         n = 4;
1786                         arr[n++] = 0x0;
1787                         arr[n++] = 0x0;         /* 0,0 page */
1788                         arr[n++] = 0x0;
1789                         arr[n++] = 0xff;        /* this page */
1790                         arr[n++] = 0xd;
1791                         arr[n++] = 0x0;         /* Temperature */
1792                         arr[n++] = 0x2f;
1793                         arr[n++] = 0x0; /* Informational exceptions */
1794                         arr[3] = n - 4;
1795                         break;
1796                 case 0xd:       /* Temperature subpages */
1797                         n = 4;
1798                         arr[n++] = 0xd;
1799                         arr[n++] = 0x0;         /* Temperature */
1800                         arr[3] = n - 4;
1801                         break;
1802                 case 0x2f:      /* Informational exceptions subpages */
1803                         n = 4;
1804                         arr[n++] = 0x2f;
1805                         arr[n++] = 0x0;         /* Informational exceptions */
1806                         arr[3] = n - 4;
1807                         break;
1808                 default:
1809                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
1810                                         INVALID_FIELD_IN_CDB, 0);
1811                         return check_condition_result;
1812                 }
1813         } else {
1814                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
1815                                 INVALID_FIELD_IN_CDB, 0);
1816                 return check_condition_result;
1817         }
1818         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1819         return fill_from_dev_buffer(scp, arr,
1820                     min(len, SDEBUG_MAX_LSENSE_SZ));
1821 }
1822
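/* Reject commands whose LBA range runs past the reported capacity or whose
 * transfer length exceeds the size of the backing store. */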
1823 static int check_device_access_params(struct scsi_cmnd *scp,
1824                                       unsigned long long lba, unsigned int num)
1825 {
1826         if (lba + num > sdebug_capacity) {
1827                 mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1828                 return check_condition_result;
1829         }
1830         /* transfer length excessive (tie in to block limits VPD page) */
1831         if (num > sdebug_store_sectors) {
1832                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1833                 return check_condition_result;
1834         }
1835         return 0;
1836 }
1837
1838 /* Returns number of bytes copied or -1 if error. */
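/* The RAM store may be smaller than the reported capacity (e.g. when the
 * virtual_gb parameter makes the device appear larger), so the LBA is
 * reduced modulo sdebug_store_sectors and accesses that run off the end of
 * the store wrap around to its start. */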
1839 static int do_device_access(struct scsi_cmnd *scmd,
1840                             unsigned long long lba, unsigned int num, int write)
1841 {
1842         int ret;
1843         unsigned long long block, rest = 0;
1844         struct scsi_data_buffer *sdb;
1845         enum dma_data_direction dir;
1846         size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1847                        off_t);
1848
1849         if (write) {
1850                 sdb = scsi_out(scmd);
1851                 dir = DMA_TO_DEVICE;
1852                 func = sg_pcopy_to_buffer;
1853         } else {
1854                 sdb = scsi_in(scmd);
1855                 dir = DMA_FROM_DEVICE;
1856                 func = sg_pcopy_from_buffer;
1857         }
1858
1859         if (!sdb->length)
1860                 return 0;
1861         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1862                 return -1;
1863
1864         block = do_div(lba, sdebug_store_sectors);
1865         if (block + num > sdebug_store_sectors)
1866                 rest = block + num - sdebug_store_sectors;
1867
1868         ret = func(sdb->table.sgl, sdb->table.nents,
1869                    fake_storep + (block * scsi_debug_sector_size),
1870                    (num - rest) * scsi_debug_sector_size, 0);
1871         if (ret != (num - rest) * scsi_debug_sector_size)
1872                 return ret;
1873
1874         if (rest) {
1875                 ret += func(sdb->table.sgl, sdb->table.nents,
1876                             fake_storep, rest * scsi_debug_sector_size,
1877                             (num - rest) * scsi_debug_sector_size);
1878         }
1879
1880         return ret;
1881 }
1882
1883 static __be16 dif_compute_csum(const void *buf, int len)
1884 {
1885         __be16 csum;
1886
1887         if (scsi_debug_guard)
1888                 csum = (__force __be16)ip_compute_csum(buf, len);
1889         else
1890                 csum = cpu_to_be16(crc_t10dif(buf, len));
1891
1892         return csum;
1893 }
1894
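/* Verify one protection tuple against its data block: check the guard tag
 * (CRC-T10DIF or IP checksum, depending on scsi_debug_guard) and, for DIF
 * types 1 and 2, the reference tag. Returns 0 on success, 0x01 for a guard
 * check failure or 0x03 for a reference tag check failure; callers pass
 * that value through as the sense code qualifier for ASC 0x10. */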
1895 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1896                       sector_t sector, u32 ei_lba)
1897 {
1898         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1899
1900         if (sdt->guard_tag != csum) {
1901                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1902                         __func__,
1903                         (unsigned long)sector,
1904                         be16_to_cpu(sdt->guard_tag),
1905                         be16_to_cpu(csum));
1906                 return 0x01;
1907         }
1908         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1909             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1910                 pr_err("%s: REF check failed on sector %lu\n",
1911                         __func__, (unsigned long)sector);
1912                 return 0x03;
1913         }
1914         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1915             be32_to_cpu(sdt->ref_tag) != ei_lba) {
1916                 pr_err("%s: REF check failed on sector %lu\n",
1917                         __func__, (unsigned long)sector);
1918                 return 0x03;
1919         }
1920         return 0;
1921 }
1922
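/* Copy protection information for 'sectors' blocks between dif_storep and
 * the command's protection scatter-gather list; 'read' selects the
 * direction. Like the data store, the PI store wraps at
 * sdebug_store_sectors. */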
1923 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
1924                           unsigned int sectors, bool read)
1925 {
1926         size_t resid;
1927         void *paddr;
1928         const void *dif_store_end = dif_storep + sdebug_store_sectors;
1929         struct sg_mapping_iter miter;
1930
1931         /* Bytes of protection data to copy into sgl */
1932         resid = sectors * sizeof(*dif_storep);
1933
1934         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
1935                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
1936                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
1937
1938         while (sg_miter_next(&miter) && resid > 0) {
1939                 size_t len = min(miter.length, resid);
1940                 void *start = dif_store(sector);
1941                 size_t rest = 0;
1942
1943                 if (dif_store_end < start + len)
1944                         rest = start + len - dif_store_end;
1945
1946                 paddr = miter.addr;
1947
1948                 if (read)
1949                         memcpy(paddr, start, len - rest);
1950                 else
1951                         memcpy(start, paddr, len - rest);
1952
1953                 if (rest) {
1954                         if (read)
1955                                 memcpy(paddr + len - rest, dif_storep, rest);
1956                         else
1957                                 memcpy(dif_storep, paddr + len - rest, rest);
1958                 }
1959
1960                 sector += len / sizeof(*dif_storep);
1961                 resid -= len;
1962         }
1963         sg_miter_stop(&miter);
1964 }
1965
1966 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1967                             unsigned int sectors, u32 ei_lba)
1968 {
1969         unsigned int i;
1970         struct sd_dif_tuple *sdt;
1971         sector_t sector;
1972
1973         for (i = 0; i < sectors; i++, ei_lba++) {
1974                 int ret;
1975
1976                 sector = start_sec + i;
1977                 sdt = dif_store(sector);
1978
1979                 if (sdt->app_tag == cpu_to_be16(0xffff))
1980                         continue;
1981
1982                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
1983                 if (ret) {
1984                         dif_errors++;
1985                         return ret;
1986                 }
1987         }
1988
1989         dif_copy_prot(SCpnt, start_sec, sectors, true);
1990         dix_reads++;
1991
1992         return 0;
1993 }
1994
1995 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1996                      unsigned int num, u32 ei_lba)
1997 {
1998         unsigned long iflags;
1999         int ret;
2000
2001         ret = check_device_access_params(SCpnt, lba, num);
2002         if (ret)
2003                 return ret;
2004
2005         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2006             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2007             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2008                 /* claim unrecoverable read error */
2009                 mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2010                 /* set info field and valid bit for fixed descriptor */
2011                 if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) {
2012                         SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */
2013                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2014                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2015                         SCpnt->sense_buffer[3] = (ret >> 24) & 0xff;
2016                         SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
2017                         SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
2018                         SCpnt->sense_buffer[6] = ret & 0xff;
2019                 }
2020                 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
2021                 return check_condition_result;
2022         }
2023
2024         read_lock_irqsave(&atomic_rw, iflags);
2025
2026         /* DIX + T10 DIF */
2027         if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2028                 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
2029
2030                 if (prot_ret) {
2031                         read_unlock_irqrestore(&atomic_rw, iflags);
2032                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret);
2033                         return illegal_condition_result;
2034                 }
2035         }
2036
2037         ret = do_device_access(SCpnt, lba, num, 0);
2038         read_unlock_irqrestore(&atomic_rw, iflags);
2039         if (ret == -1)
2040                 return DID_ERROR << 16;
2041
2042         scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
2043
2044         return 0;
2045 }
2046
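/* Hex/ASCII dump of a sector, 16 bytes per line; used below when a
 * protection check fails on a write. */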
2047 void dump_sector(unsigned char *buf, int len)
2048 {
2049         int i, j, n;
2050
2051         pr_err(">>> Sector Dump <<<\n");
2052         for (i = 0 ; i < len ; i += 16) {
2053                 char b[128];
2054
2055                 for (j = 0, n = 0; j < 16; j++) {
2056                         unsigned char c = buf[i+j];
2057
2058                         if (c >= 0x20 && c < 0x7e)
2059                                 n += scnprintf(b + n, sizeof(b) - n,
2060                                                " %c ", buf[i+j]);
2061                         else
2062                                 n += scnprintf(b + n, sizeof(b) - n,
2063                                                "%02x ", buf[i+j]);
2064                 }
2065                 pr_err("%04d: %s\n", i, b);
2066         }
2067 }
2068
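/* Walk the protection and data scatter-gather lists in lockstep, verifying
 * every tuple before the write is committed to the fake store; on success
 * the received protection information is copied into dif_storep. */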
2069 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2070                              unsigned int sectors, u32 ei_lba)
2071 {
2072         int ret;
2073         struct sd_dif_tuple *sdt;
2074         void *daddr;
2075         sector_t sector = start_sec;
2076         int ppage_offset;
2077         int dpage_offset;
2078         struct sg_mapping_iter diter;
2079         struct sg_mapping_iter piter;
2080
2081         BUG_ON(scsi_sg_count(SCpnt) == 0);
2082         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2083
2084         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2085                         scsi_prot_sg_count(SCpnt),
2086                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2087         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2088                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2089
2090         /* For each protection page */
2091         while (sg_miter_next(&piter)) {
2092                 dpage_offset = 0;
2093                 if (WARN_ON(!sg_miter_next(&diter))) {
2094                         ret = 0x01;
2095                         goto out;
2096                 }
2097
2098                 for (ppage_offset = 0; ppage_offset < piter.length;
2099                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2100                         /* If we're at the end of the current
2101                          * data page advance to the next one
2102                          */
2103                         if (dpage_offset >= diter.length) {
2104                                 if (WARN_ON(!sg_miter_next(&diter))) {
2105                                         ret = 0x01;
2106                                         goto out;
2107                                 }
2108                                 dpage_offset = 0;
2109                         }
2110
2111                         sdt = piter.addr + ppage_offset;
2112                         daddr = diter.addr + dpage_offset;
2113
2114                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2115                         if (ret) {
2116                                 dump_sector(daddr, scsi_debug_sector_size);
2117                                 goto out;
2118                         }
2119
2120                         sector++;
2121                         ei_lba++;
2122                         dpage_offset += scsi_debug_sector_size;
2123                 }
2124                 diter.consumed = dpage_offset;
2125                 sg_miter_stop(&diter);
2126         }
2127         sg_miter_stop(&piter);
2128
2129         dif_copy_prot(SCpnt, start_sec, sectors, false);
2130         dix_writes++;
2131
2132         return 0;
2133
2134 out:
2135         dif_errors++;
2136         sg_miter_stop(&diter);
2137         sg_miter_stop(&piter);
2138         return ret;
2139 }
2140
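/* Logical block provisioning state is tracked in map_storep, one bit per
 * unmap granularity's worth of blocks. The helpers below convert between
 * LBAs and bit indices, honouring any configured unmap alignment. */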
2141 static unsigned long lba_to_map_index(sector_t lba)
2142 {
2143         if (scsi_debug_unmap_alignment) {
2144                 lba += scsi_debug_unmap_granularity -
2145                         scsi_debug_unmap_alignment;
2146         }
2147         do_div(lba, scsi_debug_unmap_granularity);
2148
2149         return lba;
2150 }
2151
2152 static sector_t map_index_to_lba(unsigned long index)
2153 {
2154         sector_t lba = index * scsi_debug_unmap_granularity;
2155
2156         if (scsi_debug_unmap_alignment) {
2157                 lba -= scsi_debug_unmap_granularity -
2158                         scsi_debug_unmap_alignment;
2159         }
2160
2161         return lba;
2162 }
2163
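/* Return whether lba is currently mapped; *num is set to the number of
 * blocks, starting at lba, that share that state. */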
2164 static unsigned int map_state(sector_t lba, unsigned int *num)
2165 {
2166         sector_t end;
2167         unsigned int mapped;
2168         unsigned long index;
2169         unsigned long next;
2170
2171         index = lba_to_map_index(lba);
2172         mapped = test_bit(index, map_storep);
2173
2174         if (mapped)
2175                 next = find_next_zero_bit(map_storep, map_size, index);
2176         else
2177                 next = find_next_bit(map_storep, map_size, index);
2178
2179         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2180         *num = end - lba;
2181
2182         return mapped;
2183 }
2184
2185 static void map_region(sector_t lba, unsigned int len)
2186 {
2187         sector_t end = lba + len;
2188
2189         while (lba < end) {
2190                 unsigned long index = lba_to_map_index(lba);
2191
2192                 if (index < map_size)
2193                         set_bit(index, map_storep);
2194
2195                 lba = map_index_to_lba(index + 1);
2196         }
2197 }
2198
2199 static void unmap_region(sector_t lba, unsigned int len)
2200 {
2201         sector_t end = lba + len;
2202
2203         while (lba < end) {
2204                 unsigned long index = lba_to_map_index(lba);
2205
2206                 if (lba == map_index_to_lba(index) &&
2207                     lba + scsi_debug_unmap_granularity <= end &&
2208                     index < map_size) {
2209                         clear_bit(index, map_storep);
2210                         if (scsi_debug_lbprz) {
2211                                 memset(fake_storep +
2212                                        lba * scsi_debug_sector_size, 0,
2213                                        scsi_debug_sector_size *
2214                                        scsi_debug_unmap_granularity);
2215                         }
2216                         if (dif_storep) {
2217                                 memset(dif_storep + lba, 0xff,
2218                                        sizeof(*dif_storep) *
2219                                        scsi_debug_unmap_granularity);
2220                         }
2221                 }
2222                 lba = map_index_to_lba(index + 1);
2223         }
2224 }
2225
2226 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2227                       unsigned int num, u32 ei_lba)
2228 {
2229         unsigned long iflags;
2230         int ret;
2231
2232         ret = check_device_access_params(SCpnt, lba, num);
2233         if (ret)
2234                 return ret;
2235
2236         write_lock_irqsave(&atomic_rw, iflags);
2237
2238         /* DIX + T10 DIF */
2239         if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2240                 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2241
2242                 if (prot_ret) {
2243                         write_unlock_irqrestore(&atomic_rw, iflags);
2244                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10,
2245                                         prot_ret);
2246                         return illegal_condition_result;
2247                 }
2248         }
2249
2250         ret = do_device_access(SCpnt, lba, num, 1);
2251         if (scsi_debug_lbp())
2252                 map_region(lba, num);
2253         write_unlock_irqrestore(&atomic_rw, iflags);
2254         if (-1 == ret)
2255                 return (DID_ERROR << 16);
2256         else if ((ret < (num * scsi_debug_sector_size)) &&
2257                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2258                 sdev_printk(KERN_INFO, SCpnt->device,
2259                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2260                             my_name, num * scsi_debug_sector_size, ret);
2261
2262         return 0;
2263 }
2264
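/* WRITE SAME: if the UNMAP bit is set and logical block provisioning is
 * enabled, unmap the range; otherwise fetch one logical block from the
 * initiator and replicate it across the remaining blocks. */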
2265 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2266                       unsigned int num, u32 ei_lba, unsigned int unmap)
2267 {
2268         unsigned long iflags;
2269         unsigned long long i;
2270         int ret;
2271
2272         ret = check_device_access_params(scmd, lba, num);
2273         if (ret)
2274                 return ret;
2275
2276         if (num > scsi_debug_write_same_length) {
2277                 mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2278                                 0);
2279                 return check_condition_result;
2280         }
2281
2282         write_lock_irqsave(&atomic_rw, iflags);
2283
2284         if (unmap && scsi_debug_lbp()) {
2285                 unmap_region(lba, num);
2286                 goto out;
2287         }
2288
2289         /* Else fetch one logical block */
2290         ret = fetch_to_dev_buffer(scmd,
2291                                   fake_storep + (lba * scsi_debug_sector_size),
2292                                   scsi_debug_sector_size);
2293
2294         if (-1 == ret) {
2295                 write_unlock_irqrestore(&atomic_rw, iflags);
2296                 return (DID_ERROR << 16);
2297         } else if ((ret < (num * scsi_debug_sector_size)) &&
2298                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2299                 sdev_printk(KERN_INFO, scmd->device,
2300                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2301                             my_name, "write same",
2302                             num * scsi_debug_sector_size, ret);
2303
2304         /* Copy first sector to remaining blocks */
2305         for (i = 1 ; i < num ; i++)
2306                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2307                        fake_storep + (lba * scsi_debug_sector_size),
2308                        scsi_debug_sector_size);
2309
2310         if (scsi_debug_lbp())
2311                 map_region(lba, num);
2312 out:
2313         write_unlock_irqrestore(&atomic_rw, iflags);
2314
2315         return 0;
2316 }
2317
2318 struct unmap_block_desc {
2319         __be64  lba;
2320         __be32  blocks;
2321         __be32  __reserved;
2322 };
2323
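/* UNMAP: walk the block descriptors in the parameter data, validate each
 * LBA range and unmap it. */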
2324 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2325 {
2326         unsigned char *buf;
2327         struct unmap_block_desc *desc;
2328         unsigned int i, payload_len, descriptors;
2329         int ret;
2330         unsigned long iflags;
2331
2332         ret = check_readiness(scmd, UAS_ONLY, devip);
2333         if (ret)
2334                 return ret;
2335
2336         payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2337         BUG_ON(scsi_bufflen(scmd) != payload_len);
2338
2339         descriptors = (payload_len - 8) / 16;
2340
2341         buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2342         if (!buf)
2343                 return check_condition_result;
2344
2345         scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2346
2347         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2348         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2349
2350         desc = (void *)&buf[8];
2351
2352         write_lock_irqsave(&atomic_rw, iflags);
2353
2354         for (i = 0 ; i < descriptors ; i++) {
2355                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2356                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2357
2358                 ret = check_device_access_params(scmd, lba, num);
2359                 if (ret)
2360                         goto out;
2361
2362                 unmap_region(lba, num);
2363         }
2364
2365         ret = 0;
2366
2367 out:
2368         write_unlock_irqrestore(&atomic_rw, iflags);
2369         kfree(buf);
2370
2371         return ret;
2372 }
2373
2374 #define SDEBUG_GET_LBA_STATUS_LEN 32
2375
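/* GET LBA STATUS: report whether the requested LBA is mapped and the length
 * of the run of blocks sharing that state (single descriptor response). */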
2376 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2377                                struct sdebug_dev_info * devip)
2378 {
2379         unsigned long long lba;
2380         unsigned int alloc_len, mapped, num;
2381         unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2382         int ret;
2383
2384         ret = check_readiness(scmd, UAS_ONLY, devip);
2385         if (ret)
2386                 return ret;
2387
2388         lba = get_unaligned_be64(&scmd->cmnd[2]);
2389         alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2390
2391         if (alloc_len < 24)
2392                 return 0;
2393
2394         ret = check_device_access_params(scmd, lba, 1);
2395         if (ret)
2396                 return ret;
2397
2398         mapped = map_state(lba, &num);
2399
2400         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2401         put_unaligned_be32(20, &arr[0]);        /* Parameter Data Length */
2402         put_unaligned_be64(lba, &arr[8]);       /* LBA */
2403         put_unaligned_be32(num, &arr[16]);      /* Number of blocks */
2404         arr[20] = !mapped;                      /* mapped = 0, unmapped = 1 */
2405
2406         return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2407 }
2408
2409 #define SDEBUG_RLUN_ARR_SZ 256
2410
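/* REPORT LUNS: list scsi_debug_max_luns LUNs (optionally skipping LUN 0);
 * SELECT REPORT 1 lists only the REPORT LUNS well known LUN, 2 lists both. */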
2411 static int resp_report_luns(struct scsi_cmnd * scp,
2412                             struct sdebug_dev_info * devip)
2413 {
2414         unsigned int alloc_len;
2415         int lun_cnt, i, upper, num, n;
2416         u64 wlun, lun;
2417         unsigned char *cmd = scp->cmnd;
2418         int select_report = (int)cmd[2];
2419         struct scsi_lun *one_lun;
2420         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2421         unsigned char * max_addr;
2422
2423         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2424         if ((alloc_len < 4) || (select_report > 2)) {
2425                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2426                                 0);
2427                 return check_condition_result;
2428         }
2429         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2430         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2431         lun_cnt = scsi_debug_max_luns;
2432         if (1 == select_report)
2433                 lun_cnt = 0;
2434         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2435                 --lun_cnt;
2436         wlun = (select_report > 0) ? 1 : 0;
2437         num = lun_cnt + wlun;
2438         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2439         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2440         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2441                             sizeof(struct scsi_lun)), num);
2442         if (n < num) {
2443                 wlun = 0;
2444                 lun_cnt = n;
2445         }
2446         one_lun = (struct scsi_lun *) &arr[8];
2447         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2448         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2449              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2450              i++, lun++) {
2451                 upper = (lun >> 8) & 0x3f;
2452                 if (upper)
2453                         one_lun[i].scsi_lun[0] =
2454                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2455                 one_lun[i].scsi_lun[1] = lun & 0xff;
2456         }
2457         if (wlun) {
2458                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2459                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2460                 i++;
2461         }
2462         alloc_len = (unsigned char *)(one_lun + i) - arr;
2463         return fill_from_dev_buffer(scp, arr,
2464                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2465 }
2466
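/* XDWRITEREAD: XOR the data sent by the initiator into the command's
 * bidirectional in-buffer, which is what gets returned to the initiator. */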
2467 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2468                             unsigned int num, struct sdebug_dev_info *devip)
2469 {
2470         int j;
2471         unsigned char *kaddr, *buf;
2472         unsigned int offset;
2473         struct scsi_data_buffer *sdb = scsi_in(scp);
2474         struct sg_mapping_iter miter;
2475
2476         /* it would be better not to use a temporary buffer here */
2477         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2478         if (!buf) {
2479                 mk_sense_buffer(scp, NOT_READY,
2480                                 LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
2481                 return check_condition_result;
2482         }
2483
2484         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2485
2486         offset = 0;
2487         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
2488                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
2489
2490         while (sg_miter_next(&miter)) {
2491                 kaddr = miter.addr;
2492                 for (j = 0; j < miter.length; j++)
2493                         *(kaddr + j) ^= *(buf + offset + j);
2494
2495                 offset += miter.length;
2496         }
2497         sg_miter_stop(&miter);
2498         kfree(buf);
2499
2500         return 0;
2501 }
2502
2503 /* When timer or tasklet goes off this function is called. */
2504 static void sdebug_q_cmd_complete(unsigned long indx)
2505 {
2506         int qa_indx;
2507         int retiring = 0;
2508         unsigned long iflags;
2509         struct sdebug_queued_cmd *sqcp;
2510         struct scsi_cmnd *scp;
2511         struct sdebug_dev_info *devip;
2512
2513         atomic_inc(&sdebug_completions);
2514         qa_indx = indx;
2515         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2516                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2517                 return;
2518         }
2519         spin_lock_irqsave(&queued_arr_lock, iflags);
2520         sqcp = &queued_arr[qa_indx];
2521         scp = sqcp->a_cmnd;
2522         if (NULL == scp) {
2523                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2524                 pr_err("%s: scp is NULL\n", __func__);
2525                 return;
2526         }
2527         devip = (struct sdebug_dev_info *)scp->device->hostdata;
2528         if (devip)
2529                 atomic_dec(&devip->num_in_q);
2530         else
2531                 pr_err("%s: devip=NULL\n", __func__);
2532         if (atomic_read(&retired_max_queue) > 0)
2533                 retiring = 1;
2534
2535         sqcp->a_cmnd = NULL;
2536         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2537                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2538                 pr_err("%s: Unexpected completion\n", __func__);
2539                 return;
2540         }
2541
2542         if (unlikely(retiring)) {       /* user has reduced max_queue */
2543                 int k, retval;
2544
2545                 retval = atomic_read(&retired_max_queue);
2546                 if (qa_indx >= retval) {
2547                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2548                         pr_err("%s: index %d too large\n", __func__, retval);
2549                         return;
2550                 }
2551                 k = find_last_bit(queued_in_use_bm, retval);
2552                 if ((k < scsi_debug_max_queue) || (k == retval))
2553                         atomic_set(&retired_max_queue, 0);
2554                 else
2555                         atomic_set(&retired_max_queue, k + 1);
2556         }
2557         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2558         scp->scsi_done(scp); /* callback to mid level */
2559 }
2560
2561 /* When high resolution timer goes off this function is called. */
2562 static enum hrtimer_restart
2563 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
2564 {
2565         int qa_indx;
2566         int retiring = 0;
2567         unsigned long iflags;
2568         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
2569         struct sdebug_queued_cmd *sqcp;
2570         struct scsi_cmnd *scp;
2571         struct sdebug_dev_info *devip;
2572
2573         atomic_inc(&sdebug_completions);
2574         qa_indx = sd_hrtp->qa_indx;
2575         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
2576                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
2577                 goto the_end;
2578         }
2579         spin_lock_irqsave(&queued_arr_lock, iflags);
2580         sqcp = &queued_arr[qa_indx];
2581         scp = sqcp->a_cmnd;
2582         if (NULL == scp) {
2583                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2584                 pr_err("%s: scp is NULL\n", __func__);
2585                 goto the_end;
2586         }
2587         devip = (struct sdebug_dev_info *)scp->device->hostdata;
2588         if (devip)
2589                 atomic_dec(&devip->num_in_q);
2590         else
2591                 pr_err("%s: devip=NULL\n", __func__);
2592         if (atomic_read(&retired_max_queue) > 0)
2593                 retiring = 1;
2594
2595         sqcp->a_cmnd = NULL;
2596         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
2597                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2598                 pr_err("%s: Unexpected completion\n", __func__);
2599                 goto the_end;
2600         }
2601
2602         if (unlikely(retiring)) {       /* user has reduced max_queue */
2603                 int k, retval;
2604
2605                 retval = atomic_read(&retired_max_queue);
2606                 if (qa_indx >= retval) {
2607                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2608                         pr_err("%s: index %d too large\n", __func__, retval);
2609                         goto the_end;
2610                 }
2611                 k = find_last_bit(queued_in_use_bm, retval);
2612                 if ((k < scsi_debug_max_queue) || (k == retval))
2613                         atomic_set(&retired_max_queue, 0);
2614                 else
2615                         atomic_set(&retired_max_queue, k + 1);
2616         }
2617         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2618         scp->scsi_done(scp); /* callback to mid level */
2619 the_end:
2620         return HRTIMER_NORESTART;
2621 }
2622
2623 static struct sdebug_dev_info *
2624 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2625 {
2626         struct sdebug_dev_info *devip;
2627
2628         devip = kzalloc(sizeof(*devip), flags);
2629         if (devip) {
2630                 devip->sdbg_host = sdbg_host;
2631                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2632         }
2633         return devip;
2634 }
2635
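/* Look up the sdebug_dev_info for sdev; if none matches, claim an unused
 * slot or allocate a new one. Returns NULL on allocation failure. */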
2636 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2637 {
2638         struct sdebug_host_info * sdbg_host;
2639         struct sdebug_dev_info * open_devip = NULL;
2640         struct sdebug_dev_info * devip =
2641                         (struct sdebug_dev_info *)sdev->hostdata;
2642
2643         if (devip)
2644                 return devip;
2645         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2646         if (!sdbg_host) {
2647                 pr_err("%s: Host info NULL\n", __func__);
2648                 return NULL;
2649         }
2650         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2651                 if ((devip->used) && (devip->channel == sdev->channel) &&
2652                     (devip->target == sdev->id) &&
2653                     (devip->lun == sdev->lun))
2654                         return devip;
2655                 else {
2656                         if ((!devip->used) && (!open_devip))
2657                                 open_devip = devip;
2658                 }
2659         }
2660         if (!open_devip) { /* try and make a new one */
2661                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2662                 if (!open_devip) {
2663                         printk(KERN_ERR "%s: out of memory at line %d\n",
2664                                 __func__, __LINE__);
2665                         return NULL;
2666                 }
2667         }
2668
2669         open_devip->channel = sdev->channel;
2670         open_devip->target = sdev->id;
2671         open_devip->lun = sdev->lun;
2672         open_devip->sdbg_host = sdbg_host;
2673         atomic_set(&open_devip->num_in_q, 0);
2674         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
2675         open_devip->used = 1;
2676         if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2677                 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2678
2679         return open_devip;
2680 }
2681
2682 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2683 {
2684         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2685                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
2686                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2687         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2688         return 0;
2689 }
2690
2691 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2692 {
2693         struct sdebug_dev_info *devip;
2694
2695         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2696                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
2697                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2698         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2699                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2700         devip = devInfoReg(sdp);
2701         if (NULL == devip)
2702                 return 1;       /* no resources, will be marked offline */
2703         sdp->hostdata = devip;
2704         sdp->tagged_supported = 1;
2705         if (sdp->host->cmd_per_lun)
2706                 scsi_adjust_queue_depth(sdp, DEF_TAGGED_QUEUING,
2707                                         DEF_CMD_PER_LUN);
2708         blk_queue_max_segment_size(sdp->request_queue, -1U);
2709         if (scsi_debug_no_uld)
2710                 sdp->no_uld_attach = 1;
2711         return 0;
2712 }
2713
2714 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2715 {
2716         struct sdebug_dev_info *devip =
2717                 (struct sdebug_dev_info *)sdp->hostdata;
2718
2719         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2720                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
2721                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2722         if (devip) {
2723                 /* make this slot available for re-use */
2724                 devip->used = 0;
2725                 sdp->hostdata = NULL;
2726         }
2727 }
2728
2729 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
2730 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2731 {
2732         unsigned long iflags;
2733         int k, qmax, r_qmax;
2734         struct sdebug_queued_cmd *sqcp;
2735         struct sdebug_dev_info *devip;
2736
2737         spin_lock_irqsave(&queued_arr_lock, iflags);
2738         qmax = scsi_debug_max_queue;
2739         r_qmax = atomic_read(&retired_max_queue);
2740         if (r_qmax > qmax)
2741                 qmax = r_qmax;
2742         for (k = 0; k < qmax; ++k) {
2743                 if (test_bit(k, queued_in_use_bm)) {
2744                         sqcp = &queued_arr[k];
2745                         if (cmnd == sqcp->a_cmnd) {
2746                                 devip = (struct sdebug_dev_info *)
2747                                         cmnd->device->hostdata;
2748                                 if (devip)
2749                                         atomic_dec(&devip->num_in_q);
2750                                 sqcp->a_cmnd = NULL;
2751                                 spin_unlock_irqrestore(&queued_arr_lock,
2752                                                        iflags);
2753                                 if (scsi_debug_ndelay > 0) {
2754                                         if (sqcp->sd_hrtp)
2755                                                 hrtimer_cancel(
2756                                                         &sqcp->sd_hrtp->hrt);
2757                                 } else if (scsi_debug_delay > 0) {
2758                                         if (sqcp->cmnd_timerp)
2759                                                 del_timer_sync(
2760                                                         sqcp->cmnd_timerp);
2761                                 } else if (scsi_debug_delay < 0) {
2762                                         if (sqcp->tletp)
2763                                                 tasklet_kill(sqcp->tletp);
2764                                 }
2765                                 clear_bit(k, queued_in_use_bm);
2766                                 return 1;
2767                         }
2768                 }
2769         }
2770         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2771         return 0;
2772 }
2773
2774 /* Deletes (stops) timers or tasklets of all queued commands */
2775 static void stop_all_queued(void)
2776 {
2777         unsigned long iflags;
2778         int k;
2779         struct sdebug_queued_cmd *sqcp;
2780         struct sdebug_dev_info *devip;
2781
2782         spin_lock_irqsave(&queued_arr_lock, iflags);
2783         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2784                 if (test_bit(k, queued_in_use_bm)) {
2785                         sqcp = &queued_arr[k];
2786                         if (sqcp->a_cmnd) {
2787                                 devip = (struct sdebug_dev_info *)
2788                                         sqcp->a_cmnd->device->hostdata;
2789                                 if (devip)
2790                                         atomic_dec(&devip->num_in_q);
2791                                 sqcp->a_cmnd = NULL;
2792                                 spin_unlock_irqrestore(&queued_arr_lock,
2793                                                        iflags);
2794                                 if (scsi_debug_ndelay > 0) {
2795                                         if (sqcp->sd_hrtp)
2796                                                 hrtimer_cancel(
2797                                                         &sqcp->sd_hrtp->hrt);
2798                                 } else if (scsi_debug_delay > 0) {
2799                                         if (sqcp->cmnd_timerp)
2800                                                 del_timer_sync(
2801                                                         sqcp->cmnd_timerp);
2802                                 } else if (scsi_debug_delay < 0) {
2803                                         if (sqcp->tletp)
2804                                                 tasklet_kill(sqcp->tletp);
2805                                 }
2806                                 clear_bit(k, queued_in_use_bm);
2807                                 spin_lock_irqsave(&queued_arr_lock, iflags);
2808                         }
2809                 }
2810         }
2811         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2812 }
2813
2814 /* Free queued command memory on heap */
2815 static void free_all_queued(void)
2816 {
2817         unsigned long iflags;
2818         int k;
2819         struct sdebug_queued_cmd *sqcp;
2820
2821         spin_lock_irqsave(&queued_arr_lock, iflags);
2822         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2823                 sqcp = &queued_arr[k];
2824                 kfree(sqcp->cmnd_timerp);
2825                 sqcp->cmnd_timerp = NULL;
2826                 kfree(sqcp->tletp);
2827                 sqcp->tletp = NULL;
2828                 kfree(sqcp->sd_hrtp);
2829                 sqcp->sd_hrtp = NULL;
2830         }
2831         spin_unlock_irqrestore(&queued_arr_lock, iflags);
2832 }
2833
2834 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
2835 {
2836         ++num_aborts;
2837         if (SCpnt) {
2838                 if (SCpnt->device &&
2839                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2840                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
2841                                     __func__);
2842                 stop_queued_cmnd(SCpnt);
2843         }
2844         return SUCCESS;
2845 }
2846
2847 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2848 {
2849         struct sdebug_dev_info * devip;
2850
2851         ++num_dev_resets;
2852         if (SCpnt && SCpnt->device) {
2853                 struct scsi_device *sdp = SCpnt->device;
2854
2855                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2856                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2857                 devip = devInfoReg(sdp);
2858                 if (devip)
2859                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
2860         }
2861         return SUCCESS;
2862 }
2863
2864 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
2865 {
2866         struct sdebug_host_info *sdbg_host;
2867         struct sdebug_dev_info *devip;
2868         struct scsi_device *sdp;
2869         struct Scsi_Host *hp;
2870         int k = 0;
2871
2872         ++num_target_resets;
2873         if (!SCpnt)
2874                 goto lie;
2875         sdp = SCpnt->device;
2876         if (!sdp)
2877                 goto lie;
2878         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2879                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2880         hp = sdp->host;
2881         if (!hp)
2882                 goto lie;
2883         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2884         if (sdbg_host) {
2885                 list_for_each_entry(devip,
2886                                     &sdbg_host->dev_info_list,
2887                                     dev_list)
2888                         if (devip->target == sdp->id) {
2889                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2890                                 ++k;
2891                         }
2892         }
2893         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2894                 sdev_printk(KERN_INFO, sdp,
2895                             "%s: %d device(s) found in target\n", __func__, k);
2896 lie:
2897         return SUCCESS;
2898 }
2899
2900 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2901 {
2902         struct sdebug_host_info *sdbg_host;
2903         struct sdebug_dev_info *devip;
2904         struct scsi_device * sdp;
2905         struct Scsi_Host * hp;
2906         int k = 0;
2907
2908         ++num_bus_resets;
2909         if (!(SCpnt && SCpnt->device))
2910                 goto lie;
2911         sdp = SCpnt->device;
2912         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
2913                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
2914         hp = sdp->host;
2915         if (hp) {
2916                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2917                 if (sdbg_host) {
2918                         list_for_each_entry(devip,
2919                                             &sdbg_host->dev_info_list,
2920                                             dev_list) {
2921                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2922                                 ++k;
2923                         }
2924                 }
2925         }
2926         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2927                 sdev_printk(KERN_INFO, sdp,
2928                             "%s: %d device(s) found in host\n", __func__, k);
2929 lie:
2930         return SUCCESS;
2931 }
2932
2933 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2934 {
2935         struct sdebug_host_info * sdbg_host;
2936         struct sdebug_dev_info *devip;
2937         int k = 0;
2938
2939         ++num_host_resets;
2940         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
2941                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
2942         spin_lock(&sdebug_host_list_lock);
2943         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2944                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
2945                                     dev_list) {
2946                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
2947                         ++k;
2948                 }
2949         }
2950         spin_unlock(&sdebug_host_list_lock);
2951         stop_all_queued();
2952         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
2953                 sdev_printk(KERN_INFO, SCpnt->device,
2954                             "%s: %d device(s) found\n", __func__, k);
2955         return SUCCESS;
2956 }
2957
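/* Write a simple DOS (MBR) partition table at the start of the ramdisk,
 * splitting the store into scsi_debug_num_parts roughly equal Linux (0x83)
 * partitions aligned to the simulated cylinder size. */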
2958 static void __init sdebug_build_parts(unsigned char *ramp,
2959                                       unsigned long store_size)
2960 {
2961         struct partition * pp;
2962         int starts[SDEBUG_MAX_PARTS + 2];
2963         int sectors_per_part, num_sectors, k;
2964         int heads_by_sects, start_sec, end_sec;
2965
2966         /* assume partition table already zeroed */
2967         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2968                 return;
2969         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2970                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2971                 pr_warn("%s: reducing partitions to %d\n", __func__,
2972                         SDEBUG_MAX_PARTS);
2973         }
2974         num_sectors = (int)sdebug_store_sectors;
2975         sectors_per_part = (num_sectors - sdebug_sectors_per)
2976                            / scsi_debug_num_parts;
2977         heads_by_sects = sdebug_heads * sdebug_sectors_per;
2978         starts[0] = sdebug_sectors_per;
2979         for (k = 1; k < scsi_debug_num_parts; ++k)
2980                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2981                             * heads_by_sects;
2982         starts[scsi_debug_num_parts] = num_sectors;
2983         starts[scsi_debug_num_parts + 1] = 0;
2984
2985         ramp[510] = 0x55;       /* magic partition markings */
2986         ramp[511] = 0xAA;
2987         pp = (struct partition *)(ramp + 0x1be);
2988         for (k = 0; starts[k + 1]; ++k, ++pp) {
2989                 start_sec = starts[k];
2990                 end_sec = starts[k + 1] - 1;
2991                 pp->boot_ind = 0;
2992
2993                 pp->cyl = start_sec / heads_by_sects;
2994                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2995                            / sdebug_sectors_per;
2996                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2997
2998                 pp->end_cyl = end_sec / heads_by_sects;
2999                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3000                                / sdebug_sectors_per;
3001                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3002
3003                 pp->start_sect = cpu_to_le32(start_sec);
3004                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3005                 pp->sys_ind = 0x83;     /* plain Linux partition */
3006         }
3007 }
3008
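/* Queue a response for cmnd. With delta_jiff == 0 the command is completed
 * in the caller's context; with delta_jiff > 0 an ordinary timer defers it;
 * with scsi_debug_ndelay > 0 an hrtimer is used; otherwise (delay < 0) a
 * tasklet gives a near-immediate deferred completion. May also report
 * TASK SET FULL when the simulated queue is saturated. */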
3009 static int
3010 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3011               int scsi_result, int delta_jiff)
3012 {
3013         unsigned long iflags;
3014         int k, num_in_q, qdepth, inject;
3015         struct sdebug_queued_cmd *sqcp = NULL;
3016         struct scsi_device *sdp;
3017
3018         if (NULL == cmnd || NULL == devip) {
3019                 pr_warn("%s: called with NULL cmnd or devip pointer\n", __func__);
3020                 /* no particularly good error to report back */
3021                 return SCSI_MLQUEUE_HOST_BUSY;
3022         }
3023         sdp = cmnd->device;     /* dereference cmnd only after the NULL check */
3024         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3025                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3026                             __func__, scsi_result);
3027         if (delta_jiff == 0)
3028                 goto respond_in_thread;
3029
3030         /* schedule the response at a later time if resources permit */
3031         spin_lock_irqsave(&queued_arr_lock, iflags);
3032         num_in_q = atomic_read(&devip->num_in_q);
3033         qdepth = cmnd->device->queue_depth;
3034         inject = 0;
3035         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3036                 if (scsi_result) {
3037                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3038                         goto respond_in_thread;
3039                 } else
3040                         scsi_result = device_qfull_result;
3041         } else if ((scsi_debug_every_nth != 0) &&
3042                    (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3043                    (scsi_result == 0)) {
3044                 if ((num_in_q == (qdepth - 1)) &&
3045                     (atomic_inc_return(&sdebug_a_tsf) >=
3046                      abs(scsi_debug_every_nth))) {
3047                         atomic_set(&sdebug_a_tsf, 0);
3048                         inject = 1;
3049                         scsi_result = device_qfull_result;
3050                 }
3051         }
3052
3053         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3054         if (k >= scsi_debug_max_queue) {
3055                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3056                 if (scsi_result)
3057                         goto respond_in_thread;
3058                 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3059                         scsi_result = device_qfull_result;
3060                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3061                         sdev_printk(KERN_INFO, sdp,
3062                                     "%s: max_queue=%d exceeded, %s\n",
3063                                     __func__, scsi_debug_max_queue,
3064                                     (scsi_result ?  "status: TASK SET FULL" :
3065                                                     "report: host busy"));
3066                 if (scsi_result)
3067                         goto respond_in_thread;
3068                 else
3069                         return SCSI_MLQUEUE_HOST_BUSY;
3070         }
3071         __set_bit(k, queued_in_use_bm);
3072         atomic_inc(&devip->num_in_q);
3073         sqcp = &queued_arr[k];
3074         sqcp->a_cmnd = cmnd;
3075         cmnd->result = scsi_result;
3076         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3077         if (delta_jiff > 0) {
3078                 if (NULL == sqcp->cmnd_timerp) {
3079                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3080                                                     GFP_ATOMIC);
3081                         if (NULL == sqcp->cmnd_timerp)
3082                                 return SCSI_MLQUEUE_HOST_BUSY;
3083                         init_timer(sqcp->cmnd_timerp);
3084                 }
3085                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3086                 sqcp->cmnd_timerp->data = k;
3087                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3088                 add_timer(sqcp->cmnd_timerp);
3089         } else if (scsi_debug_ndelay > 0) {
3090                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3091                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3092
3093                 if (NULL == sd_hp) {
3094                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3095                         if (NULL == sd_hp)
3096                                 return SCSI_MLQUEUE_HOST_BUSY;
3097                         sqcp->sd_hrtp = sd_hp;
3098                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3099                                      HRTIMER_MODE_REL);
3100                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3101                         sd_hp->qa_indx = k;
3102                 }
3103                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3104         } else {        /* delay < 0 */
3105                 if (NULL == sqcp->tletp) {
3106                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3107                                               GFP_ATOMIC);
3108                         if (NULL == sqcp->tletp)
3109                                 return SCSI_MLQUEUE_HOST_BUSY;
3110                         tasklet_init(sqcp->tletp,
3111                                      sdebug_q_cmd_complete, k);
3112                 }
3113                 if (-1 == delta_jiff)
3114                         tasklet_hi_schedule(sqcp->tletp);
3115                 else
3116                         tasklet_schedule(sqcp->tletp);
3117         }
3118         if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3119             (scsi_result == device_qfull_result))
3120                 sdev_printk(KERN_INFO, sdp,
3121                             "%s: num_in_q=%d +1, %s%s\n", __func__,
3122                             num_in_q, (inject ? "<inject> " : ""),
3123                             "status: TASK SET FULL");
3124         return 0;
3125
3126 respond_in_thread:      /* call back to mid-layer using invocation thread */
3127         cmnd->result = scsi_result;
3128         cmnd->scsi_done(cmnd);
3129         return 0;
3130 }
3131
3132 /* Note: The following macros create attribute files in the
3133    /sys/module/scsi_debug/parameters directory. Unfortunately the driver
3134    is not notified when one of those files is written, so it cannot
3135    trigger auxiliary actions the way it can when the corresponding
3136    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3137  */
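/*
 * Illustrative example (value chosen arbitrarily): a writable parameter can
 * be changed at run time via
 *     echo 4 > /sys/module/scsi_debug/parameters/opts
 * which this driver only sees the next time it reads the variable, or via
 *     echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 * which also runs the matching opts_store() handler below.
 */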
3138 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3139 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3140 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3141 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3142 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3143 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3144 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3145 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3146 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3147 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3148 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3149 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3150 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3151 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3152 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3153 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3154 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3155 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3156 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3157 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3158 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3159 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3160 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3161 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3162 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3163 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3164 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3165 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3166 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3167 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3168 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3169 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3170 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
3171 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
3172 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
3173 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
3174 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
3175                    S_IRUGO | S_IWUSR);
3176 module_param_named(write_same_length, scsi_debug_write_same_length, int,
3177                    S_IRUGO | S_IWUSR);
3178
3179 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
3180 MODULE_DESCRIPTION("SCSI debug adapter driver");
3181 MODULE_LICENSE("GPL");
3182 MODULE_VERSION(SCSI_DEBUG_VERSION);
3183
3184 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
3185 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
3186 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
3187 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
3188 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
3189 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
3190 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
3191 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
3192 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
3193 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
3194 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
3195 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
3196 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
3197 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
3198 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
3199 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
3200 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
3201 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
3202 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
3203 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
3204 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
3205 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
3206 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
3207 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
3208 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
3209 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
3210 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
3211 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3212 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3213 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
3214 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
3215 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3216 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
3217 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
3218 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
3219 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
3220 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
3221 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
3222
3223 static char sdebug_info[256];
3224
3225 static const char * scsi_debug_info(struct Scsi_Host * shp)
3226 {
3227         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
3228                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
3229                 scsi_debug_version_date, scsi_debug_dev_size_mb,
3230                 scsi_debug_opts);
3231         return sdebug_info;
3232 }
3233
3234 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
3235 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
3236 {
3237         char arr[16];
3238         int opts;
3239         int minLen = length > 15 ? 15 : length;
3240
3241         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3242                 return -EACCES;
3243         memcpy(arr, buffer, minLen);
3244         arr[minLen] = '\0';
3245         if (1 != sscanf(arr, "%d", &opts))
3246                 return -EINVAL;
3247         scsi_debug_opts = opts;
3248         if (scsi_debug_every_nth != 0)
3249                 atomic_set(&sdebug_cmnd_count, 0);
3250         return length;
3251 }
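/* For example (host id chosen for illustration), 'echo 1 > /proc/scsi/scsi_debug/0'
 * sets opts to 1 (SCSI_DEBUG_OPT_NOISE) for the driver as a whole. */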
3252
3253 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
3254  * same for each scsi_debug host (if there is more than one). Some of the
3255  * counters shown are not atomic, so they may be inaccurate on a busy system. */
3256 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
3257 {
3258         int f, l;
3259         char b[32];
3260
3261         if (scsi_debug_every_nth > 0)
3262                 snprintf(b, sizeof(b), " (curr:%d)",
3263                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
3264                                 atomic_read(&sdebug_a_tsf) :
3265                                 atomic_read(&sdebug_cmnd_count)));
3266         else
3267                 b[0] = '\0';
3268
3269         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
3270                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
3271                 "every_nth=%d%s\n"
3272                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
3273                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
3274                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
3275                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
3276                 "usec_in_jiffy=%lu\n",
3277                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
3278                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
3279                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
3280                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
3281                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
3282                 sdebug_sectors_per, num_aborts, num_dev_resets,
3283                 num_target_resets, num_bus_resets, num_host_resets,
3284                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
3285
3286         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
3287         if (f != scsi_debug_max_queue) {
3288                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
3289                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
3290                            "queued_in_use_bm", f, l);
3291         }
3292         return 0;
3293 }
3294
3295 static ssize_t delay_show(struct device_driver *ddp, char *buf)
3296 {
3297         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
3298 }
3299 /* Returns -EBUSY if delay is being changed and commands are queued */
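/* For example, 'echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay' switches to
 * immediate responses, provided no commands are currently queued. */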
3300 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
3301                            size_t count)
3302 {
3303         int delay, res;
3304
3305         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
3306                 res = count;
3307                 if (scsi_debug_delay != delay) {
3308                         unsigned long iflags;
3309                         int k;
3310
3311                         spin_lock_irqsave(&queued_arr_lock, iflags);
3312                         k = find_first_bit(queued_in_use_bm,
3313                                            scsi_debug_max_queue);
3314                         if (k != scsi_debug_max_queue)
3315                                 res = -EBUSY;   /* have queued commands */
3316                         else {
3317                                 scsi_debug_delay = delay;
3318                                 scsi_debug_ndelay = 0;
3319                         }
3320                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3321                 }
3322                 return res;
3323         }
3324         return -EINVAL;
3325 }
3326 static DRIVER_ATTR_RW(delay);
3327
3328 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
3329 {
3330         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
3331 }
3332 /* Returns -EBUSY if ndelay is being changed and commands are queued */
3333 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
3334 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
3335                            size_t count)
3336 {
3337         unsigned long iflags;
3338         int ndelay, res, k;
3339
3340         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
3341             (ndelay >= 0) && (ndelay < 1000000000)) {
3342                 res = count;
3343                 if (scsi_debug_ndelay != ndelay) {
3344                         spin_lock_irqsave(&queued_arr_lock, iflags);
3345                         k = find_first_bit(queued_in_use_bm,
3346                                            scsi_debug_max_queue);
3347                         if (k != scsi_debug_max_queue)
3348                                 res = -EBUSY;   /* have queued commands */
3349                         else {
3350                                 scsi_debug_ndelay = ndelay;
3351                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
3352                                                           : DEF_DELAY;
3353                         }
3354                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3355                 }
3356                 return res;
3357         }
3358         return -EINVAL;
3359 }
3360 static DRIVER_ATTR_RW(ndelay);
3361
3362 static ssize_t opts_show(struct device_driver *ddp, char *buf)
3363 {
3364         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
3365 }
3366
3367 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
3368                           size_t count)
3369 {
3370         int opts;
3371         char work[20];
3372
3373         if (1 == sscanf(buf, "%10s", work)) {
3374                 if (0 == strnicmp(work,"0x", 2)) {
3375                         if (1 == sscanf(&work[2], "%x", &opts))
3376                                 goto opts_done;
3377                 } else {
3378                         if (1 == sscanf(work, "%d", &opts))
3379                                 goto opts_done;
3380                 }
3381         }
3382         return -EINVAL;
3383 opts_done:
3384         scsi_debug_opts = opts;
3385         atomic_set(&sdebug_cmnd_count, 0);
3386         atomic_set(&sdebug_a_tsf, 0);
3387         return count;
3388 }
3389 static DRIVER_ATTR_RW(opts);
3390
3391 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
3392 {
3393         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
3394 }
3395 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
3396                            size_t count)
3397 {
3398         int n;
3399
3400         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3401                 scsi_debug_ptype = n;
3402                 return count;
3403         }
3404         return -EINVAL;
3405 }
3406 static DRIVER_ATTR_RW(ptype);
3407
3408 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
3409 {
3410         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
3411 }
3412 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
3413                             size_t count)
3414 {
3415         int n;
3416
3417         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3418                 scsi_debug_dsense = n;
3419                 return count;
3420         }
3421         return -EINVAL;
3422 }
3423 static DRIVER_ATTR_RW(dsense);
3424
3425 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
3426 {
3427         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
3428 }
3429 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
3430                              size_t count)
3431 {
3432         int n;
3433
3434         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3435                 n = (n > 0);
3436                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
3437                 if (scsi_debug_fake_rw != n) {
3438                         if ((0 == n) && (NULL == fake_storep)) {
3439                                 unsigned long sz =
3440                                         (unsigned long)scsi_debug_dev_size_mb *
3441                                         1048576;
3442
3443                                 fake_storep = vmalloc(sz);
3444                                 if (NULL == fake_storep) {
3445                                         pr_err("%s: out of memory, 9\n",
3446                                                __func__);
3447                                         return -ENOMEM;
3448                                 }
3449                                 memset(fake_storep, 0, sz);
3450                         }
3451                         scsi_debug_fake_rw = n;
3452                 }
3453                 return count;
3454         }
3455         return -EINVAL;
3456 }
3457 static DRIVER_ATTR_RW(fake_rw);
3458
3459 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
3460 {
3461         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
3462 }
3463 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
3464                               size_t count)
3465 {
3466         int n;
3467
3468         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3469                 scsi_debug_no_lun_0 = n;
3470                 return count;
3471         }
3472         return -EINVAL;
3473 }
3474 static DRIVER_ATTR_RW(no_lun_0);
3475
3476 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
3477 {
3478         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
3479 }
3480 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
3481                               size_t count)
3482 {
3483         int n;
3484
3485         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3486                 scsi_debug_num_tgts = n;
3487                 sdebug_max_tgts_luns();
3488                 return count;
3489         }
3490         return -EINVAL;
3491 }
3492 static DRIVER_ATTR_RW(num_tgts);
3493
3494 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
3495 {
3496         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3497 }
3498 static DRIVER_ATTR_RO(dev_size_mb);
3499
3500 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
3501 {
3502         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3503 }
3504 static DRIVER_ATTR_RO(num_parts);
3505
3506 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
3507 {
3508         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3509 }
3510 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
3511                                size_t count)
3512 {
3513         int nth;
3514
3515         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3516                 scsi_debug_every_nth = nth;
3517                 atomic_set(&sdebug_cmnd_count, 0);
3518                 return count;
3519         }
3520         return -EINVAL;
3521 }
3522 static DRIVER_ATTR_RW(every_nth);
3523
3524 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
3525 {
3526         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3527 }
3528 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
3529                               size_t count)
3530 {
3531         int n;
3532
3533         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3534                 scsi_debug_max_luns = n;
3535                 sdebug_max_tgts_luns();
3536                 return count;
3537         }
3538         return -EINVAL;
3539 }
3540 static DRIVER_ATTR_RW(max_luns);
3541
3542 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
3543 {
3544         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3545 }
3546 /* N.B. max_queue can be changed while there are queued commands. In flight
3547  * commands beyond the new max_queue will be completed. */
3548 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
3549                                size_t count)
3550 {
3551         unsigned long iflags;
3552         int n, k;
3553
3554         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3555             (n <= SCSI_DEBUG_CANQUEUE)) {
3556                 spin_lock_irqsave(&queued_arr_lock, iflags);
3557                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
3558                 scsi_debug_max_queue = n;
3559                 if (SCSI_DEBUG_CANQUEUE == k)
3560                         atomic_set(&retired_max_queue, 0);
3561                 else if (k >= n)
3562                         atomic_set(&retired_max_queue, k + 1);
3563                 else
3564                         atomic_set(&retired_max_queue, 0);
3565                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3566                 return count;
3567         }
3568         return -EINVAL;
3569 }
3570 static DRIVER_ATTR_RW(max_queue);
3571
3572 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
3573 {
3574         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3575 }
3576 static DRIVER_ATTR_RO(no_uld);
3577
3578 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
3579 {
3580         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3581 }
3582 static DRIVER_ATTR_RO(scsi_level);
3583
3584 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
3585 {
3586         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3587 }
3588 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
3589                                 size_t count)
3590 {
3591         int n;
3592
3593         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3594                 scsi_debug_virtual_gb = n;
3595
3596                 sdebug_capacity = get_sdebug_capacity();
3597
3598                 return count;
3599         }
3600         return -EINVAL;
3601 }
3602 static DRIVER_ATTR_RW(virtual_gb);
3603
3604 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
3605 {
3606         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3607 }
3608
3609 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
3610                               size_t count)
3611 {
3612         int delta_hosts;
3613
3614         if (sscanf(buf, "%d", &delta_hosts) != 1)
3615                 return -EINVAL;
3616         if (delta_hosts > 0) {
3617                 do {
3618                         sdebug_add_adapter();
3619                 } while (--delta_hosts);
3620         } else if (delta_hosts < 0) {
3621                 do {
3622                         sdebug_remove_adapter();
3623                 } while (++delta_hosts);
3624         }
3625         return count;
3626 }
3627 static DRIVER_ATTR_RW(add_host);
3628
3629 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
3630 {
3631         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3632 }
3633 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
3634                                     size_t count)
3635 {
3636         int n;
3637
3638         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3639                 scsi_debug_vpd_use_hostno = n;
3640                 return count;
3641         }
3642         return -EINVAL;
3643 }
3644 static DRIVER_ATTR_RW(vpd_use_hostno);
3645
3646 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
3647 {
3648         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3649 }
3650 static DRIVER_ATTR_RO(sector_size);
3651
3652 static ssize_t dix_show(struct device_driver *ddp, char *buf)
3653 {
3654         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3655 }
3656 static DRIVER_ATTR_RO(dix);
3657
3658 static ssize_t dif_show(struct device_driver *ddp, char *buf)
3659 {
3660         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3661 }
3662 static DRIVER_ATTR_RO(dif);
3663
3664 static ssize_t guard_show(struct device_driver *ddp, char *buf)
3665 {
3666         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
3667 }
3668 static DRIVER_ATTR_RO(guard);
3669
3670 static ssize_t ato_show(struct device_driver *ddp, char *buf)
3671 {
3672         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3673 }
3674 static DRIVER_ATTR_RO(ato);
3675
3676 static ssize_t map_show(struct device_driver *ddp, char *buf)
3677 {
3678         ssize_t count;
3679
3680         if (!scsi_debug_lbp())
3681                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3682                                  sdebug_store_sectors);
3683
3684         count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3685
3686         buf[count++] = '\n';
3687         buf[count++] = 0;
3688
3689         return count;
3690 }
3691 static DRIVER_ATTR_RO(map);
3692
3693 static ssize_t removable_show(struct device_driver *ddp, char *buf)
3694 {
3695         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3696 }
3697 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
3698                                size_t count)
3699 {
3700         int n;
3701
3702         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3703                 scsi_debug_removable = (n > 0);
3704                 return count;
3705         }
3706         return -EINVAL;
3707 }
3708 static DRIVER_ATTR_RW(removable);
3709
3710 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
3711 {
3712         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
3713 }
3714 /* Returns -EBUSY if host_lock is being changed and commands are queued */
3715 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
3716                                size_t count)
3717 {
3718         int n, res;
3719
3720         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3721                 bool new_host_lock = (n > 0);
3722
3723                 res = count;
3724                 if (new_host_lock != scsi_debug_host_lock) {
3725                         unsigned long iflags;
3726                         int k;
3727
3728                         spin_lock_irqsave(&queued_arr_lock, iflags);
3729                         k = find_first_bit(queued_in_use_bm,
3730                                            scsi_debug_max_queue);
3731                         if (k != scsi_debug_max_queue)
3732                                 res = -EBUSY;   /* have queued commands */
3733                         else
3734                                 scsi_debug_host_lock = new_host_lock;
3735                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3736                 }
3737                 return res;
3738         }
3739         return -EINVAL;
3740 }
3741 static DRIVER_ATTR_RW(host_lock);
3742
3743
3744 /* Note: The following array creates attribute files in the
3745    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3746    files (over those found in the /sys/module/scsi_debug/parameters
3747    directory) is that auxiliary actions can be triggered when an attribute
3748    is changed. For example, see add_host_store() above.
3749  */
3750
3751 static struct attribute *sdebug_drv_attrs[] = {
3752         &driver_attr_delay.attr,
3753         &driver_attr_opts.attr,
3754         &driver_attr_ptype.attr,
3755         &driver_attr_dsense.attr,
3756         &driver_attr_fake_rw.attr,
3757         &driver_attr_no_lun_0.attr,
3758         &driver_attr_num_tgts.attr,
3759         &driver_attr_dev_size_mb.attr,
3760         &driver_attr_num_parts.attr,
3761         &driver_attr_every_nth.attr,
3762         &driver_attr_max_luns.attr,
3763         &driver_attr_max_queue.attr,
3764         &driver_attr_no_uld.attr,
3765         &driver_attr_scsi_level.attr,
3766         &driver_attr_virtual_gb.attr,
3767         &driver_attr_add_host.attr,
3768         &driver_attr_vpd_use_hostno.attr,
3769         &driver_attr_sector_size.attr,
3770         &driver_attr_dix.attr,
3771         &driver_attr_dif.attr,
3772         &driver_attr_guard.attr,
3773         &driver_attr_ato.attr,
3774         &driver_attr_map.attr,
3775         &driver_attr_removable.attr,
3776         &driver_attr_host_lock.attr,
3777         &driver_attr_ndelay.attr,
3778         NULL,
3779 };
3780 ATTRIBUTE_GROUPS(sdebug_drv);
3781
3782 static struct device *pseudo_primary;
3783
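/* Module initialization: validate the module parameters, allocate the shared
 * ramdisk (plus optional DIF and provisioning-map stores), register the
 * pseudo bus and driver, then add the requested number of simulated hosts. */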
3784 static int __init scsi_debug_init(void)
3785 {
3786         unsigned long sz;
3787         int host_to_add;
3788         int k;
3789         int ret;
3790
3791         atomic_set(&sdebug_cmnd_count, 0);
3792         atomic_set(&sdebug_completions, 0);
3793         atomic_set(&retired_max_queue, 0);
3794
3795         if (scsi_debug_ndelay >= 1000000000) {
3796                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
3797                         __func__);
3798                 scsi_debug_ndelay = 0;
3799         } else if (scsi_debug_ndelay > 0)
3800                 scsi_debug_delay = DELAY_OVERRIDDEN;
3801
3802         switch (scsi_debug_sector_size) {
3803         case  512:
3804         case 1024:
3805         case 2048:
3806         case 4096:
3807                 break;
3808         default:
3809                 pr_err("%s: invalid sector_size %d\n", __func__,
3810                        scsi_debug_sector_size);
3811                 return -EINVAL;
3812         }
3813
3814         switch (scsi_debug_dif) {
3815
3816         case SD_DIF_TYPE0_PROTECTION:
3817         case SD_DIF_TYPE1_PROTECTION:
3818         case SD_DIF_TYPE2_PROTECTION:
3819         case SD_DIF_TYPE3_PROTECTION:
3820                 break;
3821
3822         default:
3823                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
3824                 return -EINVAL;
3825         }
3826
3827         if (scsi_debug_guard > 1) {
3828                 pr_err("%s: guard must be 0 or 1\n", __func__);
3829                 return -EINVAL;
3830         }
3831
3832         if (scsi_debug_ato > 1) {
3833                 pr_err("%s: ato must be 0 or 1\n", __func__);
3834                 return -EINVAL;
3835         }
3836
3837         if (scsi_debug_physblk_exp > 15) {
3838                 pr_err("%s: invalid physblk_exp %u\n", __func__,
3839                        scsi_debug_physblk_exp);
3840                 return -EINVAL;
3841         }
3842
3843         if (scsi_debug_lowest_aligned > 0x3fff) {
3844                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
3845                        scsi_debug_lowest_aligned);
3846                 return -EINVAL;
3847         }
3848
3849         if (scsi_debug_dev_size_mb < 1)
3850                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
3851         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3852         sdebug_store_sectors = sz / scsi_debug_sector_size;
3853         sdebug_capacity = get_sdebug_capacity();
3854
3855         /* play around with geometry, don't waste too much on track 0 */
3856         sdebug_heads = 8;
3857         sdebug_sectors_per = 32;
3858         if (scsi_debug_dev_size_mb >= 16)
3859                 sdebug_heads = 32;
3860         else if (scsi_debug_dev_size_mb >= 256)
3861                 sdebug_heads = 64;
3862         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3863                                (sdebug_sectors_per * sdebug_heads);
3864         if (sdebug_cylinders_per >= 1024) {
3865                 /* other LLDs do this; implies >= 1GB ram disk ... */
3866                 sdebug_heads = 255;
3867                 sdebug_sectors_per = 63;
3868                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3869                                (sdebug_sectors_per * sdebug_heads);
3870         }
3871
3872         if (0 == scsi_debug_fake_rw) {
3873                 fake_storep = vmalloc(sz);
3874                 if (NULL == fake_storep) {
3875                         pr_err("%s: out of memory, 1\n", __func__);
3876                         return -ENOMEM;
3877                 }
3878                 memset(fake_storep, 0, sz);
3879                 if (scsi_debug_num_parts > 0)
3880                         sdebug_build_parts(fake_storep, sz);
3881         }
3882
3883         if (scsi_debug_dix) {
3884                 int dif_size;
3885
3886                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3887                 dif_storep = vmalloc(dif_size);
3888
3889                 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
3890                         dif_storep);
3891
3892                 if (dif_storep == NULL) {
3893                         pr_err("%s: out of mem. (DIX)\n", __func__);
3894                         ret = -ENOMEM;
3895                         goto free_vm;
3896                 }
3897
3898                 memset(dif_storep, 0xff, dif_size);
3899         }
3900
3901         /* Logical Block Provisioning */
3902         if (scsi_debug_lbp()) {
3903                 scsi_debug_unmap_max_blocks =
3904                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3905
3906                 scsi_debug_unmap_max_desc =
3907                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3908
3909                 scsi_debug_unmap_granularity =
3910                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3911
3912                 if (scsi_debug_unmap_alignment &&
3913                     scsi_debug_unmap_granularity <=
3914                     scsi_debug_unmap_alignment) {
3915                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
3916                                __func__);
3917                         return -EINVAL;
3918                 }
3919
3920                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3921                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3922
3923                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
3924
3925                 if (map_storep == NULL) {
3926                         pr_err("%s: out of mem. (MAP)\n", __func__);
3927                         ret = -ENOMEM;
3928                         goto free_vm;
3929                 }
3930
3931                 bitmap_zero(map_storep, map_size);
3932
3933                 /* Map first 1KB for partition table */
3934                 if (scsi_debug_num_parts)
3935                         map_region(0, 2);
3936         }
3937
3938         pseudo_primary = root_device_register("pseudo_0");
3939         if (IS_ERR(pseudo_primary)) {
3940                 pr_warn("%s: root_device_register() error\n", __func__);
3941                 ret = PTR_ERR(pseudo_primary);
3942                 goto free_vm;
3943         }
3944         ret = bus_register(&pseudo_lld_bus);
3945         if (ret < 0) {
3946                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
3947                 goto dev_unreg;
3948         }
3949         ret = driver_register(&sdebug_driverfs_driver);
3950         if (ret < 0) {
3951                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
3952                 goto bus_unreg;
3953         }
3954
3955         host_to_add = scsi_debug_add_host;
3956         scsi_debug_add_host = 0;
3957
3958         for (k = 0; k < host_to_add; k++) {
3959                 if (sdebug_add_adapter()) {
3960                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
3961                                 __func__, k);
3962                         break;
3963                 }
3964         }
3965
3966         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3967                 pr_info("%s: built %d host(s)\n", __func__,
3968                         scsi_debug_add_host);
3969         }
3970         return 0;
3971
3972 bus_unreg:
3973         bus_unregister(&pseudo_lld_bus);
3974 dev_unreg:
3975         root_device_unregister(pseudo_primary);
3976 free_vm:
3977         if (map_storep)
3978                 vfree(map_storep);
3979         if (dif_storep)
3980                 vfree(dif_storep);
3981         vfree(fake_storep);
3982
3983         return ret;
3984 }
3985
3986 static void __exit scsi_debug_exit(void)
3987 {
3988         int k = scsi_debug_add_host;
3989
3990         stop_all_queued();
3991         free_all_queued();
3992         for (; k; k--)
3993                 sdebug_remove_adapter();
3994         driver_unregister(&sdebug_driverfs_driver);
3995         bus_unregister(&pseudo_lld_bus);
3996         root_device_unregister(pseudo_primary);
3997
3998         if (dif_storep)
3999                 vfree(dif_storep);
4000
4001         vfree(fake_storep);
4002 }
4003
4004 device_initcall(scsi_debug_init);
4005 module_exit(scsi_debug_exit);
4006
4007 static void sdebug_release_adapter(struct device * dev)
4008 {
4009         struct sdebug_host_info *sdbg_host;
4010
4011         sdbg_host = to_sdebug_host(dev);
4012         kfree(sdbg_host);
4013 }
4014
4015 static int sdebug_add_adapter(void)
4016 {
4017         int k, devs_per_host;
4018         int error = 0;
4019         struct sdebug_host_info *sdbg_host;
4020         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4021
4022         sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
4023         if (NULL == sdbg_host) {
4024                 printk(KERN_ERR "%s: out of memory at line %d\n",
4025                        __func__, __LINE__);
4026                 return -ENOMEM;
4027         }
4028
4029         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4030
4031         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4032         for (k = 0; k < devs_per_host; k++) {
4033                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4034                 if (!sdbg_devinfo) {
4035                         printk(KERN_ERR "%s: out of memory at line %d\n",
4036                                __func__, __LINE__);
4037                         error = -ENOMEM;
4038                         goto clean;
4039                 }
4040         }
4041
4042         spin_lock(&sdebug_host_list_lock);
4043         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4044         spin_unlock(&sdebug_host_list_lock);
4045
4046         sdbg_host->dev.bus = &pseudo_lld_bus;
4047         sdbg_host->dev.parent = pseudo_primary;
4048         sdbg_host->dev.release = &sdebug_release_adapter;
4049         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4050
4051         error = device_register(&sdbg_host->dev);
4052
4053         if (error)
4054                 goto clean;
4055
4056         ++scsi_debug_add_host;
4057         return error;
4058
4059 clean:
4060         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4061                                  dev_list) {
4062                 list_del(&sdbg_devinfo->dev_list);
4063                 kfree(sdbg_devinfo);
4064         }
4065
4066         kfree(sdbg_host);
4067         return error;
4068 }
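     /*
      * Each call above builds one simulated adapter: num_tgts * max_luns
      * device info records plus a pseudo device named "adapter<N>" on the
      * pseudo bus, whose probe routine (sdebug_driver_probe below) creates
      * the actual Scsi_Host.  A sketch of how this is typically driven,
      * assuming the usual module parameter names (add_host, num_tgts,
      * max_luns):
      *
      *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=2
      *
      * which would make scsi_debug_init() call sdebug_add_adapter() twice.
      */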
4069
4070 static void sdebug_remove_adapter(void)
4071 {
4072         struct sdebug_host_info *sdbg_host = NULL;
4073
4074         spin_lock(&sdebug_host_list_lock);
4075         if (!list_empty(&sdebug_host_list)) {
4076                 sdbg_host = list_entry(sdebug_host_list.prev,
4077                                        struct sdebug_host_info, host_list);
4078                 list_del(&sdbg_host->host_list);
4079         }
4080         spin_unlock(&sdebug_host_list_lock);
4081
4082         if (!sdbg_host)
4083                 return;
4084
4085         device_unregister(&sdbg_host->dev);
4086         --scsi_debug_add_host;
4087 }
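     /*
      * Removal pops the most recently added host off sdebug_host_list under
      * the spinlock; device_unregister() then drops the device reference,
      * and once the last reference is gone sdebug_release_adapter() frees
      * the sdebug_host_info.
      */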
4088
4089 static int
4090 scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
4091 {
4092         unsigned char *cmd = SCpnt->cmnd;
4093         int len, k;
4094         unsigned int num;
4095         unsigned long long lba;
4096         u32 ei_lba;
4097         int errsts = 0;
4098         int target = SCpnt->device->id;
4099         struct sdebug_dev_info *devip = NULL;
4100         int inj_recovered = 0;
4101         int inj_transport = 0;
4102         int inj_dif = 0;
4103         int inj_dix = 0;
4104         int inj_short = 0;
4105         int delay_override = 0;
4106         int unmap = 0;
4107
4108         scsi_set_resid(SCpnt, 0);
4109         if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
4110             !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
4111                 char b[120] = "";       /* stays printable even if cmd_len is 0 */
4112                 int n;
4113
4114                 len = SCpnt->cmd_len;
4115                 if (len > 32)
4116                         strcpy(b, "too long, over 32 bytes");
4117                 else {
4118                         for (k = 0, n = 0; k < len; ++k)
4119                                 n += scnprintf(b + n, sizeof(b) - n, "%02x ",
4120                                                (unsigned int)cmd[k]);
4121                 }
4122                 sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
4123                             b);
4124         }
4125
4126         if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
4127             (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
4128                 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4129         devip = devInfoReg(SCpnt->device);
4130         if (NULL == devip)
4131                 return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
4132
4133         if ((scsi_debug_every_nth != 0) &&
4134             (atomic_inc_return(&sdebug_cmnd_count) >=
4135              abs(scsi_debug_every_nth))) {
4136                 atomic_set(&sdebug_cmnd_count, 0);
4137                 if (scsi_debug_every_nth < -1)
4138                         scsi_debug_every_nth = -1;
4139                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
4140                         return 0; /* ignore command causing timeout */
4141                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
4142                          scsi_medium_access_command(SCpnt))
4143                         return 0; /* time out reads and writes */
4144                 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
4145                         inj_recovered = 1; /* to reads and writes below */
4146                 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
4147                         inj_transport = 1; /* to reads and writes below */
4148                 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
4149                         inj_dif = 1; /* to reads and writes below */
4150                 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
4151                         inj_dix = 1; /* to reads and writes below */
4152                 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
4153                         inj_short = 1;
4154         }
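             /*
              * Any error-injection flags chosen above (recovered, transport,
              * DIF, DIX, short transfer) are only latched here; they take
              * effect further down, in the READ/WRITE handling for this
              * command.
              */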
4155
4156         if (devip->wlun) {
4157                 switch (*cmd) {
4158                 case INQUIRY:
4159                 case REQUEST_SENSE:
4160                 case TEST_UNIT_READY:
4161                 case REPORT_LUNS:
4162                         break;  /* only allowable wlun commands */
4163                 default:
4164                         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4165                                 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
4166                                        "not supported for wlun\n", *cmd);
4167                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4168                                         INVALID_OPCODE, 0);
4169                         errsts = check_condition_result;
4170                         return schedule_resp(SCpnt, devip, errsts, 0);
4171                 }
4172         }
4173
4174         switch (*cmd) {
4175         case INQUIRY:     /* mandatory, ignore unit attention */
4176                 delay_override = 1;
4177                 errsts = resp_inquiry(SCpnt, target, devip);
4178                 break;
4179         case REQUEST_SENSE:     /* mandatory, ignore unit attention */
4180                 delay_override = 1;
4181                 errsts = resp_requests(SCpnt, devip);
4182                 break;
4183         case REZERO_UNIT:       /* actually this is REWIND for SSC */
4184         case START_STOP:
4185                 errsts = resp_start_stop(SCpnt, devip);
4186                 break;
4187         case ALLOW_MEDIUM_REMOVAL:
4188                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4189                 if (errsts)
4190                         break;
4191                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4192                         printk(KERN_INFO "scsi_debug: Medium removal %s\n",
4193                                cmd[4] ? "inhibited" : "enabled");
4194                 break;
4195         case SEND_DIAGNOSTIC:     /* mandatory */
4196                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4197                 break;
4198         case TEST_UNIT_READY:     /* mandatory */
4199                 /* delay_override = 1; */
4200                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4201                 break;
4202         case RESERVE:
4203         case RESERVE_10:
4204         case RELEASE:
4205         case RELEASE_10:
4206                 /* all four opcodes get the same treatment: readiness check only */
4207                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4208                 break;
4214         case READ_CAPACITY:
4215                 errsts = resp_readcap(SCpnt, devip);
4216                 break;
4217         case SERVICE_ACTION_IN:
4218                 if (cmd[1] == SAI_READ_CAPACITY_16)
4219                         errsts = resp_readcap16(SCpnt, devip);
4220                 else if (cmd[1] == SAI_GET_LBA_STATUS) {
4221
4222                         if (scsi_debug_lbp() == 0) {
4223                                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4224                                                 INVALID_COMMAND_OPCODE, 0);
4225                                 errsts = check_condition_result;
4226                         } else
4227                                 errsts = resp_get_lba_status(SCpnt, devip);
4228                 } else {
4229                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4230                                         INVALID_OPCODE, 0);
4231                         errsts = check_condition_result;
4232                 }
4233                 break;
4234         case MAINTENANCE_IN:
4235                 if (MI_REPORT_TARGET_PGS != cmd[1]) {
4236                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4237                                         INVALID_OPCODE, 0);
4238                         errsts = check_condition_result;
4239                         break;
4240                 }
4241                 errsts = resp_report_tgtpgs(SCpnt, devip);
4242                 break;
4243         case READ_16:
4244         case READ_12:
4245         case READ_10:
4246                 /* READ{10,12,16} and DIF Type 2 are natural enemies */
4247                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4248                     cmd[1] & 0xe0) {
4249                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4250                                         INVALID_COMMAND_OPCODE, 0);
4251                         errsts = check_condition_result;
4252                         break;
4253                 }
4254
4255                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4256                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4257                     (cmd[1] & 0xe0) == 0)
4258                         printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4259
4260                 /* fall through */
4261         case READ_6:
4262 read:
4263                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4264                 if (errsts)
4265                         break;
4266                 if (scsi_debug_fake_rw)
4267                         break;
4268                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4269
4270                 if (inj_short)
4271                         num /= 2;
4272
4273                 errsts = resp_read(SCpnt, lba, num, ei_lba);
4274                 if (inj_recovered && (0 == errsts)) {
4275                         mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4276                                         THRESHOLD_EXCEEDED, 0);
4277                         errsts = check_condition_result;
4278                 } else if (inj_transport && (0 == errsts)) {
4279                         mk_sense_buffer(SCpnt, ABORTED_COMMAND,
4280                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
4281                         errsts = check_condition_result;
4282                 } else if (inj_dif && (0 == errsts)) {
4283                         /* Logical block guard check failed */
4284                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4285                         errsts = illegal_condition_result;
4286                 } else if (inj_dix && (0 == errsts)) {
4287                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4288                         errsts = illegal_condition_result;
4289                 }
4290                 break;
4291         case REPORT_LUNS:       /* mandatory, ignore unit attention */
4292                 delay_override = 1;
4293                 errsts = resp_report_luns(SCpnt, devip);
4294                 break;
4295         case VERIFY:            /* 10 byte SBC-2 command */
4296                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4297                 break;
4298         case WRITE_16:
4299         case WRITE_12:
4300         case WRITE_10:
4301                 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
4302                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
4303                     cmd[1] & 0xe0) {
4304                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4305                                         INVALID_COMMAND_OPCODE, 0);
4306                         errsts = check_condition_result;
4307                         break;
4308                 }
4309
4310                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
4311                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
4312                     (cmd[1] & 0xe0) == 0)
4313                         printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
4314
4315                 /* fall through */
4316         case WRITE_6:
4317 write:
4318                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4319                 if (errsts)
4320                         break;
4321                 if (scsi_debug_fake_rw)
4322                         break;
4323                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4324                 errsts = resp_write(SCpnt, lba, num, ei_lba);
4325                 if (inj_recovered && (0 == errsts)) {
4326                         mk_sense_buffer(SCpnt, RECOVERED_ERROR,
4327                                         THRESHOLD_EXCEEDED, 0);
4328                         errsts = check_condition_result;
4329                 } else if (inj_dif && (0 == errsts)) {
4330                         mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
4331                         errsts = illegal_condition_result;
4332                 } else if (inj_dix && (0 == errsts)) {
4333                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
4334                         errsts = illegal_condition_result;
4335                 }
4336                 break;
4337         case WRITE_SAME_16:
4338         case WRITE_SAME:
4339                 if (cmd[1] & 0x8) {
4340                         if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
4341                             (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
4342                                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4343                                                 INVALID_FIELD_IN_CDB, 0);
4344                                 errsts = check_condition_result;
4345                         } else
4346                                 unmap = 1;
4347                 }
4348                 if (errsts)
4349                         break;
4350                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4351                 if (errsts)
4352                         break;
4353                 if (scsi_debug_fake_rw)
4354                         break;
4355                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4356                 errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
4357                 break;
4358         case UNMAP:
4359                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4360                 if (errsts)
4361                         break;
4362                 if (scsi_debug_fake_rw)
4363                         break;
4364
4365                 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
4366                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4367                                         INVALID_COMMAND_OPCODE, 0);
4368                         errsts = check_condition_result;
4369                 } else
4370                         errsts = resp_unmap(SCpnt, devip);
4371                 break;
4372         case MODE_SENSE:
4373         case MODE_SENSE_10:
4374                 errsts = resp_mode_sense(SCpnt, target, devip);
4375                 break;
4376         case MODE_SELECT:
4377                 errsts = resp_mode_select(SCpnt, 1, devip);
4378                 break;
4379         case MODE_SELECT_10:
4380                 errsts = resp_mode_select(SCpnt, 0, devip);
4381                 break;
4382         case LOG_SENSE:
4383                 errsts = resp_log_sense(SCpnt, devip);
4384                 break;
4385         case SYNCHRONIZE_CACHE:
4386                 delay_override = 1;
4387                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4388                 break;
4389         case WRITE_BUFFER:
4390                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4391                 break;
4392         case XDWRITEREAD_10:
4393                 if (!scsi_bidi_cmnd(SCpnt)) {
4394                         mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4395                                         INVALID_FIELD_IN_CDB, 0);
4396                         errsts = check_condition_result;
4397                         break;
4398                 }
4399
4400                 errsts = check_readiness(SCpnt, UAS_TUR, devip);
4401                 if (errsts)
4402                         break;
4403                 if (scsi_debug_fake_rw)
4404                         break;
4405                 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
4406                 errsts = resp_read(SCpnt, lba, num, ei_lba);
4407                 if (errsts)
4408                         break;
4409                 errsts = resp_write(SCpnt, lba, num, ei_lba);
4410                 if (errsts)
4411                         break;
4412                 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
4413                 break;
4414         case VARIABLE_LENGTH_CMD:
4415                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
4416
4417                         if ((cmd[10] & 0xe0) == 0)
4418                                 printk(KERN_ERR
4419                                        "Unprotected RD/WR to DIF device\n");
4420
4421                         if (cmd[9] == READ_32) {
4422                                 BUG_ON(SCpnt->cmd_len < 32);
4423                                 goto read;
4424                         }
4425
4426                         if (cmd[9] == WRITE_32) {
4427                                 BUG_ON(SCpnt->cmd_len < 32);
4428                                 goto write;
4429                         }
4430                 }
4431
4432                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4433                                 INVALID_FIELD_IN_CDB, 0);
4434                 errsts = check_condition_result;
4435                 break;
4436         case 0x85:
4437                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4438                         sdev_printk(KERN_INFO, SCpnt->device,
4439                         "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
4440                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
4441                                 INVALID_OPCODE, 0);
4442                 errsts = check_condition_result;
4443                 break;
4444         default:
4445                 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
4446                         sdev_printk(KERN_INFO, SCpnt->device,
4447                                     "%s: Opcode: 0x%x not supported\n",
4448                                     my_name, *cmd);
4449                 errsts = check_readiness(SCpnt, UAS_ONLY, devip);
4450                 if (errsts)
4451                         break;  /* Unit attention takes precedence */
4452                 mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
4453                 errsts = check_condition_result;
4454                 break;
4455         }
4456         return schedule_resp(SCpnt, devip, errsts,
4457                              (delay_override ? 0 : scsi_debug_delay));
4458 }
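     /*
      * Every path out of the big switch above funnels through
      * schedule_resp(), defined earlier in this file, with either the
      * configured scsi_debug_delay or zero when delay_override is set
      * (INQUIRY, REQUEST SENSE, REPORT LUNS, SYNCHRONIZE CACHE above).
      */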
4459
4460 static int
4461 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4462 {
4463         if (scsi_debug_host_lock) {
4464                 unsigned long iflags;
4465                 int rc;
4466
4467                 spin_lock_irqsave(shost->host_lock, iflags);
4468                 rc = scsi_debug_queuecommand(cmd);
4469                 spin_unlock_irqrestore(shost->host_lock, iflags);
4470                 return rc;
4471         } else
4472                 return scsi_debug_queuecommand(cmd);
4473 }
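     /*
      * scsi_debug_host_lock selects between two dispatch modes: with it set,
      * queuecommand runs under the Scsi_Host lock with interrupts disabled
      * (the historical midlayer behaviour); without it, commands are
      * dispatched lock-free.  Handy when comparing the two paths from a
      * test rig.
      */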
4474
4475 static int
4476 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
4477 {
4478         int num_in_q = 0;
4479         int bad = 0;
4480         unsigned long iflags;
4481         struct sdebug_dev_info *devip;
4482
4483         spin_lock_irqsave(&queued_arr_lock, iflags);
4484         devip = (struct sdebug_dev_info *)sdev->hostdata;
4485         if (NULL == devip) {
4486                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4487                 return  -ENODEV;
4488         }
4489         num_in_q = atomic_read(&devip->num_in_q);
4490         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4491         if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
4492                 if (qdepth < 1)
4493                         qdepth = 1;
4494                 /* allow to exceed max host queued_arr elements for testing */
4495                 if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4496                         qdepth = SCSI_DEBUG_CANQUEUE + 10;
4497                 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4498         } else if (reason == SCSI_QDEPTH_QFULL)
4499                 scsi_track_queue_full(sdev, qdepth);
4500         else
4501                 bad = 1;
4502         if (bad)
4503                 sdev_printk(KERN_WARNING, sdev,
4504                             "%s: unknown reason=0x%x\n", __func__, reason);
4505         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4506                 if (SCSI_QDEPTH_QFULL == reason)
4507                         sdev_printk(KERN_INFO, sdev,
4508                             "%s: -> %d, num_in_q=%d, reason: queue full\n",
4509                                     __func__, qdepth, num_in_q);
4510                 else {
4511                         const char *cp;
4512
4513                         switch (reason) {
4514                         case SCSI_QDEPTH_DEFAULT:
4515                                 cp = "default (sysfs ?)";
4516                                 break;
4517                         case SCSI_QDEPTH_RAMP_UP:
4518                                 cp = "ramp up";
4519                                 break;
4520                         default:
4521                                 cp = "unknown";
4522                                 break;
4523                         }
4524                         sdev_printk(KERN_INFO, sdev,
4525                                     "%s: qdepth=%d, num_in_q=%d, reason: %s\n",
4526                                     __func__, qdepth, num_in_q, cp);
4527                 }
4528         }
4529         return sdev->queue_depth;
4530 }
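     /*
      * Queue-depth changes are bounded below by 1 and deliberately allowed
      * to exceed SCSI_DEBUG_CANQUEUE by up to 10 so overload behaviour can
      * be exercised; the function reports back whatever depth the midlayer
      * ended up with via sdev->queue_depth.
      */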
4531
4532 static int
4533 sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4534 {
4535         if (sdev->tagged_supported) {
4536                 scsi_set_tag_type(sdev, qtype);
4537                 if (qtype)
4538                         scsi_activate_tcq(sdev, sdev->queue_depth);
4539                 else
4540                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
4541         } else
4542                 qtype = 0;
4543         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4544                 const char *cp;
4545
4546                 switch (qtype) {
4547                 case 0:
4548                         cp = "untagged";
4549                         break;
4550                 case MSG_SIMPLE_TAG:
4551                         cp = "simple tags";
4552                         break;
4553                 case MSG_ORDERED_TAG:
4554                         cp = "ordered tags";
4555                         break;
4556                 default:
4557                         cp = "unknown";
4558                         break;
4559                 }
4560                 sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
4561         }
4562         return qtype;
4563 }
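     /*
      * Tag-type changes silently fall back to untagged (0) when the device
      * does not claim tagged-command support; otherwise TCQ is activated or
      * deactivated at the current queue depth.
      */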
4564
4565 static struct scsi_host_template sdebug_driver_template = {
4566         .show_info =            scsi_debug_show_info,
4567         .write_info =           scsi_debug_write_info,
4568         .proc_name =            sdebug_proc_name,
4569         .name =                 "SCSI DEBUG",
4570         .info =                 scsi_debug_info,
4571         .slave_alloc =          scsi_debug_slave_alloc,
4572         .slave_configure =      scsi_debug_slave_configure,
4573         .slave_destroy =        scsi_debug_slave_destroy,
4574         .ioctl =                scsi_debug_ioctl,
4575         .queuecommand =         sdebug_queuecommand_lock_or_not,
4576         .change_queue_depth =   sdebug_change_qdepth,
4577         .change_queue_type =    sdebug_change_qtype,
4578         .eh_abort_handler =     scsi_debug_abort,
4579         .eh_device_reset_handler = scsi_debug_device_reset,
4580         .eh_target_reset_handler = scsi_debug_target_reset,
4581         .eh_bus_reset_handler = scsi_debug_bus_reset,
4582         .eh_host_reset_handler = scsi_debug_host_reset,
4583         .can_queue =            SCSI_DEBUG_CANQUEUE,
4584         .this_id =              7,
4585         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
4586         .cmd_per_lun =          DEF_CMD_PER_LUN,
4587         .max_sectors =          -1U,
4588         .use_clustering =       DISABLE_CLUSTERING,
4589         .module =               THIS_MODULE,
4590 };
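     /*
      * Two template fields are effectively defaults only: can_queue is
      * overwritten with scsi_debug_max_queue in sdebug_driver_probe(), and
      * use_clustering is switched to ENABLE_CLUSTERING there when
      * scsi_debug_clustering is set.
      */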
4591
4592 static int sdebug_driver_probe(struct device *dev)
4593 {
4594         int error = 0;
4595         struct sdebug_host_info *sdbg_host;
4596         struct Scsi_Host *hpnt;
4597         int host_prot;
4598
4599         sdbg_host = to_sdebug_host(dev);
4600
4601         sdebug_driver_template.can_queue = scsi_debug_max_queue;
4602         if (scsi_debug_clustering)
4603                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
4604         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
4605         if (NULL == hpnt) {
4606                 printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
4607                 error = -ENODEV;
4608                 return error;
4609         }
4610
4611         sdbg_host->shost = hpnt;
4612         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
4613         if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
4614                 hpnt->max_id = scsi_debug_num_tgts + 1;
4615         else
4616                 hpnt->max_id = scsi_debug_num_tgts;
4617         hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* = scsi_debug_max_luns; */
4618
4619         host_prot = 0;
4620
4621         switch (scsi_debug_dif) {
4622
4623         case SD_DIF_TYPE1_PROTECTION:
4624                 host_prot = SHOST_DIF_TYPE1_PROTECTION;
4625                 if (scsi_debug_dix)
4626                         host_prot |= SHOST_DIX_TYPE1_PROTECTION;
4627                 break;
4628
4629         case SD_DIF_TYPE2_PROTECTION:
4630                 host_prot = SHOST_DIF_TYPE2_PROTECTION;
4631                 if (scsi_debug_dix)
4632                         host_prot |= SHOST_DIX_TYPE2_PROTECTION;
4633                 break;
4634
4635         case SD_DIF_TYPE3_PROTECTION:
4636                 host_prot = SHOST_DIF_TYPE3_PROTECTION;
4637                 if (scsi_debug_dix)
4638                         host_prot |= SHOST_DIX_TYPE3_PROTECTION;
4639                 break;
4640
4641         default:
4642                 if (scsi_debug_dix)
4643                         host_prot |= SHOST_DIX_TYPE0_PROTECTION;
4644                 break;
4645         }
4646
4647         scsi_host_set_prot(hpnt, host_prot);
4648
4649         printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
4650                (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
4651                (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4652                (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4653                (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4654                (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4655                (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4656                (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4657
4658         if (scsi_debug_guard == 1)
4659                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4660         else
4661                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4662
4663         error = scsi_add_host(hpnt, &sdbg_host->dev);
4664         if (error) {
4665                 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4666                 error = -ENODEV;
4667                 scsi_host_put(hpnt);
4668         } else
4669                 scsi_scan_host(hpnt);
4670
4671         return error;
4672 }
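     /*
      * Probe sizes the host so that the REPORT LUNS well-known LUN
      * (SAM2_WLUN_REPORT_LUNS) is reachable via max_lun, translates the
      * scsi_debug_dif/dix settings into SHOST_DIF/DIX protection flags, and
      * picks the IP-checksum guard when scsi_debug_guard == 1 and the
      * T10-DIF CRC guard otherwise, before handing the host to the midlayer
      * with scsi_add_host() and scsi_scan_host().
      */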
4673
4674 static int sdebug_driver_remove(struct device *dev)
4675 {
4676         struct sdebug_host_info *sdbg_host;
4677         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4678
4679         sdbg_host = to_sdebug_host(dev);
4680
4681         if (!sdbg_host) {
4682                 printk(KERN_ERR "%s: Unable to locate host info\n",
4683                        __func__);
4684                 return -ENODEV;
4685         }
4686
4687         scsi_remove_host(sdbg_host->shost);
4688
4689         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4690                                  dev_list) {
4691                 list_del(&sdbg_devinfo->dev_list);
4692                 kfree(sdbg_devinfo);
4693         }
4694
4695         scsi_host_put(sdbg_host->shost);
4696         return 0;
4697 }
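     /*
      * Teardown order matters here: scsi_remove_host() first, so no new
      * commands can arrive while the per-device info list is being freed,
      * and scsi_host_put() last to drop the reference taken at allocation
      * time.
      */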
4698
4699 static int pseudo_lld_bus_match(struct device *dev,
4700                                 struct device_driver *dev_driver)
4701 {
4702         return 1;
4703 }
4704
4705 static struct bus_type pseudo_lld_bus = {
4706         .name = "pseudo",
4707         .match = pseudo_lld_bus_match,
4708         .probe = sdebug_driver_probe,
4709         .remove = sdebug_driver_remove,
4710         .drv_groups = sdebug_drv_groups,
4711 };
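     /*
      * The pseudo bus ties the pieces together: every adapter registered by
      * sdebug_add_adapter() sits on this bus, match() accepts any driver,
      * and so sdebug_driver_probe()/sdebug_driver_remove() run as devices
      * come and go.  Assuming the usual sysfs layout, the adapters then show
      * up under /sys/bus/pseudo/devices/adapter0, adapter1, ...
      */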