1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/completion.h>
13 #include <linux/kernel.h>
14 #include <linux/mempool.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/pci.h>
18 #include <linux/delay.h>
19
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_dbg.h>
22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_driver.h>
24 #include <scsi/scsi_eh.h>
25 #include <scsi/scsi_host.h>
26 #include <scsi/scsi_request.h>
27
28 #include "scsi_priv.h"
29 #include "scsi_logging.h"
30
31
32 #define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
33 #define SG_MEMPOOL_SIZE         32
34
35 struct scsi_host_sg_pool {
36         size_t          size;
37         char            *name; 
38         kmem_cache_t    *slab;
39         mempool_t       *pool;
40 };
41
42 #if (SCSI_MAX_PHYS_SEGMENTS < 32)
43 #error SCSI_MAX_PHYS_SEGMENTS is too small
44 #endif
45
46 #define SP(x) { x, "sgpool-" #x } 
47 static struct scsi_host_sg_pool scsi_sg_pools[] = {
48         SP(8),
49         SP(16),
50         SP(32),
51 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
52         SP(64),
53 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
54         SP(128),
55 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
56         SP(256),
57 #if (SCSI_MAX_PHYS_SEGMENTS > 256)
58 #error SCSI_MAX_PHYS_SEGMENTS is too large
59 #endif
60 #endif
61 #endif
62 #endif
63 };      
64 #undef SP
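/*
 * A note on how commands map onto these pools (informal sketch that
 * mirrors the switch in scsi_alloc_sgtable() below): cmd->use_sg is
 * rounded up to the next pool size, and cmd->sglist_len records the
 * pool index so scsi_free_sgtable() can return the table to the same
 * pool.
 *
 *      use_sg  1..8    -> sgpool-8    (sglist_len 0)
 *      use_sg  9..16   -> sgpool-16   (sglist_len 1)
 *      use_sg 17..32   -> sgpool-32   (sglist_len 2)
 *      ... and so on, as far as SCSI_MAX_PHYS_SEGMENTS allows.
 */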
65
66
67 /*
68  * Function:    scsi_insert_special_req()
69  *
70  * Purpose:     Insert pre-formed request into request queue.
71  *
72  * Arguments:   sreq    - request that is ready to be queued.
73  *              at_head - boolean.  True if we should insert at head
74  *                        of queue, false if we should insert at tail.
75  *
76  * Lock status: Assumed that lock is not held upon entry.
77  *
78  * Returns:     Nothing
79  *
80  * Notes:       This function is called from character device and from
81  *              ioctl types of functions where the caller knows exactly
82  *              what SCSI command needs to be issued.   The idea is that
83  *              we merely inject the command into the queue (at the head
84  *              for now), and then call the queue request function to actually
85  *              process it.
86  */
87 int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
88 {
89         /*
90          * Because users of this function are apt to reuse requests with no
91          * modification, we have to sanitise the request flags here
92          */
93         sreq->sr_request->flags &= ~REQ_DONTPREP;
94         blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
95                            at_head, sreq);
96         return 0;
97 }
98
99 static void scsi_run_queue(struct request_queue *q);
100
101 /*
102  * Function:    scsi_unprep_request()
103  *
104  * Purpose:     Remove all preparation done for a request, including its
105  *              associated scsi_cmnd, so that it can be requeued.
106  *
107  * Arguments:   req     - request to unprepare
108  *
109  * Lock status: Assumed that no locks are held upon entry.
110  *
111  * Returns:     Nothing.
112  */
113 static void scsi_unprep_request(struct request *req)
114 {
115         struct scsi_cmnd *cmd = req->special;
116
117         req->flags &= ~REQ_DONTPREP;
118         req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
119
120         scsi_put_command(cmd);
121 }
122
123 /*
124  * Function:    scsi_queue_insert()
125  *
126  * Purpose:     Insert a command in the midlevel queue.
127  *
128  * Arguments:   cmd    - command that we are adding to queue.
129  *              reason - why we are inserting command to queue.
130  *
131  * Lock status: Assumed that lock is not held upon entry.
132  *
133  * Returns:     Nothing.
134  *
135  * Notes:       We do this for one of two cases.  Either the host is busy
136  *              and it cannot accept any more commands for the time being,
137  *              or the device returned QUEUE_FULL and can accept no more
138  *              commands.
139  * Notes:       This could be called either from an interrupt context or a
140  *              normal process context.
141  */
142 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
143 {
144         struct Scsi_Host *host = cmd->device->host;
145         struct scsi_device *device = cmd->device;
146         struct request_queue *q = device->request_queue;
147         unsigned long flags;
148
149         SCSI_LOG_MLQUEUE(1,
150                  printk("Inserting command %p into mlqueue\n", cmd));
151
152         /*
153          * Set the appropriate busy bit for the device/host.
154          *
155          * If the host/device isn't busy, assume that something actually
156          * completed, and that we should be able to queue a command now.
157          *
158          * Note that the prior mid-layer assumption that any host could
159          * always queue at least one command is now broken.  The mid-layer
160          * will implement a user specifiable stall (see
161          * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
162          * if a command is requeued with no other commands outstanding
163          * either for the device or for the host.
164          */
165         if (reason == SCSI_MLQUEUE_HOST_BUSY)
166                 host->host_blocked = host->max_host_blocked;
167         else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
168                 device->device_blocked = device->max_device_blocked;
169
170         /*
171          * Decrement the counters, since these commands are no longer
172          * active on the host/device.
173          */
174         scsi_device_unbusy(device);
175
176         /*
177          * Requeue this command.  It will go before all other commands
178          * that are already in the queue.
179          *
180          * NOTE: there is magic here about the way the queue is plugged if
181          * we have no outstanding commands.
182          * 
183          * Although we *don't* plug the queue, we call the request
184          * function.  The SCSI request function detects the blocked condition
185          * and plugs the queue appropriately.
186          */
187         spin_lock_irqsave(q->queue_lock, flags);
188         blk_requeue_request(q, cmd->request);
189         spin_unlock_irqrestore(q->queue_lock, flags);
190
191         scsi_run_queue(q);
192
193         return 0;
194 }
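/*
 * Sketch of how a command usually arrives here (hypothetical low-level
 * driver; hba_is_saturated() is an invented helper, not a real API):
 * the LLDD's queuecommand() refuses the command, and the mid-layer
 * dispatch path then requeues it with the matching reason code.
 *
 *      static int example_queuecommand(struct scsi_cmnd *cmd,
 *                                      void (*done)(struct scsi_cmnd *))
 *      {
 *              if (hba_is_saturated())
 *                      return SCSI_MLQUEUE_HOST_BUSY;
 *              ...
 *      }
 *
 * A SCSI_MLQUEUE_HOST_BUSY return ends up as
 * scsi_queue_insert(cmd, SCSI_MLQUEUE_HOST_BUSY), which sets
 * host_blocked and requeues the request as above.
 */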
195
196 /*
197  * Function:    scsi_do_req
198  *
199  * Purpose:     Queue a SCSI request
200  *
201  * Arguments:   sreq      - command descriptor.
202  *              cmnd      - actual SCSI command to be performed.
203  *              buffer    - data buffer.
204  *              bufflen   - size of data buffer.
205  *              done      - completion function to be run.
206  *              timeout   - how long to let it run before timeout.
207  *              retries   - number of retries we allow.
208  *
209  * Lock status: No locks held upon entry.
210  *
211  * Returns:     Nothing.
212  *
213  * Notes:       This function is only used for queueing requests for things
214  *              like ioctls and character device requests - this is because
215  *              we essentially just inject a request into the queue for the
216  *              device.
217  *
218  *              In order to support the scsi_device_quiesce function, we
219  *              now inject requests on the *head* of the device queue
220  *              rather than the tail.
221  */
222 void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
223                  void *buffer, unsigned bufflen,
224                  void (*done)(struct scsi_cmnd *),
225                  int timeout, int retries)
226 {
227         /*
228          * If the upper level driver is reusing these things, then
229          * we should release the low-level block now.  Another one will
230          * be allocated later when this request is getting queued.
231          */
232         __scsi_release_request(sreq);
233
234         /*
235          * Our own function scsi_done (which marks the host as not busy,
236          * disables the timeout counter, etc) will be called either by us
237          * or by the host's queuecommand() path, and must in turn call
238          * the completion function for the high level driver.
239          */
240         memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
241         sreq->sr_bufflen = bufflen;
242         sreq->sr_buffer = buffer;
243         sreq->sr_allowed = retries;
244         sreq->sr_done = done;
245         sreq->sr_timeout_per_command = timeout;
246
247         if (sreq->sr_cmd_len == 0)
248                 sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
249
250         /*
251          * head injection *required* here otherwise quiesce won't work
252          */
253         scsi_insert_special_req(sreq, 1);
254 }
255 EXPORT_SYMBOL(scsi_do_req);
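/*
 * Typical asynchronous use, sketched under the assumption that the
 * caller obtained a scsi_request from scsi_allocate_request() and
 * supplies its own completion callback (my_done below is a placeholder
 * name, not a real function):
 *
 *      struct scsi_request *sreq;
 *      unsigned char cmnd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
 *
 *      sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *      if (!sreq)
 *              return -ENOMEM;
 *      sreq->sr_data_direction = DMA_NONE;
 *      scsi_do_req(sreq, cmnd, NULL, 0, my_done, SCSI_TIMEOUT, 3);
 *
 * my_done() runs on completion; the request is released separately.
 */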
256
257 /* This is the end routine we get to if a command was never attached
258  * to the request.  Simply complete the request without changing
259  * rq_status; this will cause a DRIVER_ERROR. */
260 static void scsi_wait_req_end_io(struct request *req)
261 {
262         BUG_ON(!req->waiting);
263
264         complete(req->waiting);
265 }
266
267 void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
268                    unsigned bufflen, int timeout, int retries)
269 {
270         DECLARE_COMPLETION(wait);
271         int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
272         struct request *req;
273
274         req = blk_get_request(sreq->sr_device->request_queue, write,
275                               __GFP_WAIT);
276         if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
277                                        buffer, bufflen, __GFP_WAIT)) {
278                 sreq->sr_result = DRIVER_ERROR << 24;
279                 blk_put_request(req);
280                 return;
281         }
282
283         req->flags |= REQ_NOMERGE;
284         req->waiting = &wait;
285         req->end_io = scsi_wait_req_end_io;
286         req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
287         req->sense = sreq->sr_sense_buffer;
288         req->sense_len = 0;
289         memcpy(req->cmd, cmnd, req->cmd_len);
290         req->timeout = timeout;
291         req->flags |= REQ_BLOCK_PC;
292         req->rq_disk = NULL;
293         blk_insert_request(sreq->sr_device->request_queue, req,
294                            sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
295         wait_for_completion(&wait);
296         sreq->sr_request->waiting = NULL;
297         sreq->sr_result = req->errors;
298         if (req->errors)
299                 sreq->sr_result |= (DRIVER_ERROR << 24);
300
301         blk_put_request(req);
302 }
303
304 EXPORT_SYMBOL(scsi_wait_req);
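/*
 * scsi_wait_req() is the synchronous counterpart: it pushes the command
 * through the block layer and sleeps on a completion until it finishes.
 * A minimal sketch (same assumptions as the scsi_do_req example above):
 *
 *      sreq->sr_data_direction = DMA_NONE;
 *      scsi_wait_req(sreq, cmnd, NULL, 0, SCSI_TIMEOUT, 3);
 *      if (sreq->sr_result)
 *              ... inspect sreq->sr_sense_buffer ...
 *
 * Any failure, including DRIVER_ERROR in the driver byte, is left in
 * sreq->sr_result.
 */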
305
306 /**
307  * scsi_execute - insert request and wait for the result
308  * @sdev:       scsi device
309  * @cmd:        scsi command
310  * @data_direction: data direction
311  * @buffer:     data buffer
312  * @bufflen:    length of the buffer in bytes
313  * @sense:      optional sense buffer
314  * @timeout:    request timeout in jiffies
315  * @retries:    number of times to retry the request
316  * @flags:      flags to OR into the request flags
317  *
318  * returns the req->errors value, which is the scsi_cmnd result
319  * field.
320  **/
321 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
322                  int data_direction, void *buffer, unsigned bufflen,
323                  unsigned char *sense, int timeout, int retries, int flags)
324 {
325         struct request *req;
326         int write = (data_direction == DMA_TO_DEVICE);
327         int ret = DRIVER_ERROR << 24;
328
329         req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
330
331         if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
332                                         buffer, bufflen, __GFP_WAIT))
333                 goto out;
334
335         req->cmd_len = COMMAND_SIZE(cmd[0]);
336         memcpy(req->cmd, cmd, req->cmd_len);
337         req->sense = sense;
338         req->sense_len = 0;
339         req->timeout = timeout;
340         req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
341
342         /*
343          * head injection *required* here otherwise quiesce won't work
344          */
345         blk_execute_rq(req->q, NULL, req, 1);
346
347         ret = req->errors;
348  out:
349         blk_put_request(req);
350
351         return ret;
352 }
353 EXPORT_SYMBOL(scsi_execute);
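/*
 * Illustrative caller (a sketch only; sdev and the 30 second timeout
 * are assumptions of the example):
 *
 *      unsigned char cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      int result;
 *
 *      result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0,
 *                            NULL, 30 * HZ, 3, 0);
 *      if (status_byte(result) == CHECK_CONDITION)
 *              ... no sense buffer was passed, so supply one and retry ...
 */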
354
355
356 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
357                      int data_direction, void *buffer, unsigned bufflen,
358                      struct scsi_sense_hdr *sshdr, int timeout, int retries)
359 {
360         char *sense = NULL;
361         int result;
362         
363         if (sshdr) {
364                 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
365                 if (!sense)
366                         return DRIVER_ERROR << 24;
367                 memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
368         }
369         result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
370                                   sense, timeout, retries, 0);
371         if (sshdr)
372                 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
373
374         kfree(sense);
375         return result;
376 }
377 EXPORT_SYMBOL(scsi_execute_req);
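/*
 * The _req variant decodes any sense data into a scsi_sense_hdr for the
 * caller.  Sketch (same assumptions as the scsi_execute example):
 *
 *      struct scsi_sense_hdr sshdr;
 *      int result;
 *
 *      result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *                                &sshdr, 30 * HZ, 3);
 *      if (result && scsi_sense_valid(&sshdr))
 *              ... act on sshdr.sense_key / sshdr.asc / sshdr.ascq ...
 */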
378
379 /*
380  * Function:    scsi_init_cmd_errh()
381  *
382  * Purpose:     Initialize cmd fields related to error handling.
383  *
384  * Arguments:   cmd     - command that is ready to be queued.
385  *
386  * Returns:     Nothing
387  *
388  * Notes:       This function has the job of initializing a number of
389  *              fields related to error handling.   Typically this will
390  *              be called once for each command, as required.
391  */
392 static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
393 {
394         cmd->serial_number = 0;
395
396         memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
397
398         if (cmd->cmd_len == 0)
399                 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
400
401         /*
402          * We need saved copies of a number of fields - this is because
403          * error handling may need to overwrite these with different values
404          * to run different commands, and once error handling is complete,
405          * we will need to restore these values prior to running the actual
406          * command.
407          */
408         cmd->old_use_sg = cmd->use_sg;
409         cmd->old_cmd_len = cmd->cmd_len;
410         cmd->sc_old_data_direction = cmd->sc_data_direction;
411         cmd->old_underflow = cmd->underflow;
412         memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
413         cmd->buffer = cmd->request_buffer;
414         cmd->bufflen = cmd->request_bufflen;
415
416         return 1;
417 }
418
419 /*
420  * Function:   scsi_setup_cmd_retry()
421  *
422  * Purpose:    Restore the command state for a retry
423  *
424  * Arguments:  cmd      - command to be restored
425  *
426  * Returns:    Nothing
427  *
428  * Notes:      Immediately prior to retrying a command, we need
429  *             to restore certain fields that we saved above.
430  */
431 void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
432 {
433         memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
434         cmd->request_buffer = cmd->buffer;
435         cmd->request_bufflen = cmd->bufflen;
436         cmd->use_sg = cmd->old_use_sg;
437         cmd->cmd_len = cmd->old_cmd_len;
438         cmd->sc_data_direction = cmd->sc_old_data_direction;
439         cmd->underflow = cmd->old_underflow;
440 }
441
442 void scsi_device_unbusy(struct scsi_device *sdev)
443 {
444         struct Scsi_Host *shost = sdev->host;
445         unsigned long flags;
446
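        /*
         * Lock handoff: interrupts stay disabled across both critical
         * sections below, but host_lock is swapped for the queue lock
         * so that device_busy is only ever modified under queue_lock.
         */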
447         spin_lock_irqsave(shost->host_lock, flags);
448         shost->host_busy--;
449         if (unlikely(scsi_host_in_recovery(shost) &&
450                      shost->host_failed))
451                 scsi_eh_wakeup(shost);
452         spin_unlock(shost->host_lock);
453         spin_lock(sdev->request_queue->queue_lock);
454         sdev->device_busy--;
455         spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
456 }
457
458 /*
459  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
460  * and call blk_run_queue for all the scsi_devices on the target,
461  * starting with current_sdev.
462  *
463  * Called with *no* scsi locks held.
464  */
465 static void scsi_single_lun_run(struct scsi_device *current_sdev)
466 {
467         struct Scsi_Host *shost = current_sdev->host;
468         struct scsi_device *sdev, *tmp;
469         struct scsi_target *starget = scsi_target(current_sdev);
470         unsigned long flags;
471
472         spin_lock_irqsave(shost->host_lock, flags);
473         starget->starget_sdev_user = NULL;
474         spin_unlock_irqrestore(shost->host_lock, flags);
475
476         /*
477          * Call blk_run_queue for all LUNs on the target, starting with
478          * current_sdev. We race with others (to set starget_sdev_user),
479          * but in most cases, we will be first. Ideally, each LU on the
480          * target would get some limited time or requests on the target.
481          */
482         blk_run_queue(current_sdev->request_queue);
483
484         spin_lock_irqsave(shost->host_lock, flags);
485         if (starget->starget_sdev_user)
486                 goto out;
487         list_for_each_entry_safe(sdev, tmp, &starget->devices,
488                         same_target_siblings) {
489                 if (sdev == current_sdev)
490                         continue;
491                 if (scsi_device_get(sdev))
492                         continue;
493
494                 spin_unlock_irqrestore(shost->host_lock, flags);
495                 blk_run_queue(sdev->request_queue);
496                 spin_lock_irqsave(shost->host_lock, flags);
497         
498                 scsi_device_put(sdev);
499         }
500  out:
501         spin_unlock_irqrestore(shost->host_lock, flags);
502 }
503
504 /*
505  * Function:    scsi_run_queue()
506  *
507  * Purpose:     Select a proper request queue to serve next
508  *
509  * Arguments:   q       - last request's queue
510  *
511  * Returns:     Nothing
512  *
513  * Notes:       The previous command was completely finished, start
514  *              a new one if possible.
515  */
516 static void scsi_run_queue(struct request_queue *q)
517 {
518         struct scsi_device *sdev = q->queuedata;
519         struct Scsi_Host *shost = sdev->host;
520         unsigned long flags;
521
522         if (sdev->single_lun)
523                 scsi_single_lun_run(sdev);
524
525         spin_lock_irqsave(shost->host_lock, flags);
526         while (!list_empty(&shost->starved_list) &&
527                !shost->host_blocked && !shost->host_self_blocked &&
528                 !((shost->can_queue > 0) &&
529                   (shost->host_busy >= shost->can_queue))) {
530                 /*
531                  * As long as shost is accepting commands and we have
532                  * starved queues, call blk_run_queue. scsi_request_fn
533                  * drops the queue_lock and can add us back to the
534                  * starved_list.
535                  *
536                  * host_lock protects the starved_list and starved_entry.
537                  * scsi_request_fn must get the host_lock before checking
538                  * or modifying starved_list or starved_entry.
539                  */
540                 sdev = list_entry(shost->starved_list.next,
541                                           struct scsi_device, starved_entry);
542                 list_del_init(&sdev->starved_entry);
543                 spin_unlock_irqrestore(shost->host_lock, flags);
544
545                 blk_run_queue(sdev->request_queue);
546
547                 spin_lock_irqsave(shost->host_lock, flags);
548                 if (unlikely(!list_empty(&sdev->starved_entry)))
549                         /*
550                          * sdev lost a race, and was put back on the
551                          * starved list. This is unlikely but without this
552                          * in theory we could loop forever.
553                          */
554                         break;
555         }
556         spin_unlock_irqrestore(shost->host_lock, flags);
557
558         blk_run_queue(q);
559 }
560
561 /*
562  * Function:    scsi_requeue_command()
563  *
564  * Purpose:     Handle post-processing of completed commands.
565  *
566  * Arguments:   q       - queue to operate on
567  *              cmd     - command that may need to be requeued.
568  *
569  * Returns:     Nothing
570  *
571  * Notes:       After command completion, there may be blocks left
572  *              over which weren't finished by the previous command;
573  *              this can be for a number of reasons - the main one is
574  *              I/O errors in the middle of the request, in which case
575  *              we need to request the blocks that come after the bad
576  *              sector.
577  * Notes:       Upon return, cmd is a stale pointer.
578  */
579 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
580 {
581         struct request *req = cmd->request;
582         unsigned long flags;
583
584         scsi_unprep_request(req);
585         spin_lock_irqsave(q->queue_lock, flags);
586         blk_requeue_request(q, req);
587         spin_unlock_irqrestore(q->queue_lock, flags);
588
589         scsi_run_queue(q);
590 }
591
592 void scsi_next_command(struct scsi_cmnd *cmd)
593 {
594         struct request_queue *q = cmd->device->request_queue;
595
596         scsi_put_command(cmd);
597         scsi_run_queue(q);
598 }
599
600 void scsi_run_host_queues(struct Scsi_Host *shost)
601 {
602         struct scsi_device *sdev;
603
604         shost_for_each_device(sdev, shost)
605                 scsi_run_queue(sdev->request_queue);
606 }
607
608 /*
609  * Function:    scsi_end_request()
610  *
611  * Purpose:     Post-processing of completed commands (usually invoked at end
612  *              of upper level post-processing and scsi_io_completion).
613  *
614  * Arguments:   cmd      - command that is complete.
615  *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
616  *              bytes    - number of bytes of completed I/O
617  *              requeue  - indicates whether we should requeue leftovers.
618  *
619  * Lock status: Assumed that lock is not held upon entry.
620  *
621  * Returns:     cmd if requeue required, NULL otherwise.
622  *
623  * Notes:       This is called for block device requests in order to
624  *              mark some number of sectors as complete.
625  * 
626  *              We are guaranteeing that the request queue will be goosed
627  *              at some point during this call.
628  * Notes:       If cmd was requeued, upon return it will be a stale pointer.
629  */
630 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
631                                           int bytes, int requeue)
632 {
633         request_queue_t *q = cmd->device->request_queue;
634         struct request *req = cmd->request;
635         unsigned long flags;
636
637         /*
638          * If there are blocks left over at the end, set up the command
639          * to queue the remainder of them.
640          */
641         if (end_that_request_chunk(req, uptodate, bytes)) {
642                 int leftover = (req->hard_nr_sectors << 9);
643
644                 if (blk_pc_request(req))
645                         leftover = req->data_len;
646
647                 /* kill remainder if no retries */
648                 if (!uptodate && blk_noretry_request(req))
649                         end_that_request_chunk(req, 0, leftover);
650                 else {
651                         if (requeue) {
652                                 /*
653                                  * Bleah.  Leftovers again.  Stick the
654                                  * leftovers in the front of the
655                                  * queue, and goose the queue again.
656                                  */
657                                 scsi_requeue_command(q, cmd);
658                                 cmd = NULL;
659                         }
660                         return cmd;
661                 }
662         }
663
664         add_disk_randomness(req->rq_disk);
665
666         spin_lock_irqsave(q->queue_lock, flags);
667         if (blk_rq_tagged(req))
668                 blk_queue_end_tag(q, req);
669         end_that_request_last(req);
670         spin_unlock_irqrestore(q->queue_lock, flags);
671
672         /*
673          * This will goose the queue request function at the end, so we don't
674          * need to worry about launching another command.
675          */
676         scsi_next_command(cmd);
677         return NULL;
678 }
679
680 static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
681 {
682         struct scsi_host_sg_pool *sgp;
683         struct scatterlist *sgl;
684
685         BUG_ON(!cmd->use_sg);
686
687         switch (cmd->use_sg) {
688         case 1 ... 8:
689                 cmd->sglist_len = 0;
690                 break;
691         case 9 ... 16:
692                 cmd->sglist_len = 1;
693                 break;
694         case 17 ... 32:
695                 cmd->sglist_len = 2;
696                 break;
697 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
698         case 33 ... 64:
699                 cmd->sglist_len = 3;
700                 break;
701 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
702         case 65 ... 128:
703                 cmd->sglist_len = 4;
704                 break;
705 #if (SCSI_MAX_PHYS_SEGMENTS  > 128)
706         case 129 ... 256:
707                 cmd->sglist_len = 5;
708                 break;
709 #endif
710 #endif
711 #endif
712         default:
713                 return NULL;
714         }
715
716         sgp = scsi_sg_pools + cmd->sglist_len;
717         sgl = mempool_alloc(sgp->pool, gfp_mask);
718         return sgl;
719 }
720
721 static void scsi_free_sgtable(struct scatterlist *sgl, int index)
722 {
723         struct scsi_host_sg_pool *sgp;
724
725         BUG_ON(index >= SG_MEMPOOL_NR);
726
727         sgp = scsi_sg_pools + index;
728         mempool_free(sgl, sgp->pool);
729 }
730
731 /*
732  * Function:    scsi_release_buffers()
733  *
734  * Purpose:     Completion processing for block device I/O requests.
735  *
736  * Arguments:   cmd     - command that we are bailing.
737  *
738  * Lock status: Assumed that no lock is held upon entry.
739  *
740  * Returns:     Nothing
741  *
742  * Notes:       In the event that an upper level driver rejects a
743  *              command, we must release resources allocated during
744  *              the __init_io() function.  Primarily this would involve
745  *              the scatter-gather table, and potentially any bounce
746  *              buffers.
747  */
748 static void scsi_release_buffers(struct scsi_cmnd *cmd)
749 {
750         struct request *req = cmd->request;
751
752         /*
753          * Free up any indirection buffers we allocated for DMA purposes. 
754          */
755         if (cmd->use_sg)
756                 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
757         else if (cmd->request_buffer != req->buffer)
758                 kfree(cmd->request_buffer);
759
760         /*
761          * Zero these out.  They now point to freed memory, and it is
762          * dangerous to hang onto the pointers.
763          */
764         cmd->buffer  = NULL;
765         cmd->bufflen = 0;
766         cmd->request_buffer = NULL;
767         cmd->request_bufflen = 0;
768 }
769
770 /*
771  * Function:    scsi_io_completion()
772  *
773  * Purpose:     Completion processing for block device I/O requests.
774  *
775  * Arguments:   cmd   - command that is finished.
776  *
777  * Lock status: Assumed that no lock is held upon entry.
778  *
779  * Returns:     Nothing
780  *
781  * Notes:       This function is matched in terms of capabilities to
782  *              the function that created the scatter-gather list.
783  *              In other words, if there are no bounce buffers
784  *              (the normal case for most drivers), we don't need
785  *              the logic to deal with cleaning up afterwards.
786  *
787  *              We must do one of several things here:
788  *
789  *              a) Call scsi_end_request.  This will finish off the
790  *                 specified number of sectors.  If we are done, the
791  *                 command block will be released, and the queue
792  *                 function will be goosed.  If we are not done, then
793  *                 scsi_end_request will directly goose the queue.
794  *
795  *              b) We can just use scsi_requeue_command() here.  This would
796  *                 be used if we just wanted to retry, for example.
797  */
798 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
799                         unsigned int block_bytes)
800 {
801         int result = cmd->result;
802         int this_count = cmd->bufflen;
803         request_queue_t *q = cmd->device->request_queue;
804         struct request *req = cmd->request;
805         int clear_errors = 1;
806         struct scsi_sense_hdr sshdr;
807         int sense_valid = 0;
808         int sense_deferred = 0;
809
810         if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
811                 return;
812
813         /*
814          * Free up any indirection buffers we allocated for DMA purposes. 
815          * For the case of a READ, we need to copy the data out of the
816          * bounce buffer and into the real buffer.
817          */
818         if (cmd->use_sg)
819                 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
820         else if (cmd->buffer != req->buffer) {
821                 if (rq_data_dir(req) == READ) {
822                         unsigned long flags;
823                         char *to = bio_kmap_irq(req->bio, &flags);
824                         memcpy(to, cmd->buffer, cmd->bufflen);
825                         bio_kunmap_irq(to, &flags);
826                 }
827                 kfree(cmd->buffer);
828         }
829
830         if (result) {
831                 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
832                 if (sense_valid)
833                         sense_deferred = scsi_sense_is_deferred(&sshdr);
834         }
835         if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
836                 req->errors = result;
837                 if (result) {
838                         clear_errors = 0;
839                         if (sense_valid && req->sense) {
840                                 /*
841                                  * SG_IO wants current and deferred errors
842                                  */
843                                 int len = 8 + cmd->sense_buffer[7];
844
845                                 if (len > SCSI_SENSE_BUFFERSIZE)
846                                         len = SCSI_SENSE_BUFFERSIZE;
847                                 memcpy(req->sense, cmd->sense_buffer,  len);
848                                 req->sense_len = len;
849                         }
850                 } else
851                         req->data_len = cmd->resid;
852         }
853
854         /*
855          * Zero these out.  They now point to freed memory, and it is
856          * dangerous to hang onto the pointers.
857          */
858         cmd->buffer  = NULL;
859         cmd->bufflen = 0;
860         cmd->request_buffer = NULL;
861         cmd->request_bufflen = 0;
862
863         /*
864          * Next deal with any sectors which we were able to correctly
865          * handle.
866          */
867         if (good_bytes >= 0) {
868                 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
869                                               req->nr_sectors, good_bytes));
870                 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
871
872                 if (clear_errors)
873                         req->errors = 0;
874                 /*
875                  * If multiple sectors are requested in one buffer, then
876                  * they will have been finished off by the first command.
877                  * If not, then we have a multi-buffer command.
878                  *
879                  * If block_bytes != 0, it means we had a medium error
880                  * of some sort, and that we want to mark some number of
881                  * sectors as not uptodate.  Thus we want to inhibit
882                  * requeueing right here - we will requeue down below
883                  * when we handle the bad sectors.
884                  */
885
886                 /*
887                  * If the command completed without error, then either
888                  * finish off the rest of the command, or start a new one.
889                  */
890                 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
891                         return;
892         }
893         /*
894          * Now, if we were good little boys and girls, Santa left us a request
895          * sense buffer.  We can extract information from this, so we
896          * can choose a block to remap, etc.
897          */
898         if (sense_valid && !sense_deferred) {
899                 switch (sshdr.sense_key) {
900                 case UNIT_ATTENTION:
901                         if (cmd->device->removable) {
902                                 /* detected disc change.  set a bit 
903                                  * and quietly refuse further access.
904                                  */
905                                 cmd->device->changed = 1;
906                                 scsi_end_request(cmd, 0,
907                                                 this_count, 1);
908                                 return;
909                         } else {
910                                 /*
911                                  * Must have been a power glitch, or a
912                                  * bus reset.  Could not have been a
913                                  * media change, so we just retry the
914                                  * request and see what happens.
915                                  */
916                                 scsi_requeue_command(q, cmd);
917                                 return;
918                         }
919                         break;
920                 case ILLEGAL_REQUEST:
921                         /*
922                          * If we had an ILLEGAL REQUEST returned, then we may
923                          * have performed an unsupported command.  The only
924                          * thing this should be would be a ten byte read where
925                          * only a six byte read was supported.  Also, on a
926                          * system where READ CAPACITY failed, we may have read
927                          * past the end of the disk.
928                          */
929                         if (cmd->device->use_10_for_rw &&
930                             (cmd->cmnd[0] == READ_10 ||
931                              cmd->cmnd[0] == WRITE_10)) {
932                                 cmd->device->use_10_for_rw = 0;
933                                 /*
934                                  * This will cause a retry with a 6-byte
935                                  * command.
936                                  */
937                                 scsi_requeue_command(q, cmd);
938                                 result = 0;
939                         } else {
940                                 scsi_end_request(cmd, 0, this_count, 1);
941                                 return;
942                         }
943                         break;
944                 case NOT_READY:
945                         /*
946                          * If the device is in the process of becoming ready,
947                          * retry.
948                          */
949                         if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
950                                 scsi_requeue_command(q, cmd);
951                                 return;
952                         }
953                         if (!(req->flags & REQ_QUIET))
954                                 scmd_printk(KERN_INFO, cmd,
955                                            "Device not ready.\n");
956                         scsi_end_request(cmd, 0, this_count, 1);
957                         return;
958                 case VOLUME_OVERFLOW:
959                         if (!(req->flags & REQ_QUIET)) {
960                                 scmd_printk(KERN_INFO, cmd,
961                                            "Volume overflow, CDB: ");
962                                 __scsi_print_command(cmd->data_cmnd);
963                                 scsi_print_sense("", cmd);
964                         }
965                         scsi_end_request(cmd, 0, block_bytes, 1);
966                         return;
967                 default:
968                         break;
969                 }
970         }                       /* driver byte != 0 */
971         if (host_byte(result) == DID_RESET) {
972                 /*
973                  * Third party bus reset or reset for error
974                  * recovery reasons.  Just retry the request
975                  * and see what happens.  
976                  */
977                 scsi_requeue_command(q, cmd);
978                 return;
979         }
980         if (result) {
981                 if (!(req->flags & REQ_QUIET)) {
982                         scmd_printk(KERN_INFO, cmd,
983                                    "SCSI error: return code = 0x%x\n", result);
984
985                         if (driver_byte(result) & DRIVER_SENSE)
986                                 scsi_print_sense("", cmd);
987                 }
988                 /*
989                  * Mark a single buffer as not uptodate.  Queue the remainder.
990                  * We sometimes get this cruft in the event that a medium error
991                  * isn't properly reported.
992                  */
993                 block_bytes = req->hard_cur_sectors << 9;
994                 if (!block_bytes)
995                         block_bytes = req->data_len;
996                 scsi_end_request(cmd, 0, block_bytes, 1);
997         }
998 }
999 EXPORT_SYMBOL(scsi_io_completion);
1000
1001 /*
1002  * Function:    scsi_init_io()
1003  *
1004  * Purpose:     SCSI I/O initialize function.
1005  *
1006  * Arguments:   cmd   - Command descriptor we wish to initialize
1007  *
1008  * Returns:     0 on success
1009  *              BLKPREP_DEFER if the failure is retryable
1010  *              BLKPREP_KILL if the failure is fatal
1011  */
1012 static int scsi_init_io(struct scsi_cmnd *cmd)
1013 {
1014         struct request     *req = cmd->request;
1015         struct scatterlist *sgpnt;
1016         int                count;
1017
1018         /*
1019          * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
1020          */
1021         if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
1022                 cmd->request_bufflen = req->data_len;
1023                 cmd->request_buffer = req->data;
1024                 req->buffer = req->data;
1025                 cmd->use_sg = 0;
1026                 return 0;
1027         }
1028
1029         /*
1030          * we used to not use scatter-gather for single-segment requests,
1031          * but now we do (it makes highmem I/O easier to support without
1032          * kmapping pages)
1033          */
1034         cmd->use_sg = req->nr_phys_segments;
1035
1036         /*
1037          * if sg table allocation fails, requeue request later.
1038          */
1039         sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1040         if (unlikely(!sgpnt)) {
1041                 scsi_unprep_request(req);
1042                 return BLKPREP_DEFER;
1043         }
1044
1045         cmd->request_buffer = (char *) sgpnt;
1046         cmd->request_bufflen = req->nr_sectors << 9;
1047         if (blk_pc_request(req))
1048                 cmd->request_bufflen = req->data_len;
1049         req->buffer = NULL;
1050
1051         /* 
1052          * Next, walk the list, and fill in the addresses and sizes of
1053          * each segment.
1054          */
1055         count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1056
1057         /*
1058          * mapped well, send it off
1059          */
1060         if (likely(count <= cmd->use_sg)) {
1061                 cmd->use_sg = count;
1062                 return 0;
1063         }
1064
1065         printk(KERN_ERR "Incorrect number of segments after building list\n");
1066         printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
1067         printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
1068                         req->current_nr_sectors);
1069
1070         /* release the command and kill it */
1071         scsi_release_buffers(cmd);
1072         scsi_put_command(cmd);
1073         return BLKPREP_KILL;
1074 }
1075
1076 static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
1077 {
1078         struct scsi_device *sdev = q->queuedata;
1079         struct scsi_driver *drv;
1080
1081         if (sdev->sdev_state == SDEV_RUNNING) {
1082                 drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1083
1084                 if (drv->prepare_flush)
1085                         return drv->prepare_flush(q, rq);
1086         }
1087
1088         return 0;
1089 }
1090
1091 static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
1092 {
1093         struct scsi_device *sdev = q->queuedata;
1094         struct request *flush_rq = rq->end_io_data;
1095         struct scsi_driver *drv;
1096
1097         if (flush_rq->errors) {
1098                 printk("scsi: barrier error, disabling flush support\n");
1099                 blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1100         }
1101
1102         if (sdev->sdev_state == SDEV_RUNNING) {
1103                 drv = *(struct scsi_driver **) rq->rq_disk->private_data;
1104                 drv->end_flush(q, rq);
1105         }
1106 }
1107
1108 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1109                                sector_t *error_sector)
1110 {
1111         struct scsi_device *sdev = q->queuedata;
1112         struct scsi_driver *drv;
1113
1114         if (sdev->sdev_state != SDEV_RUNNING)
1115                 return -ENXIO;
1116
1117         drv = *(struct scsi_driver **) disk->private_data;
1118         if (drv->issue_flush)
1119                 return drv->issue_flush(&sdev->sdev_gendev, error_sector);
1120
1121         return -EOPNOTSUPP;
1122 }
1123
1124 static void scsi_generic_done(struct scsi_cmnd *cmd)
1125 {
1126         BUG_ON(!blk_pc_request(cmd->request));
1127         scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
1128 }
1129
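/*
 * scsi_prep_fn() is the prep_rq_fn for SCSI request queues.  It returns
 * one of the BLKPREP_* codes: BLKPREP_OK once req->special carries a
 * fully initialized scsi_cmnd, BLKPREP_DEFER when a transient shortage
 * (no command block or sg table) means the request should be retried
 * later, and BLKPREP_KILL when the request can never succeed (offline
 * or deleted device, malformed request).
 */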
1130 static int scsi_prep_fn(struct request_queue *q, struct request *req)
1131 {
1132         struct scsi_device *sdev = q->queuedata;
1133         struct scsi_cmnd *cmd;
1134         int specials_only = 0;
1135
1136         /*
1137          * Just check to see if the device is online.  If it isn't, we
1138          * refuse to process any commands.  The device must be brought
1139          * online before trying any recovery commands
1140          */
1141         if (unlikely(!scsi_device_online(sdev))) {
1142                 sdev_printk(KERN_ERR, sdev,
1143                             "rejecting I/O to offline device\n");
1144                 goto kill;
1145         }
1146         if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1147                 /* OK, we're not in a running state; don't prep
1148                  * user commands */
1149                 if (sdev->sdev_state == SDEV_DEL) {
1150                         /* Device is fully deleted, no commands
1151                          * at all allowed down */
1152                         sdev_printk(KERN_ERR, sdev,
1153                                     "rejecting I/O to dead device\n");
1154                         goto kill;
1155                 }
1156                 /* OK, we only allow special commands (i.e. not
1157                  * user initiated ones) */
1158                 specials_only = sdev->sdev_state;
1159         }
1160
1161         /*
1162          * Find the actual device driver associated with this command.
1163          * The SPECIAL requests are things like character device or
1164          * ioctl requests, which did not originate from ll_rw_blk.  Note that
1165          * the special field is also used to indicate the cmd for
1166          * the remainder of a partially fulfilled request that can 
1167          * come up when there is a medium error.  We have to treat
1168          * these two cases differently.  We differentiate by looking
1169          * at request->cmd, as this tells us the real story.
1170          */
1171         if (req->flags & REQ_SPECIAL && req->special) {
1172                 struct scsi_request *sreq = req->special;
1173
1174                 if (sreq->sr_magic == SCSI_REQ_MAGIC) {
1175                         cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
1176                         if (unlikely(!cmd))
1177                                 goto defer;
1178                         scsi_init_cmd_from_req(cmd, sreq);
1179                 } else
1180                         cmd = req->special;
1181         } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1182
1183                 if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
1184                         if(specials_only == SDEV_QUIESCE ||
1185                                         specials_only == SDEV_BLOCK)
1186                                 goto defer;
1187                         
1188                         sdev_printk(KERN_ERR, sdev,
1189                                     "rejecting I/O to device being removed\n");
1190                         goto kill;
1191                 }
1192                         
1193                         
1194                 /*
1195                  * Now try and find a command block that we can use.
1196                  */
1197                 if (!req->special) {
1198                         cmd = scsi_get_command(sdev, GFP_ATOMIC);
1199                         if (unlikely(!cmd))
1200                                 goto defer;
1201                 } else
1202                         cmd = req->special;
1203                 
1204                 /* pull a tag out of the request if we have one */
1205                 cmd->tag = req->tag;
1206         } else {
1207                 blk_dump_rq_flags(req, "SCSI bad req");
1208                 goto kill;
1209         }
1210         
1211         /* note the overloading of req->special.  When the tag
1212          * is active it always means cmd.  If the tag goes
1213          * back for re-queueing, it may be reset */
1214         req->special = cmd;
1215         cmd->request = req;
1216         
1217         /*
1218          * FIXME: drop the lock here because the functions below
1219          * expect to be called without the queue lock held.  Also,
1220          * previously, we dequeued the request before dropping the
1221          * lock.  We hope REQ_STARTED prevents anything untoward from
1222          * happening now.
1223          */
1224         if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1225                 struct scsi_driver *drv;
1226                 int ret;
1227
1228                 /*
1229                  * This will do a couple of things:
1230                  *  1) Fill in the actual SCSI command.
1231                  *  2) Fill in any other upper-level specific fields
1232                  * (timeout).
1233                  *
1234                  * If this returns 0, it means that the request failed
1235                  * (reading past end of disk, reading offline device,
1236                  * etc).   This won't actually talk to the device, but
1237                  * some kinds of consistency checking may cause the     
1238                  * request to be rejected immediately.
1239                  */
1240
1241                 /* 
1242                  * This sets up the scatter-gather table (allocating if
1243                  * required).
1244                  */
1245                 ret = scsi_init_io(cmd);
1246                 switch(ret) {
1247                         /* For BLKPREP_KILL/DEFER the cmd was released */
1248                 case BLKPREP_KILL:
1249                         goto kill;
1250                 case BLKPREP_DEFER:
1251                         goto defer;
1252                 }
1253                 
1254                 /*
1255                  * Initialize the actual SCSI command for this request.
1256                  */
1257                 if (req->rq_disk) {
1258                         drv = *(struct scsi_driver **)req->rq_disk->private_data;
1259                         if (unlikely(!drv->init_command(cmd))) {
1260                                 scsi_release_buffers(cmd);
1261                                 scsi_put_command(cmd);
1262                                 goto kill;
1263                         }
1264                 } else {
1265                         memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1266                         cmd->cmd_len = req->cmd_len;
1267                         if (rq_data_dir(req) == WRITE)
1268                                 cmd->sc_data_direction = DMA_TO_DEVICE;
1269                         else if (req->data_len)
1270                                 cmd->sc_data_direction = DMA_FROM_DEVICE;
1271                         else
1272                                 cmd->sc_data_direction = DMA_NONE;
1273                         
1274                         cmd->transfersize = req->data_len;
1275                         cmd->allowed = 3;
1276                         cmd->timeout_per_command = req->timeout;
1277                         cmd->done = scsi_generic_done;
1278                 }
1279         }
1280
1281         /*
1282          * The request is now prepped, no need to come back here
1283          */
1284         req->flags |= REQ_DONTPREP;
1285         return BLKPREP_OK;
1286
1287  defer:
1288         /* If we defer, elv_next_request() returns NULL, but the
1289          * queue must be restarted, so we plug here if no returning
1290          * command will automatically do that. */
1291         if (sdev->device_busy == 0)
1292                 blk_plug_device(q);
1293         return BLKPREP_DEFER;
1294  kill:
1295         req->errors = DID_NO_CONNECT << 16;
1296         return BLKPREP_KILL;
1297 }
1298
1299 /*
1300  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1301  * return 0.
1302  *
1303  * Called with the queue_lock held.
1304  */
1305 static inline int scsi_dev_queue_ready(struct request_queue *q,
1306                                   struct scsi_device *sdev)
1307 {
1308         if (sdev->device_busy >= sdev->queue_depth)
1309                 return 0;
1310         if (sdev->device_busy == 0 && sdev->device_blocked) {
1311                 /*
1312                  * unblock after device_blocked iterates to zero
1313                  */
1314                 if (--sdev->device_blocked == 0) {
1315                         SCSI_LOG_MLQUEUE(3,
1316                                    sdev_printk(KERN_INFO, sdev,
1317                                    "unblocking device at zero depth\n"));
1318                 } else {
1319                         blk_plug_device(q);
1320                         return 0;
1321                 }
1322         }
1323         if (sdev->device_blocked)
1324                 return 0;
1325
1326         return 1;
1327 }
1328
1329 /*
1330  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1331  * return 0. We must end up running the queue again whenever 0 is
1332  * returned, else IO can hang.
1333  *
1334  * Called with host_lock held.
1335  */
1336 static inline int scsi_host_queue_ready(struct request_queue *q,
1337                                    struct Scsi_Host *shost,
1338                                    struct scsi_device *sdev)
1339 {
1340         if (scsi_host_in_recovery(shost))
1341                 return 0;
1342         if (shost->host_busy == 0 && shost->host_blocked) {
1343                 /*
1344                  * unblock after host_blocked iterates to zero
1345                  */
1346                 if (--shost->host_blocked == 0) {
1347                         SCSI_LOG_MLQUEUE(3,
1348                                 printk("scsi%d unblocking host at zero depth\n",
1349                                         shost->host_no));
1350                 } else {
1351                         blk_plug_device(q);
1352                         return 0;
1353                 }
1354         }
1355         if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1356             shost->host_blocked || shost->host_self_blocked) {
1357                 if (list_empty(&sdev->starved_entry))
1358                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1359                 return 0;
1360         }
1361
1362         /* We're OK to process the command, so we can't be starved */
1363         if (!list_empty(&sdev->starved_entry))
1364                 list_del_init(&sdev->starved_entry);
1365
1366         return 1;
1367 }
1368
1369 /*
1370  * Kill a request for a dead device
1371  */
1372 static void scsi_kill_request(struct request *req, request_queue_t *q)
1373 {
1374         struct scsi_cmnd *cmd = req->special;
1375
1376         blkdev_dequeue_request(req);
1377
1378         if (unlikely(cmd == NULL)) {
1379                 printk(KERN_CRIT "impossible request in %s.\n",
1380                                  __FUNCTION__);
1381                 BUG();
1382         }
1383
1384         scsi_init_cmd_errh(cmd);
1385         cmd->result = DID_NO_CONNECT << 16;
1386         atomic_inc(&cmd->device->iorequest_cnt);
1387         __scsi_done(cmd);
1388 }
1389
1390 /*
1391  * Function:    scsi_request_fn()
1392  *
1393  * Purpose:     Main strategy routine for SCSI.
1394  *
1395  * Arguments:   q       - Pointer to actual queue.
1396  *
1397  * Returns:     Nothing
1398  *
1399  * Lock status: IO request lock assumed to be held when called.
1400  */
1401 static void scsi_request_fn(struct request_queue *q)
1402 {
1403         struct scsi_device *sdev = q->queuedata;
1404         struct Scsi_Host *shost;
1405         struct scsi_cmnd *cmd;
1406         struct request *req;
1407
1408         if (!sdev) {
1409                 printk("scsi: killing requests for dead queue\n");
1410                 while ((req = elv_next_request(q)) != NULL)
1411                         scsi_kill_request(req, q);
1412                 return;
1413         }
1414
1415         if(!get_device(&sdev->sdev_gendev))
1416                 /* We must be tearing the block queue down already */
1417                 return;
1418
1419         /*
1420          * To start with, we keep looping until the queue is empty, or until
1421          * the host is no longer able to accept any more requests.
1422          */
1423         shost = sdev->host;
1424         while (!blk_queue_plugged(q)) {
1425                 int rtn;
1426                 /*
1427                  * get next queueable request.  We do this early to make sure
1428                  * that the request is fully prepared even if we cannot 
1429                  * accept it.
1430                  */
1431                 req = elv_next_request(q);
1432                 if (!req || !scsi_dev_queue_ready(q, sdev))
1433                         break;
1434
1435                 if (unlikely(!scsi_device_online(sdev))) {
1436                         sdev_printk(KERN_ERR, sdev,
1437                                     "rejecting I/O to offline device\n");
1438                         scsi_kill_request(req, q);
1439                         continue;
1440                 }
1441
1442
1443                 /*
1444                  * Remove the request from the request list.  On tagged
                     * queues a successful blk_queue_start_tag() dequeues the
                     * request itself, so only dequeue it here otherwise.
1445                  */
1446                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1447                         blkdev_dequeue_request(req);
1448                 sdev->device_busy++;
1449
1450                 spin_unlock(q->queue_lock);
1451                 cmd = req->special;
1452                 if (unlikely(cmd == NULL)) {
1453                         printk(KERN_CRIT "impossible request in %s.\n"
1454                                          "please mail a stack trace to "
1455                                          "linux-scsi@vger.kernel.org\n",
1456                                          __FUNCTION__);
1457                         BUG();
1458                 }
1459                 spin_lock(shost->host_lock);
1460
1461                 if (!scsi_host_queue_ready(q, shost, sdev))
1462                         goto not_ready;
1463                 if (sdev->single_lun) {
1464                         if (scsi_target(sdev)->starget_sdev_user &&
1465                             scsi_target(sdev)->starget_sdev_user != sdev)
1466                                 goto not_ready;
1467                         scsi_target(sdev)->starget_sdev_user = sdev;
1468                 }
1469                 shost->host_busy++;
1470
1471                 /*
1472                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1473                  *              take the lock again.
1474                  */
1475                 spin_unlock_irq(shost->host_lock);
1476
1477                 /*
1478                  * Finally, initialize any error handling parameters, and set up
1479                  * the timers for timeouts.
1480                  */
1481                 scsi_init_cmd_errh(cmd);
1482
1483                 /*
1484                  * Dispatch the command to the low-level driver.
1485                  */
1486                 rtn = scsi_dispatch_cmd(cmd);
1487                 spin_lock_irq(q->queue_lock);
1488                 if (rtn) {
1489                         /* we're refusing the command: because of
1490                          * the way locks get dropped, we need to
1491                          * check here whether plugging is required */
1492                         if (sdev->device_busy == 0)
1493                                 blk_plug_device(q);
1494
1495                         break;
1496                 }
1497         }
1498
1499         goto out;
1500
1501  not_ready:
1502         spin_unlock_irq(shost->host_lock);
1503
1504         /*
1505          * lock q, handle tag, requeue req, and decrement device_busy. We
1506          * must return with queue_lock held.
1507          *
1508          * Decrementing device_busy without checking it is OK, as all such
1509          * cases (host limits or settings) should run the queue at some
1510          * later time.
1511          */
1512         spin_lock_irq(q->queue_lock);
1513         blk_requeue_request(q, req);
1514         sdev->device_busy--;
1515         if (sdev->device_busy == 0)
1516                 blk_plug_device(q);
1517  out:
1518         /* must be careful here...if we trigger the ->remove() function
1519          * we cannot be holding the q lock */
1520         spin_unlock_irq(q->queue_lock);
1521         put_device(&sdev->sdev_gendev);
1522         spin_lock_irq(q->queue_lock);
1523 }
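
/*
 * Locking summary for scsi_request_fn(), read off the code above rather
 * than any separate documentation: queue_lock is held on entry and exit,
 * dropped across command lookup and scsi_dispatch_cmd(), and host_lock
 * is taken only around the host readiness checks and busy accounting.
 */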
1524
1525 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1526 {
1527         struct device *host_dev;
1528         u64 bounce_limit = 0xffffffff;
1529
1530         if (shost->unchecked_isa_dma)
1531                 return BLK_BOUNCE_ISA;
1532         /*
1533          * Platforms with virtual-DMA translation
1534          * hardware have no practical limit.
1535          */
1536         if (!PCI_DMA_BUS_IS_PHYS)
1537                 return BLK_BOUNCE_ANY;
1538
1539         host_dev = scsi_get_device(shost);
1540         if (host_dev && host_dev->dma_mask)
1541                 bounce_limit = *host_dev->dma_mask;
1542
1543         return bounce_limit;
1544 }
1545 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
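
/*
 * Illustrative use (hypothetical caller): a driver managing an extra
 * request queue of its own could apply the same limit:
 *
 *	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 */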
1546
1547 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1548 {
1549         struct Scsi_Host *shost = sdev->host;
1550         struct request_queue *q;
1551
1552         q = blk_init_queue(scsi_request_fn, NULL);
1553         if (!q)
1554                 return NULL;
1555
1556         blk_queue_prep_rq(q, scsi_prep_fn);
1557
1558         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1559         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1560         blk_queue_max_sectors(q, shost->max_sectors);
1561         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1562         blk_queue_segment_boundary(q, shost->dma_boundary);
1563         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1564
1565         /*
1566          * ordered tags are superior to flush ordering
1567          */
1568         if (shost->ordered_tag)
1569                 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1570         else if (shost->ordered_flush) {
1571                 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1572                 q->prepare_flush_fn = scsi_prepare_flush_fn;
1573                 q->end_flush_fn = scsi_end_flush_fn;
1574         }
1575
1576         if (!shost->use_clustering)
1577                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1578         return q;
1579 }
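
/*
 * Illustrative host template (example_template and its values are
 * hypothetical) showing where the knobs consumed above originate:
 *
 *	static struct scsi_host_template example_template = {
 *		.sg_tablesize	= 128,
 *		.max_sectors	= 256,
 *		.dma_boundary	= 0xffffffff,
 *		.use_clustering	= ENABLE_CLUSTERING,
 *	};
 */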
1580
1581 void scsi_free_queue(struct request_queue *q)
1582 {
1583         blk_cleanup_queue(q);
1584 }
1585
1586 /*
1587  * Function:    scsi_block_requests()
1588  *
1589  * Purpose:     Utility function used by low-level drivers to prevent further
1590  *              commands from being queued to the device.
1591  *
1592  * Arguments:   shost       - Host in question
1593  *
1594  * Returns:     Nothing
1595  *
1596  * Lock status: No locks are assumed held.
1597  *
1598  * Notes:       There is no timer nor any other means by which the requests
1599  *              get unblocked other than the low-level driver calling
1600  *              scsi_unblock_requests().
1601  */
1602 void scsi_block_requests(struct Scsi_Host *shost)
1603 {
1604         shost->host_self_blocked = 1;
1605 }
1606 EXPORT_SYMBOL(scsi_block_requests);
1607
1608 /*
1609  * Function:    scsi_unblock_requests()
1610  *
1611  * Purpose:     Utility function used by low-level drivers to allow further
1612  *              commands to be queued to the device.
1613  *
1614  * Arguments:   shost       - Host in question
1615  *
1616  * Returns:     Nothing
1617  *
1618  * Lock status: No locks are assumed held.
1619  *
1620  * Notes:       Requests blocked by scsi_block_requests() stay blocked
1621  *              until the low-level driver calls this function; no timer
1622  *              or other mechanism unblocks them.
1623  *
1624  *              This is done as an API function so that changes to the
1625  *              internals of the scsi mid-layer won't require wholesale
1626  *              changes to drivers that use this feature.
1627  */
1628 void scsi_unblock_requests(struct Scsi_Host *shost)
1629 {
1630         shost->host_self_blocked = 0;
1631         scsi_run_host_queues(shost);
1632 }
1633 EXPORT_SYMBOL(scsi_unblock_requests);
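
/*
 * Illustrative pairing (example_reset_hardware() is hypothetical): a
 * low-level driver would typically bracket an internal reset like so:
 *
 *	scsi_block_requests(shost);
 *	example_reset_hardware(shost);
 *	scsi_unblock_requests(shost);
 */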
1634
1635 int __init scsi_init_queue(void)
1636 {
1637         int i;
1638
1639         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1640                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1641                 int size = sgp->size * sizeof(struct scatterlist);
1642
1643                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1644                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1645                 if (!sgp->slab) {
1646                         printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1647                                         sgp->name);
                             /* don't build a mempool on top of a NULL slab */
                             return -ENOMEM;
1648                 }
1649
1650                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1651                                 mempool_alloc_slab, mempool_free_slab,
1652                                 sgp->slab);
1653                 if (!sgp->pool) {
1654                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1655                                         sgp->name);
                             return -ENOMEM;
1656                 }
1657         }
1658
1659         return 0;
1660 }
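
/*
 * Illustrative sketch (hypothetical caller; `index' is assumed to pick
 * the smallest pool whose size covers the segment count): scatterlist
 * arrays come and go through the pools built above:
 *
 *	struct scsi_host_sg_pool *sgp = scsi_sg_pools + index;
 *	struct scatterlist *sgl = mempool_alloc(sgp->pool, GFP_ATOMIC);
 *	...
 *	mempool_free(sgl, sgp->pool);
 */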
1661
1662 void scsi_exit_queue(void)
1663 {
1664         int i;
1665
1666         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1667                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1668                 mempool_destroy(sgp->pool);
1669                 kmem_cache_destroy(sgp->slab);
1670         }
1671 }
1672 /**
1673  *      scsi_mode_sense - issue a mode sense, falling back from ten to
1674  *              six bytes if necessary.
1675  *      @sdev:  SCSI device to be queried
1676  *      @dbd:   set if mode sense will allow block descriptors to be returned
1677  *      @modepage: mode page being requested
1678  *      @buffer: request buffer (may not be smaller than eight bytes)
1679  *      @len:   length of request buffer.
1680  *      @timeout: command timeout
1681  *      @retries: number of retries before failing
1682  *      @data: returns a structure abstracting the mode header data
1683  *      @sshdr: place to put parsed sense data (or NULL if no sense is
1684  *              to be collected).
1685  *
1686  *      Returns the command result (zero when successful).  On success,
1687  *      data->header_length holds the header offset (either 4 or 8,
1688  *      depending on whether a six or ten byte command was issued).
1689  **/
1690 int
1691 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1692                   unsigned char *buffer, int len, int timeout, int retries,
1693                   struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
1694         unsigned char cmd[12];
1695         int use_10_for_ms;
1696         int header_length;
1697         int result;
1698         struct scsi_sense_hdr my_sshdr;
1699
1700         memset(data, 0, sizeof(*data));
1701         memset(&cmd[0], 0, 12);
1702         cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
1703         cmd[2] = modepage;
1704
1705         /* caller might not be interested in sense, but we need it */
1706         if (!sshdr)
1707                 sshdr = &my_sshdr;
1708
1709  retry:
1710         use_10_for_ms = sdev->use_10_for_ms;
1711
1712         if (use_10_for_ms) {
1713                 if (len < 8)
1714                         len = 8;
1715
1716                 cmd[0] = MODE_SENSE_10;
1717                 cmd[8] = len;
1718                 header_length = 8;
1719         } else {
1720                 if (len < 4)
1721                         len = 4;
1722
1723                 cmd[0] = MODE_SENSE;
1724                 cmd[4] = len;
1725                 header_length = 4;
1726         }
1727
1728         memset(buffer, 0, len);
1729
1730         result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1731                                   sshdr, timeout, retries);
1732
1733         /* This code looks awful: what it's doing is making sure an
1734          * ILLEGAL REQUEST sense return identifies the actual command
1735          * byte as the problem.  MODE_SENSE commands can return
1736          * ILLEGAL REQUEST if the code page isn't supported */
1737
1738         if (use_10_for_ms && !scsi_status_is_good(result) &&
1739             (driver_byte(result) & DRIVER_SENSE)) {
1740                 if (scsi_sense_valid(sshdr)) {
1741                         if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1742                             (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1743                                 /* 
1744                                  * Invalid command operation code
1745                                  */
1746                                 sdev->use_10_for_ms = 0;
1747                                 goto retry;
1748                         }
1749                 }
1750         }
1751
1752         if (scsi_status_is_good(result)) {
1753                 data->header_length = header_length;
1754                 if (use_10_for_ms) {
1755                         data->length = buffer[0]*256 + buffer[1] + 2;
1756                         data->medium_type = buffer[2];
1757                         data->device_specific = buffer[3];
1758                         data->longlba = buffer[4] & 0x01;
1759                         data->block_descriptor_length = buffer[6]*256
1760                                 + buffer[7];
1761                 } else {
1762                         data->length = buffer[0] + 1;
1763                         data->medium_type = buffer[1];
1764                         data->device_specific = buffer[2];
1765                         data->block_descriptor_length = buffer[3];
1766                 }
1767         }
1768
1769         return result;
1770 }
1771 EXPORT_SYMBOL(scsi_mode_sense);
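
/*
 * Illustrative call (the page, timeout and retry values are hypothetical):
 * fetch the caching mode page (0x08) with the DBD bit (0x08) set so no
 * block descriptors are returned:
 *
 *	struct scsi_mode_data data;
 *	unsigned char buf[255];
 *	int res;
 *
 *	res = scsi_mode_sense(sdev, 0x08, 0x08, buf, sizeof(buf),
 *			      30 * HZ, 3, &data, NULL);
 */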
1772
1773 int
1774 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1775 {
1776         unsigned char cmd[] = {
1777                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1778         };
1779         struct scsi_sense_hdr sshdr;
1780         int result;
1781         
1782         result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1783                                   timeout, retries);
1784
1785         if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1786
1787                 if ((scsi_sense_valid(&sshdr)) &&
1788                     ((sshdr.sense_key == UNIT_ATTENTION) ||
1789                      (sshdr.sense_key == NOT_READY))) {
1790                         sdev->changed = 1;
1791                         result = 0;
1792                 }
1793         }
1794         return result;
1795 }
1796 EXPORT_SYMBOL(scsi_test_unit_ready);
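
/*
 * Illustrative use (timeout and retry counts are hypothetical): media
 * polling for a removable device, where UNIT ATTENTION / NOT READY is
 * absorbed into sdev->changed above:
 *
 *	if (scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0 && !sdev->changed)
 *		example_handle_media(sdev);
 *
 * example_handle_media() is a hypothetical helper.
 */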
1797
1798 /**
1799  *      scsi_device_set_state - Take the given device through the device
1800  *              state model.
1801  *      @sdev:  scsi device to change the state of.
1802  *      @state: state to change to.
1803  *
1804  *      Returns zero if successful or an error if the requested
1805  *      transition is illegal.
1806  **/
1807 int
1808 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1809 {
1810         enum scsi_device_state oldstate = sdev->sdev_state;
1811
1812         if (state == oldstate)
1813                 return 0;
1814
1815         switch (state) {
1816         case SDEV_CREATED:
1817                 /* There are no legal states that come back to
1818                  * created.  This is the manually initialised start
1819                  * state */
1820                 goto illegal;
1821                         
1822         case SDEV_RUNNING:
1823                 switch (oldstate) {
1824                 case SDEV_CREATED:
1825                 case SDEV_OFFLINE:
1826                 case SDEV_QUIESCE:
1827                 case SDEV_BLOCK:
1828                         break;
1829                 default:
1830                         goto illegal;
1831                 }
1832                 break;
1833
1834         case SDEV_QUIESCE:
1835                 switch (oldstate) {
1836                 case SDEV_RUNNING:
1837                 case SDEV_OFFLINE:
1838                         break;
1839                 default:
1840                         goto illegal;
1841                 }
1842                 break;
1843
1844         case SDEV_OFFLINE:
1845                 switch (oldstate) {
1846                 case SDEV_CREATED:
1847                 case SDEV_RUNNING:
1848                 case SDEV_QUIESCE:
1849                 case SDEV_BLOCK:
1850                         break;
1851                 default:
1852                         goto illegal;
1853                 }
1854                 break;
1855
1856         case SDEV_BLOCK:
1857                 switch (oldstate) {
1858                 case SDEV_CREATED:
1859                 case SDEV_RUNNING:
1860                         break;
1861                 default:
1862                         goto illegal;
1863                 }
1864                 break;
1865
1866         case SDEV_CANCEL:
1867                 switch (oldstate) {
1868                 case SDEV_CREATED:
1869                 case SDEV_RUNNING:
1870                 case SDEV_OFFLINE:
1871                 case SDEV_BLOCK:
1872                         break;
1873                 default:
1874                         goto illegal;
1875                 }
1876                 break;
1877
1878         case SDEV_DEL:
1879                 switch (oldstate) {
1880                 case SDEV_CANCEL:
1881                         break;
1882                 default:
1883                         goto illegal;
1884                 }
1885                 break;
1886
1887         }
1888         sdev->sdev_state = state;
1889         return 0;
1890
1891  illegal:
1892         SCSI_LOG_ERROR_RECOVERY(1, 
1893                                 sdev_printk(KERN_ERR, sdev,
1894                                             "Illegal state transition %s->%s\n",
1895                                             scsi_device_state_name(oldstate),
1896                                             scsi_device_state_name(state))
1897                                 );
1898         return -EINVAL;
1899 }
1900 EXPORT_SYMBOL(scsi_device_set_state);
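
/*
 * Summary of the legal transitions implemented above, read straight off
 * the switch statement (not from any external documentation):
 *
 *	CREATED -> RUNNING, OFFLINE, BLOCK, CANCEL
 *	RUNNING -> QUIESCE, OFFLINE, BLOCK, CANCEL
 *	QUIESCE -> RUNNING, OFFLINE
 *	OFFLINE -> RUNNING, QUIESCE, CANCEL
 *	BLOCK   -> RUNNING, OFFLINE, CANCEL
 *	CANCEL  -> DEL
 */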
1901
1902 /**
1903  *      scsi_device_quiesce - Block user issued commands.
1904  *      @sdev:  scsi device to quiesce.
1905  *
1906  *      This works by trying to transition to the SDEV_QUIESCE state
1907  *      (which must be a legal transition).  When the device is in this
1908  *      state, only special requests will be accepted, all others will
1909  *      be deferred.  Since special requests may also be requeued requests,
1910  *      a successful return doesn't guarantee the device will be 
1911  *      totally quiescent.
1912  *
1913  *      Must be called with user context, may sleep.
1914  *
1915  *      Returns zero if successful or an error if not.
1916  **/
1917 int
1918 scsi_device_quiesce(struct scsi_device *sdev)
1919 {
1920         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1921         if (err)
1922                 return err;
1923
1924         scsi_run_queue(sdev->request_queue);
1925         while (sdev->device_busy) {
1926                 msleep_interruptible(200);
1927                 scsi_run_queue(sdev->request_queue);
1928         }
1929         return 0;
1930 }
1931 EXPORT_SYMBOL(scsi_device_quiesce);
1932
1933 /**
1934  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1935  *      @sdev:  scsi device to resume.
1936  *
1937  *      Moves the device from quiesced back to running and restarts the
1938  *      queues.
1939  *
1940  *      Must be called with user context, may sleep.
1941  **/
1942 void
1943 scsi_device_resume(struct scsi_device *sdev)
1944 {
1945         if (scsi_device_set_state(sdev, SDEV_RUNNING))
1946                 return;
1947         scsi_run_queue(sdev->request_queue);
1948 }
1949 EXPORT_SYMBOL(scsi_device_resume);
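
/*
 * Illustrative pairing (example_do_maintenance() is hypothetical):
 * quiesce, perform a user-context operation, then resume:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		example_do_maintenance(sdev);
 *		scsi_device_resume(sdev);
 *	}
 */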
1950
1951 static void
1952 device_quiesce_fn(struct scsi_device *sdev, void *data)
1953 {
1954         scsi_device_quiesce(sdev);
1955 }
1956
1957 void
1958 scsi_target_quiesce(struct scsi_target *starget)
1959 {
1960         starget_for_each_device(starget, NULL, device_quiesce_fn);
1961 }
1962 EXPORT_SYMBOL(scsi_target_quiesce);
1963
1964 static void
1965 device_resume_fn(struct scsi_device *sdev, void *data)
1966 {
1967         scsi_device_resume(sdev);
1968 }
1969
1970 void
1971 scsi_target_resume(struct scsi_target *starget)
1972 {
1973         starget_for_each_device(starget, NULL, device_resume_fn);
1974 }
1975 EXPORT_SYMBOL(scsi_target_resume);
1976
1977 /**
1978  * scsi_internal_device_block - internal function to put a device
1979  *                              temporarily into the SDEV_BLOCK state
1980  * @sdev:       device to block
1981  *
1982  * Block request made by SCSI LLDs to temporarily stop all
1983  * scsi commands on the specified device.  Called from interrupt
1984  * or normal process context.
1985  *
1986  * Returns zero if successful or error if not
1987  *
1988  * Notes:       
1989  *      This routine transitions the device to the SDEV_BLOCK state
1990  *      (which must be a legal transition).  When the device is in this
1991  *      state, all commands are deferred until the scsi lld reenables
1992  *      the device with scsi_internal_device_unblock() or device_block_tmo fires.
1993  *      This routine assumes the host_lock is held on entry.
1994  **/
1995 int
1996 scsi_internal_device_block(struct scsi_device *sdev)
1997 {
1998         request_queue_t *q = sdev->request_queue;
1999         unsigned long flags;
2000         int err = 0;
2001
2002         err = scsi_device_set_state(sdev, SDEV_BLOCK);
2003         if (err)
2004                 return err;
2005
2006         /* 
2007          * The device has transitioned to SDEV_BLOCK.  Stop the
2008          * block layer from calling the midlayer with this device's
2009          * request queue. 
2010          */
2011         spin_lock_irqsave(q->queue_lock, flags);
2012         blk_stop_queue(q);
2013         spin_unlock_irqrestore(q->queue_lock, flags);
2014
2015         return 0;
2016 }
2017 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2018  
2019 /**
2020  * scsi_internal_device_unblock - resume a device after a block request
2021  * @sdev:       device to resume
2022  *
2023  * Called by SCSI LLDs or the midlayer to restart the device queue
2024  * for the previously suspended scsi device.  Called from interrupt or
2025  * normal process context.
2026  *
2027  * Returns zero if successful or error if not.
2028  *
2029  * Notes:       
2030  *      This routine transitions the device to the SDEV_RUNNING state
2031  *      (which must be a legal transition) allowing the midlayer to
2032  *      goose the queue for this device.  This routine assumes the 
2033  *      host_lock is held upon entry.
2034  **/
2035 int
2036 scsi_internal_device_unblock(struct scsi_device *sdev)
2037 {
2038         request_queue_t *q = sdev->request_queue; 
2039         int err;
2040         unsigned long flags;
2041         
2042         /* 
2043          * Try to transition the scsi device to SDEV_RUNNING
2044          * and goose the device queue if successful.  
2045          */
2046         err = scsi_device_set_state(sdev, SDEV_RUNNING);
2047         if (err)
2048                 return err;
2049
2050         spin_lock_irqsave(q->queue_lock, flags);
2051         blk_start_queue(q);
2052         spin_unlock_irqrestore(q->queue_lock, flags);
2053
2054         return 0;
2055 }
2056 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
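
/*
 * Illustrative LLD use (the surrounding event handling is hypothetical):
 * per the notes above, host_lock is assumed held around the block call,
 * and the unblock restarts the queue once the transport recovers:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	...
 *	scsi_internal_device_unblock(sdev);
 */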
2057
2058 static void
2059 device_block(struct scsi_device *sdev, void *data)
2060 {
2061         scsi_internal_device_block(sdev);
2062 }
2063
2064 static int
2065 target_block(struct device *dev, void *data)
2066 {
2067         if (scsi_is_target_device(dev))
2068                 starget_for_each_device(to_scsi_target(dev), NULL,
2069                                         device_block);
2070         return 0;
2071 }
2072
2073 void
2074 scsi_target_block(struct device *dev)
2075 {
2076         if (scsi_is_target_device(dev))
2077                 starget_for_each_device(to_scsi_target(dev), NULL,
2078                                         device_block);
2079         else
2080                 device_for_each_child(dev, NULL, target_block);
2081 }
2082 EXPORT_SYMBOL_GPL(scsi_target_block);
2083
2084 static void
2085 device_unblock(struct scsi_device *sdev, void *data)
2086 {
2087         scsi_internal_device_unblock(sdev);
2088 }
2089
2090 static int
2091 target_unblock(struct device *dev, void *data)
2092 {
2093         if (scsi_is_target_device(dev))
2094                 starget_for_each_device(to_scsi_target(dev), NULL,
2095                                         device_unblock);
2096         return 0;
2097 }
2098
2099 void
2100 scsi_target_unblock(struct device *dev)
2101 {
2102         if (scsi_is_target_device(dev))
2103                 starget_for_each_device(to_scsi_target(dev), NULL,
2104                                         device_unblock);
2105         else
2106                 device_for_each_child(dev, NULL, target_unblock);
2107 }
2108 EXPORT_SYMBOL_GPL(scsi_target_unblock);
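
/*
 * Illustrative transport-class use (the rport naming follows the FC
 * transport but is only an assumed example here): suspend a whole target
 * while its remote port is gone, then release it on recovery:
 *
 *	scsi_target_block(&rport->dev);
 *	...
 *	scsi_target_unblock(&rport->dev);
 */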