/*
 *  hosts.c Copyright (C) 1992 Drew Eckhardt
 *          Copyright (C) 1993, 1994, 1995 Eric Youngdale
 *          Copyright (C) 2002-2003 Christoph Hellwig
 *
 *  mid to lowlevel SCSI driver interface
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *  Added QLOGIC QLA1280 SCSI controller kernel host support.
 *     August 4, 1999 Fred Lewis, Intel DuPont
 *
 *  Updated to reflect the new initialization scheme for the higher
 *  level of scsi drivers (sd/sr/st)
 *  September 17, 2000 Torben Mathiasen <tmm@image.dk>
 *
 *  Restructured scsi_host lists and associated functions.
 *  September 04, 2002 Mike Anderson (andmike@us.ibm.com)
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


static int shost_eh_deadline = -1;

module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
                 "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");

static DEFINE_IDA(host_index_ida);


static void scsi_host_cls_release(struct device *dev)
{
        put_device(&class_to_shost(dev)->shost_gendev);
}

static struct class shost_class = {
        .name           = "scsi_host",
        .dev_release    = scsi_host_cls_release,
};

/**
 *      scsi_host_set_state - Take the given host through the host state model.
 *      @shost: scsi host to change the state of.
 *      @state: state to change to.
 *
 *      Returns zero if successful or an error if the requested
 *      transition is illegal.
 **/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{
        enum scsi_host_state oldstate = shost->shost_state;

        if (state == oldstate)
                return 0;

        switch (state) {
        case SHOST_CREATED:
                /* There are no legal states that come back to
                 * created.  This is the manually initialised start
                 * state */
                goto illegal;

        case SHOST_RUNNING:
                switch (oldstate) {
                case SHOST_CREATED:
                case SHOST_RECOVERY:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_RECOVERY:
                switch (oldstate) {
                case SHOST_RUNNING:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_CANCEL:
                switch (oldstate) {
                case SHOST_CREATED:
                case SHOST_RUNNING:
                case SHOST_CANCEL_RECOVERY:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_DEL:
                switch (oldstate) {
                case SHOST_CANCEL:
                case SHOST_DEL_RECOVERY:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_CANCEL_RECOVERY:
                switch (oldstate) {
                case SHOST_CANCEL:
                case SHOST_RECOVERY:
                        break;
                default:
                        goto illegal;
                }
                break;

        case SHOST_DEL_RECOVERY:
                switch (oldstate) {
                case SHOST_CANCEL_RECOVERY:
                        break;
                default:
                        goto illegal;
                }
                break;
        }
        shost->shost_state = state;
        return 0;

 illegal:
        SCSI_LOG_ERROR_RECOVERY(1,
                                shost_printk(KERN_ERR, shost,
                                             "Illegal host state transition"
                                             " %s->%s\n",
                                             scsi_host_state_name(oldstate),
                                             scsi_host_state_name(state)));
        return -EINVAL;
}
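
/*
 * Usage sketch (illustrative only, with a hypothetical caller): state
 * changes are normally made with the host lock held, exactly as
 * scsi_remove_host() below does.  A caller entering error recovery
 * might do:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(shost->host_lock, flags);
 *      if (scsi_host_set_state(shost, SHOST_RECOVERY))
 *              shost_printk(KERN_WARNING, shost,
 *                           "cannot enter recovery from current state\n");
 *      spin_unlock_irqrestore(shost->host_lock, flags);
 */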

/**
 * scsi_remove_host - remove a scsi host
 * @shost:      a pointer to a scsi host to remove
 **/
void scsi_remove_host(struct Scsi_Host *shost)
{
        unsigned long flags;

        mutex_lock(&shost->scan_mutex);
        spin_lock_irqsave(shost->host_lock, flags);
        if (scsi_host_set_state(shost, SHOST_CANCEL))
                if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
                        spin_unlock_irqrestore(shost->host_lock, flags);
                        mutex_unlock(&shost->scan_mutex);
                        return;
                }
        spin_unlock_irqrestore(shost->host_lock, flags);

        scsi_autopm_get_host(shost);
        flush_workqueue(shost->tmf_work_q);
        scsi_forget_host(shost);
        mutex_unlock(&shost->scan_mutex);
        scsi_proc_host_rm(shost);

        spin_lock_irqsave(shost->host_lock, flags);
        if (scsi_host_set_state(shost, SHOST_DEL))
                BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
        spin_unlock_irqrestore(shost->host_lock, flags);

        transport_unregister_device(&shost->shost_gendev);
        device_unregister(&shost->shost_dev);
        device_del(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_remove_host);

/**
 * scsi_add_host_with_dma - add a scsi host with dma device
 * @shost:      scsi host pointer to add
 * @dev:        a struct device of type scsi class
 * @dma_dev:    dma device for the host
 *
 * Note: You rarely need to worry about this unless you're in a
 * virtualised host environment; most drivers should use the simpler
 * scsi_add_host() function instead.
 *
 * Return value:
 *      0 on success / != 0 for error
 **/
int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
                           struct device *dma_dev)
{
        struct scsi_host_template *sht = shost->hostt;
        int error = -EINVAL;

        shost_printk(KERN_INFO, shost, "%s\n",
                        sht->info ? sht->info(shost) : sht->name);

        if (!shost->can_queue) {
                shost_printk(KERN_ERR, shost,
                             "can_queue = 0 no longer supported\n");
                goto fail;
        }

        error = scsi_init_sense_cache(shost);
        if (error)
                goto fail;

        error = scsi_mq_setup_tags(shost);
        if (error)
                goto fail;

        if (!shost->shost_gendev.parent)
                shost->shost_gendev.parent = dev ? dev : &platform_bus;
        if (!dma_dev)
                dma_dev = shost->shost_gendev.parent;

        shost->dma_dev = dma_dev;

        /*
         * Increase usage count temporarily here so that calling
         * scsi_autopm_put_host() will trigger runtime idle if there is
         * nothing else preventing suspending the device.
         */
        pm_runtime_get_noresume(&shost->shost_gendev);
        pm_runtime_set_active(&shost->shost_gendev);
        pm_runtime_enable(&shost->shost_gendev);
        device_enable_async_suspend(&shost->shost_gendev);

        error = device_add(&shost->shost_gendev);
        if (error)
                goto out_disable_runtime_pm;

        scsi_host_set_state(shost, SHOST_RUNNING);
        get_device(shost->shost_gendev.parent);

        device_enable_async_suspend(&shost->shost_dev);

        error = device_add(&shost->shost_dev);
        if (error)
                goto out_del_gendev;

        get_device(&shost->shost_gendev);

        if (shost->transportt->host_size) {
                shost->shost_data = kzalloc(shost->transportt->host_size,
                                         GFP_KERNEL);
                if (shost->shost_data == NULL) {
                        error = -ENOMEM;
                        goto out_del_dev;
                }
        }

        if (shost->transportt->create_work_queue) {
                snprintf(shost->work_q_name, sizeof(shost->work_q_name),
                         "scsi_wq_%d", shost->host_no);
                shost->work_q = create_singlethread_workqueue(
                                        shost->work_q_name);
                if (!shost->work_q) {
                        error = -EINVAL;
                        goto out_free_shost_data;
                }
        }

        error = scsi_sysfs_add_host(shost);
        if (error)
                goto out_destroy_host;

        scsi_proc_host_add(shost);
        scsi_autopm_put_host(shost);
        return error;

 out_destroy_host:
        if (shost->work_q)
                destroy_workqueue(shost->work_q);
 out_free_shost_data:
        kfree(shost->shost_data);
 out_del_dev:
        device_del(&shost->shost_dev);
 out_del_gendev:
        device_del(&shost->shost_gendev);
 out_disable_runtime_pm:
        device_disable_async_suspend(&shost->shost_gendev);
        pm_runtime_disable(&shost->shost_gendev);
        pm_runtime_set_suspended(&shost->shost_gendev);
        pm_runtime_put_noidle(&shost->shost_gendev);
        scsi_mq_destroy_tags(shost);
 fail:
        return error;
}
EXPORT_SYMBOL(scsi_add_host_with_dma);
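
/*
 * Illustrative sketch (assumed caller, not taken from this file):
 * scsi_add_host(), declared in <scsi/scsi_host.h>, is the common wrapper
 * that passes the same device as both sysfs parent and DMA device.  Only
 * when those differ (e.g. a virtualised setup where the sysfs parent is
 * not the DMA-capable device) would a driver call
 * scsi_add_host_with_dma() directly.  "pdev" stands for whatever bus
 * device the driver is bound to:
 *
 *      error = scsi_add_host(shost, &pdev->dev);
 *      // roughly equivalent to:
 *      error = scsi_add_host_with_dma(shost, &pdev->dev, &pdev->dev);
 */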

static void scsi_host_dev_release(struct device *dev)
{
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct device *parent = dev->parent;

        scsi_proc_hostdir_rm(shost->hostt);

        /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
        rcu_barrier();

        if (shost->tmf_work_q)
                destroy_workqueue(shost->tmf_work_q);
        if (shost->ehandler)
                kthread_stop(shost->ehandler);
        if (shost->work_q)
                destroy_workqueue(shost->work_q);

        if (shost->shost_state == SHOST_CREATED) {
                /*
                 * Free the shost_dev device name here if scsi_host_alloc()
                 * and scsi_host_put() have been called but neither
                 * scsi_add_host() nor scsi_remove_host() has been called.
                 * This prevents leaking the memory allocated for the
                 * shost_dev name.
                 */
                kfree(dev_name(&shost->shost_dev));
        }

        if (shost->tag_set.tags)
                scsi_mq_destroy_tags(shost);

        kfree(shost->shost_data);

        ida_simple_remove(&host_index_ida, shost->host_no);

        if (parent)
                put_device(parent);
        kfree(shost);
}

static struct device_type scsi_host_type = {
        .name =         "scsi_host",
        .release =      scsi_host_dev_release,
};

/**
 * scsi_host_alloc - allocate a scsi host adapter instance.
 * @sht:        pointer to scsi host template
 * @privsize:   extra bytes to allocate for driver
 *
 * Note:
 *      Allocate a new Scsi_Host and perform basic initialization.
 *      The host is not published to the scsi midlayer until scsi_add_host
 *      is called.
 *
 * Return value:
 *      Pointer to a new Scsi_Host, or NULL on failure.
 **/
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
        struct Scsi_Host *shost;
        gfp_t gfp_mask = GFP_KERNEL;
        int index;

        if (sht->unchecked_isa_dma && privsize)
                gfp_mask |= __GFP_DMA;

        shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
        if (!shost)
                return NULL;

        shost->host_lock = &shost->default_lock;
        spin_lock_init(shost->host_lock);
        shost->shost_state = SHOST_CREATED;
        INIT_LIST_HEAD(&shost->__devices);
        INIT_LIST_HEAD(&shost->__targets);
        INIT_LIST_HEAD(&shost->eh_cmd_q);
        INIT_LIST_HEAD(&shost->starved_list);
        init_waitqueue_head(&shost->host_wait);
        mutex_init(&shost->scan_mutex);

        index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
        if (index < 0)
                goto fail_kfree;
        shost->host_no = index;

        shost->dma_channel = 0xff;

        /* These three are default values which can be overridden */
        shost->max_channel = 0;
        shost->max_id = 8;
        shost->max_lun = 8;

        /* Give each shost a default transportt */
        shost->transportt = &blank_transport_template;

        /*
         * All drivers right now should be able to handle 12 byte
         * commands.  Every so often there are requests for 16 byte
         * commands, but individual low-level drivers need to certify that
         * they actually do something sensible with such commands.
         */
        shost->max_cmd_len = 12;
        shost->hostt = sht;
        shost->this_id = sht->this_id;
        shost->can_queue = sht->can_queue;
        shost->sg_tablesize = sht->sg_tablesize;
        shost->sg_prot_tablesize = sht->sg_prot_tablesize;
        shost->cmd_per_lun = sht->cmd_per_lun;
        shost->unchecked_isa_dma = sht->unchecked_isa_dma;
        shost->no_write_same = sht->no_write_same;

        if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
                shost->eh_deadline = -1;
        else if ((ulong) shost_eh_deadline * HZ > INT_MAX) {
                shost_printk(KERN_WARNING, shost,
                             "eh_deadline %u too large, setting to %u\n",
                             shost_eh_deadline, INT_MAX / HZ);
                shost->eh_deadline = INT_MAX;
        } else
                shost->eh_deadline = shost_eh_deadline * HZ;

        if (sht->supported_mode == MODE_UNKNOWN)
                /* means we didn't set it ... default to INITIATOR */
                shost->active_mode = MODE_INITIATOR;
        else
                shost->active_mode = sht->supported_mode;

        if (sht->max_host_blocked)
                shost->max_host_blocked = sht->max_host_blocked;
        else
                shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;

        /*
         * If the driver imposes no hard sector transfer limit, start at
         * machine infinity initially.
         */
        if (sht->max_sectors)
                shost->max_sectors = sht->max_sectors;
        else
                shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

        if (sht->max_segment_size)
                shost->max_segment_size = sht->max_segment_size;
        else
                shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;

        /*
         * assume a 4GB boundary, if not set
         */
        if (sht->dma_boundary)
                shost->dma_boundary = sht->dma_boundary;
        else
                shost->dma_boundary = 0xffffffff;

        device_initialize(&shost->shost_gendev);
        dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
        shost->shost_gendev.bus = &scsi_bus_type;
        shost->shost_gendev.type = &scsi_host_type;

        device_initialize(&shost->shost_dev);
        shost->shost_dev.parent = &shost->shost_gendev;
        shost->shost_dev.class = &shost_class;
        dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
        shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;

        shost->ehandler = kthread_run(scsi_error_handler, shost,
                        "scsi_eh_%d", shost->host_no);
        if (IS_ERR(shost->ehandler)) {
                shost_printk(KERN_WARNING, shost,
                        "error handler thread failed to spawn, error = %ld\n",
                        PTR_ERR(shost->ehandler));
                goto fail_index_remove;
        }

        shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
                                            WQ_UNBOUND | WQ_MEM_RECLAIM,
                                            1, shost->host_no);
        if (!shost->tmf_work_q) {
                shost_printk(KERN_WARNING, shost,
                             "failed to create tmf workq\n");
                goto fail_kthread;
        }
        scsi_proc_hostdir_add(shost->hostt);
        return shost;

 fail_kthread:
        kthread_stop(shost->ehandler);
 fail_index_remove:
        ida_simple_remove(&host_index_ida, shost->host_no);
 fail_kfree:
        kfree(shost);
        return NULL;
}
EXPORT_SYMBOL(scsi_host_alloc);
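
/*
 * Probe-path sketch (illustrative; "my_template", "my_hba" and "pdev" are
 * hypothetical driver names): a low-level driver typically allocates the
 * host, overrides the defaults set above as needed, publishes it and then
 * scans it:
 *
 *      struct Scsi_Host *shost;
 *      struct my_hba *hba;
 *      int error;
 *
 *      shost = scsi_host_alloc(&my_template, sizeof(*hba));
 *      if (!shost)
 *              return -ENOMEM;
 *      hba = shost_priv(shost);                // the privsize area
 *      shost->max_id = 16;                     // override defaults as needed
 *      shost->max_lun = 256;
 *
 *      error = scsi_add_host(shost, &pdev->dev);
 *      if (error) {
 *              scsi_host_put(shost);
 *              return error;
 *      }
 *      scsi_scan_host(shost);
 *
 * Teardown mirrors this: scsi_remove_host(shost), then scsi_host_put(shost).
 */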

static int __scsi_host_match(struct device *dev, const void *data)
{
        struct Scsi_Host *p;
        const unsigned short *hostnum = data;

        p = class_to_shost(dev);
        return p->host_no == *hostnum;
}

/**
 * scsi_host_lookup - get a reference to a Scsi_Host by host no
 * @hostnum:    host number to locate
 *
 * Return value:
 *      A pointer to the located Scsi_Host or NULL.
 *
 *      The caller must do a scsi_host_put() to drop the reference
 *      that scsi_host_get() took. The put_device() below drops the
 *      reference taken by class_find_device().
 **/
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
        struct device *cdev;
        struct Scsi_Host *shost = NULL;

        cdev = class_find_device(&shost_class, NULL, &hostnum,
                                 __scsi_host_match);
        if (cdev) {
                shost = scsi_host_get(class_to_shost(cdev));
                put_device(cdev);
        }
        return shost;
}
EXPORT_SYMBOL(scsi_host_lookup);
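
/*
 * Lookup sketch (illustrative): a caller that only knows the host number
 * takes a temporary reference and must drop it again, as the comment
 * above requires.  "hostnum" is assumed to come from elsewhere:
 *
 *      struct Scsi_Host *shost = scsi_host_lookup(hostnum);
 *
 *      if (!shost)
 *              return -ENODEV;
 *      shost_printk(KERN_INFO, shost, "found host%d\n", shost->host_no);
 *      scsi_host_put(shost);
 */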

/**
 * scsi_host_get - inc a Scsi_Host ref count
 * @shost:      Pointer to Scsi_Host to inc.
 **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{
        if ((shost->shost_state == SHOST_DEL) ||
                !get_device(&shost->shost_gendev))
                return NULL;
        return shost;
}
EXPORT_SYMBOL(scsi_host_get);

/**
 * scsi_host_busy - Return the host busy counter
 * @shost:      Pointer to Scsi_Host to read the busy counter of.
 **/
int scsi_host_busy(struct Scsi_Host *shost)
{
        return atomic_read(&shost->host_busy);
}
EXPORT_SYMBOL(scsi_host_busy);

/**
 * scsi_host_put - dec a Scsi_Host ref count
 * @shost:      Pointer to Scsi_Host to dec.
 **/
void scsi_host_put(struct Scsi_Host *shost)
{
        put_device(&shost->shost_gendev);
}
EXPORT_SYMBOL(scsi_host_put);

int scsi_init_hosts(void)
{
        return class_register(&shost_class);
}

void scsi_exit_hosts(void)
{
        class_unregister(&shost_class);
        ida_destroy(&host_index_ida);
}

int scsi_is_host_device(const struct device *dev)
{
        return dev->type == &scsi_host_type;
}
EXPORT_SYMBOL(scsi_is_host_device);

/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost:      Pointer to Scsi_Host.
 * @work:       Work to queue for execution.
 *
 * Return value:
 *      1 - work queued for execution
 *      0 - work is already queued
 *      -EINVAL - work queue doesn't exist
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
        if (unlikely(!shost->work_q)) {
                shost_printk(KERN_ERR, shost,
                        "ERROR: Scsi host '%s' attempted to queue scsi-work, "
                        "when no workqueue created.\n", shost->hostt->name);
                dump_stack();

                return -EINVAL;
        }

        return queue_work(shost->work_q, work);
}
EXPORT_SYMBOL_GPL(scsi_queue_work);

/**
 * scsi_flush_work - Flush a Scsi_Host's workqueue.
 * @shost:      Pointer to Scsi_Host.
 **/
void scsi_flush_work(struct Scsi_Host *shost)
{
        if (!shost->work_q) {
                shost_printk(KERN_ERR, shost,
                        "ERROR: Scsi host '%s' attempted to flush scsi-work, "
                        "when no workqueue created.\n", shost->hostt->name);
                dump_stack();
                return;
        }

        flush_workqueue(shost->work_q);
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
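
/*
 * Work-queue sketch (illustrative): shost->work_q only exists when the
 * transport template sets create_work_queue, as handled in
 * scsi_add_host_with_dma() above.  "my_work_fn" and "my_work" are
 * hypothetical names:
 *
 *      static void my_work_fn(struct work_struct *work)
 *      {
 *              // e.g. rescan a remote port, outside interrupt context
 *      }
 *
 *      static DECLARE_WORK(my_work, my_work_fn);
 *
 *      scsi_queue_work(shost, &my_work);       // -EINVAL without a work_q
 *      ...
 *      scsi_flush_work(shost);                 // wait for queued work
 */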