1 /*
2  * File...........: linux/drivers/s390/block/dasd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *                  Horst Hummel <Horst.Hummel@de.ibm.com>
5  *                  Carsten Otte <Cotte@de.ibm.com>
6  *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9  *
10  */
11
12 #include <linux/config.h>
13 #include <linux/kmod.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/ctype.h>
17 #include <linux/major.h>
18 #include <linux/slab.h>
19 #include <linux/buffer_head.h>
20 #include <linux/hdreg.h>
21
22 #include <asm/ccwdev.h>
23 #include <asm/ebcdic.h>
24 #include <asm/idals.h>
25 #include <asm/todclk.h>
26
27 /* This is ugly... */
28 #define PRINTK_HEADER "dasd:"
29
30 #include "dasd_int.h"
31 /*
32  * SECTION: Constant definitions to be used within this file
33  */
34 #define DASD_CHANQ_MAX_SIZE 4
35
36 /*
37  * SECTION: exported variables of dasd.c
38  */
39 debug_info_t *dasd_debug_area;
40 struct dasd_discipline *dasd_diag_discipline_pointer;
41
42 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
43 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
44                    " Copyright 2000 IBM Corporation");
45 MODULE_SUPPORTED_DEVICE("dasd");
46 MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
47 MODULE_LICENSE("GPL");
48
49 /*
50  * SECTION: prototypes for static functions of dasd.c
51  */
52 static int  dasd_alloc_queue(struct dasd_device * device);
53 static void dasd_setup_queue(struct dasd_device * device);
54 static void dasd_free_queue(struct dasd_device * device);
55 static void dasd_flush_request_queue(struct dasd_device *);
56 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
57 static void dasd_flush_ccw_queue(struct dasd_device *, int);
58 static void dasd_tasklet(struct dasd_device *);
59 static void do_kick_device(void *data);
60
61 /*
62  * SECTION: Operations on the device structure.
63  */
64 static wait_queue_head_t dasd_init_waitq;
65
66 /*
67  * Allocate memory for a new device structure.
68  */
69 struct dasd_device *
70 dasd_alloc_device(void)
71 {
72         struct dasd_device *device;
73
74         device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
75         if (device == NULL)
76                 return ERR_PTR(-ENOMEM);
77         memset(device, 0, sizeof (struct dasd_device));
78         /* open_count = 0 means device online but not in use */
79         atomic_set(&device->open_count, -1);
80
81         /* Get two pages for normal block device operations. */
82         device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
83         if (device->ccw_mem == NULL) {
84                 kfree(device);
85                 return ERR_PTR(-ENOMEM);
86         }
87         /* Get one page for error recovery. */
88         device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
89         if (device->erp_mem == NULL) {
90                 free_pages((unsigned long) device->ccw_mem, 1);
91                 kfree(device);
92                 return ERR_PTR(-ENOMEM);
93         }
94
95         dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
96         dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
97         spin_lock_init(&device->mem_lock);
98         spin_lock_init(&device->request_queue_lock);
99         atomic_set (&device->tasklet_scheduled, 0);
100         tasklet_init(&device->tasklet, 
101                      (void (*)(unsigned long)) dasd_tasklet,
102                      (unsigned long) device);
103         INIT_LIST_HEAD(&device->ccw_queue);
104         init_timer(&device->timer);
105         INIT_WORK(&device->kick_work, do_kick_device, device);
106         device->state = DASD_STATE_NEW;
107         device->target = DASD_STATE_NEW;
108
109         return device;
110 }
111
112 /*
113  * Free memory of a device structure.
114  */
115 void
116 dasd_free_device(struct dasd_device *device)
117 {
118         kfree(device->private);
119         free_page((unsigned long) device->erp_mem);
120         free_pages((unsigned long) device->ccw_mem, 1);
121         kfree(device);
122 }
123
124 /*
125  * Make a new device known to the system.
126  */
127 static inline int
128 dasd_state_new_to_known(struct dasd_device *device)
129 {
130         int rc;
131
132         /*
133          * As long as the device is not in state DASD_STATE_NEW we want to 
134          * keep the reference count > 0.
135          */
136         dasd_get_device(device);
137
138         rc = dasd_alloc_queue(device);
139         if (rc) {
140                 dasd_put_device(device);
141                 return rc;
142         }
143
144         device->state = DASD_STATE_KNOWN;
145         return 0;
146 }
147
148 /*
149  * Let the system forget about a device.
150  */
151 static inline void
152 dasd_state_known_to_new(struct dasd_device * device)
153 {
154         /* Forget the discipline information. */
155         if (device->discipline)
156                 module_put(device->discipline->owner);
157         device->discipline = NULL;
158         if (device->base_discipline)
159                 module_put(device->base_discipline->owner);
160         device->base_discipline = NULL;
161         device->state = DASD_STATE_NEW;
162
163         dasd_free_queue(device);
164
165         /* Give up reference we took in dasd_state_new_to_known. */
166         dasd_put_device(device);
167 }
168
169 /*
170  * Allocate the gendisk and register the debug area for the device.
171  */
172 static inline int
173 dasd_state_known_to_basic(struct dasd_device * device)
174 {
175         int rc;
176
177         /* Allocate and register gendisk structure. */
178         rc = dasd_gendisk_alloc(device);
179         if (rc)
180                 return rc;
181
182         /* register 'device' debug area, used for all DBF_DEV_XXX calls */
183         device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
184                                             8 * sizeof (long));
185         debug_register_view(device->debug_area, &debug_sprintf_view);
186         debug_set_level(device->debug_area, DBF_EMERG);
187         DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
188
189         device->state = DASD_STATE_BASIC;
190         return 0;
191 }
192
193 /*
194  * Free the gendisk, flush the ccw queue and remove the debug area.
195  */
196 static inline void
197 dasd_state_basic_to_known(struct dasd_device * device)
198 {
199         dasd_gendisk_free(device);
200         dasd_flush_ccw_queue(device, 1);
201         DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
202         if (device->debug_area != NULL) {
203                 debug_unregister(device->debug_area);
204                 device->debug_area = NULL;
205         }
206         device->state = DASD_STATE_KNOWN;
207 }
208
209 /*
210  * Do the initial analysis. The do_analysis function may return
211  * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
212  * until the discipline decides to continue the startup sequence
213  * by calling the function dasd_change_state. The eckd disciplines
214  * uses this to start a ccw that detects the format. The completion
215  * interrupt for this detection ccw uses the kernel event daemon to
216  * trigger the call to dasd_change_state. All this is done in the
217  * discipline code, see dasd_eckd.c.
218  * After the analysis ccw is done (do_analysis returned 0) the block
219  * device is set up.
220  * In case the analysis returns an error, the device setup is stopped
221  * (a fake disk was already added to allow formatting).
222  */
223 static inline int
224 dasd_state_basic_to_ready(struct dasd_device * device)
225 {
226         int rc;
227
228         rc = 0;
229         if (device->discipline->do_analysis != NULL)
230                 rc = device->discipline->do_analysis(device);
231         if (rc) {
232                 if (rc != -EAGAIN)
233                         device->state = DASD_STATE_UNFMT;
234                 return rc;
235         }
236         /* make disk known with correct capacity */
237         dasd_setup_queue(device);
238         set_capacity(device->gdp, device->blocks << device->s2b_shift);
239         device->state = DASD_STATE_READY;
240         rc = dasd_scan_partitions(device);
241         if (rc)
242                 device->state = DASD_STATE_BASIC;
243         return rc;
244 }
245
246 /*
247  * Remove device from block device layer. Destroy dirty buffers.
248  * Forget format information. Check if the target level is basic
249  * and if it is create fake disk for formatting.
250  */
251 static inline void
252 dasd_state_ready_to_basic(struct dasd_device * device)
253 {
254         dasd_flush_ccw_queue(device, 0);
255         dasd_destroy_partitions(device);
256         dasd_flush_request_queue(device);
257         device->blocks = 0;
258         device->bp_block = 0;
259         device->s2b_shift = 0;
260         device->state = DASD_STATE_BASIC;
261 }
262
263 /*
264  * Back to basic.
265  */
266 static inline void
267 dasd_state_unfmt_to_basic(struct dasd_device * device)
268 {
269         device->state = DASD_STATE_BASIC;
270 }
271
272 /*
273  * Make the device online and schedule the bottom half to start
274  * the requeueing of requests from the linux request queue to the
275  * ccw queue.
276  */
277 static inline int
278 dasd_state_ready_to_online(struct dasd_device * device)
279 {
280         device->state = DASD_STATE_ONLINE;
281         dasd_schedule_bh(device);
282         return 0;
283 }
284
285 /*
286  * Stop the requeueing of requests again.
287  */
288 static inline void
289 dasd_state_online_to_ready(struct dasd_device * device)
290 {
291         device->state = DASD_STATE_READY;
292 }
293
294 /*
295  * Device startup state changes.
296  */
297 static inline int
298 dasd_increase_state(struct dasd_device *device)
299 {
300         int rc;
301
302         rc = 0;
303         if (device->state == DASD_STATE_NEW &&
304             device->target >= DASD_STATE_KNOWN)
305                 rc = dasd_state_new_to_known(device);
306
307         if (!rc &&
308             device->state == DASD_STATE_KNOWN &&
309             device->target >= DASD_STATE_BASIC)
310                 rc = dasd_state_known_to_basic(device);
311
312         if (!rc &&
313             device->state == DASD_STATE_BASIC &&
314             device->target >= DASD_STATE_READY)
315                 rc = dasd_state_basic_to_ready(device);
316
317         if (!rc &&
318             device->state == DASD_STATE_READY &&
319             device->target >= DASD_STATE_ONLINE)
320                 rc = dasd_state_ready_to_online(device);
321
322         return rc;
323 }
324
325 /*
326  * Device shutdown state changes.
327  */
328 static inline int
329 dasd_decrease_state(struct dasd_device *device)
330 {
331         if (device->state == DASD_STATE_ONLINE &&
332             device->target <= DASD_STATE_READY)
333                 dasd_state_online_to_ready(device);
334         
335         if (device->state == DASD_STATE_READY &&
336             device->target <= DASD_STATE_BASIC)
337                 dasd_state_ready_to_basic(device);
338
339         if (device->state == DASD_STATE_UNFMT &&
340             device->target <= DASD_STATE_BASIC)
341                 dasd_state_unfmt_to_basic(device);
342
343         if (device->state == DASD_STATE_BASIC &&
344             device->target <= DASD_STATE_KNOWN)
345                 dasd_state_basic_to_known(device);
346         
347         if (device->state == DASD_STATE_KNOWN &&
348             device->target <= DASD_STATE_NEW)
349                 dasd_state_known_to_new(device);
350
351         return 0;
352 }
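
/*
 * Note (summary derived from the transition helpers above): on the way
 * up a device walks NEW -> KNOWN -> BASIC -> READY -> ONLINE, with
 * UNFMT as a side state entered when the analysis fails; the same
 * steps are taken in reverse on the way down.
 */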
353
354 /*
355  * This is the main startup/shutdown routine.
356  */
357 static void
358 dasd_change_state(struct dasd_device *device)
359 {
360         int rc;
361
362         if (device->state == device->target)
363                 /* Already where we want to go today... */
364                 return;
365         if (device->state < device->target)
366                 rc = dasd_increase_state(device);
367         else
368                 rc = dasd_decrease_state(device);
369         if (rc && rc != -EAGAIN)
370                 device->target = device->state;
371
372         if (device->state == device->target)
373                 wake_up(&dasd_init_waitq);
374 }
375
376 /*
377  * Kick starter for devices that did not complete the startup/shutdown
378  * procedure or were sleeping because of a pending state.
379  * dasd_kick_device will schedule a call to do_kick_device to the kernel
380  * event daemon.
381  */
382 static void
383 do_kick_device(void *data)
384 {
385         struct dasd_device *device;
386
387         device = (struct dasd_device *) data;
388         dasd_change_state(device);
389         dasd_schedule_bh(device);
390         dasd_put_device(device);
391 }
392
393 void
394 dasd_kick_device(struct dasd_device *device)
395 {
396         dasd_get_device(device);
397         /* queue call to dasd_kick_device to the kernel event daemon. */
398         schedule_work(&device->kick_work);
399 }
400
401 /*
402  * Set the target state for a device and start the state change.
403  */
404 void
405 dasd_set_target_state(struct dasd_device *device, int target)
406 {
407         /* If we are in probeonly mode stop at DASD_STATE_READY. */
408         if (dasd_probeonly && target > DASD_STATE_READY)
409                 target = DASD_STATE_READY;
410         if (device->target != target) {
411                 if (device->state == target)
412                         wake_up(&dasd_init_waitq);
413                 device->target = target;
414         }
415         if (device->state != device->target)
416                 dasd_change_state(device);
417 }
418
419 /*
420  * Enable a device and wait until it has reached its target state.
421  */
422 static inline int
423 _wait_for_device(struct dasd_device *device)
424 {
425         return (device->state == device->target);
426 }
427
428 void
429 dasd_enable_device(struct dasd_device *device)
430 {
431         dasd_set_target_state(device, DASD_STATE_ONLINE);
432         if (device->state <= DASD_STATE_KNOWN)
433                 /* No discipline for device found. */
434                 dasd_set_target_state(device, DASD_STATE_NEW);
435         /* Now wait for the devices to come up. */
436         wait_event(dasd_init_waitq, _wait_for_device(device));
437 }
438
439 /*
440  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
441  */
442 #ifdef CONFIG_DASD_PROFILE
443
444 struct dasd_profile_info_t dasd_global_profile;
445 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
446
447 /*
448  * Increments counter in global and local profiling structures.
449  */
450 #define dasd_profile_counter(value, counter, device) \
451 { \
452         int index; \
453         for (index = 0; index < 31 && value >> (2+index); index++); \
454         dasd_global_profile.counter[index]++; \
455         device->profile.counter[index]++; \
456 }
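
/*
 * Note on the macro above: the loop stops at the first index for which
 * value >> (2 + index) is zero, i.e. the bucket index is roughly
 * log2(value) - 1, capped at 31.  For example, value = 8 lands in
 * bucket 2 and value = 1024 in bucket 9.
 */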
457
458 /*
459  * Add profiling information for cqr before execution.
460  */
461 static inline void
462 dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
463                    struct request *req)
464 {
465         struct list_head *l;
466         unsigned int counter;
467
468         if (dasd_profile_level != DASD_PROFILE_ON)
469                 return;
470
471         /* count the length of the chanq for statistics */
472         counter = 0;
473         list_for_each(l, &device->ccw_queue)
474                 if (++counter >= 31)
475                         break;
476         dasd_global_profile.dasd_io_nr_req[counter]++;
477         device->profile.dasd_io_nr_req[counter]++;
478 }
479
480 /*
481  * Add profiling information for cqr after execution.
482  */
483 static inline void
484 dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
485                  struct request *req)
486 {
487         long strtime, irqtime, endtime, tottime;        /* in microseconds */
488         long tottimeps, sectors;
489
490         if (dasd_profile_level != DASD_PROFILE_ON)
491                 return;
492
493         sectors = req->nr_sectors;
494         if (!cqr->buildclk || !cqr->startclk ||
495             !cqr->stopclk || !cqr->endclk ||
496             !sectors)
497                 return;
498
499         strtime = ((cqr->startclk - cqr->buildclk) >> 12);
500         irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
501         endtime = ((cqr->endclk - cqr->stopclk) >> 12);
502         tottime = ((cqr->endclk - cqr->buildclk) >> 12);
503         tottimeps = tottime / sectors;
504
505         if (!dasd_global_profile.dasd_io_reqs)
506                 memset(&dasd_global_profile, 0,
507                        sizeof (struct dasd_profile_info_t));
508         dasd_global_profile.dasd_io_reqs++;
509         dasd_global_profile.dasd_io_sects += sectors;
510
511         if (!device->profile.dasd_io_reqs)
512                 memset(&device->profile, 0,
513                        sizeof (struct dasd_profile_info_t));
514         device->profile.dasd_io_reqs++;
515         device->profile.dasd_io_sects += sectors;
516
517         dasd_profile_counter(sectors, dasd_io_secs, device);
518         dasd_profile_counter(tottime, dasd_io_times, device);
519         dasd_profile_counter(tottimeps, dasd_io_timps, device);
520         dasd_profile_counter(strtime, dasd_io_time1, device);
521         dasd_profile_counter(irqtime, dasd_io_time2, device);
522         dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
523         dasd_profile_counter(endtime, dasd_io_time3, device);
524 }
525 #else
526 #define dasd_profile_start(device, cqr, req) do {} while (0)
527 #define dasd_profile_end(device, cqr, req) do {} while (0)
528 #endif                          /* CONFIG_DASD_PROFILE */
529
530 /*
531  * Allocate memory for a channel program with 'cplength' channel
532  * command words and 'datasize' additional space. There are two
533  * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
534  * memory and 2) dasd_smalloc_request uses the static ccw memory
535  * that gets allocated for each device.
536  */
537 struct dasd_ccw_req *
538 dasd_kmalloc_request(char *magic, int cplength, int datasize,
539                    struct dasd_device * device)
540 {
541         struct dasd_ccw_req *cqr;
542
543         /* Sanity checks */
544         if ( magic == NULL || datasize > PAGE_SIZE ||
545              (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
546                 BUG();
547
548         cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
549         if (cqr == NULL)
550                 return ERR_PTR(-ENOMEM);
551         memset(cqr, 0, sizeof(struct dasd_ccw_req));
552         cqr->cpaddr = NULL;
553         if (cplength > 0) {
554                 cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
555                                       GFP_ATOMIC | GFP_DMA);
556                 if (cqr->cpaddr == NULL) {
557                         kfree(cqr);
558                         return ERR_PTR(-ENOMEM);
559                 }
560                 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
561         }
562         cqr->data = NULL;
563         if (datasize > 0) {
564                 cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
565                 if (cqr->data == NULL) {
566                         kfree(cqr->cpaddr);
567                         kfree(cqr);
568                         return ERR_PTR(-ENOMEM);
569                 }
570                 memset(cqr->data, 0, datasize);
571         }
572         strncpy((char *) &cqr->magic, magic, 4);
573         ASCEBC((char *) &cqr->magic, 4);
574         set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
575         dasd_get_device(device);
576         return cqr;
577 }
578
579 struct dasd_ccw_req *
580 dasd_smalloc_request(char *magic, int cplength, int datasize,
581                    struct dasd_device * device)
582 {
583         unsigned long flags;
584         struct dasd_ccw_req *cqr;
585         char *data;
586         int size;
587
588         /* Sanity checks */
589         if ( magic == NULL || datasize > PAGE_SIZE ||
590              (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
591                 BUG();
592
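        /*
         * Round the request struct size up to a multiple of 8 so that
         * the CCW area placed behind it stays doubleword aligned.
         */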
593         size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
594         if (cplength > 0)
595                 size += cplength * sizeof(struct ccw1);
596         if (datasize > 0)
597                 size += datasize;
598         spin_lock_irqsave(&device->mem_lock, flags);
599         cqr = (struct dasd_ccw_req *)
600                 dasd_alloc_chunk(&device->ccw_chunks, size);
601         spin_unlock_irqrestore(&device->mem_lock, flags);
602         if (cqr == NULL)
603                 return ERR_PTR(-ENOMEM);
604         memset(cqr, 0, sizeof(struct dasd_ccw_req));
605         data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
606         cqr->cpaddr = NULL;
607         if (cplength > 0) {
608                 cqr->cpaddr = (struct ccw1 *) data;
609                 data += cplength*sizeof(struct ccw1);
610                 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
611         }
612         cqr->data = NULL;
613         if (datasize > 0) {
614                 cqr->data = data;
615                 memset(cqr->data, 0, datasize);
616         }
617         strncpy((char *) &cqr->magic, magic, 4);
618         ASCEBC((char *) &cqr->magic, 4);
619         set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
620         dasd_get_device(device);
621         return cqr;
622 }
623
624 /*
625  * Free memory of a channel program. This function needs to free all the
626  * idal lists that might have been created by dasd_set_cda and the
627  * struct dasd_ccw_req itself.
628  */
629 void
630 dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
631 {
632 #ifdef CONFIG_64BIT
633         struct ccw1 *ccw;
634
635         /* Clear any idals used for the request. */
636         ccw = cqr->cpaddr;
637         do {
638                 clear_normalized_cda(ccw);
639         } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
640 #endif
641         kfree(cqr->cpaddr);
642         kfree(cqr->data);
643         kfree(cqr);
644         dasd_put_device(device);
645 }
646
647 void
648 dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
649 {
650         unsigned long flags;
651
652         spin_lock_irqsave(&device->mem_lock, flags);
653         dasd_free_chunk(&device->ccw_chunks, cqr);
654         spin_unlock_irqrestore(&device->mem_lock, flags);
655         dasd_put_device(device);
656 }
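
/*
 * Illustrative sketch (not part of the driver): a discipline would
 * typically pair these helpers roughly as follows, here with a
 * hypothetical magic "XMPL", one CCW and 64 bytes of payload:
 *
 *	cqr = dasd_smalloc_request("XMPL", 1, 64, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	cqr->device = device;
 *	... set up cqr->cpaddr[0] and cqr->data ...
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 */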
657
658 /*
659  * Check discipline magic in cqr.
660  */
661 static inline int
662 dasd_check_cqr(struct dasd_ccw_req *cqr)
663 {
664         struct dasd_device *device;
665
666         if (cqr == NULL)
667                 return -EINVAL;
668         device = cqr->device;
669         if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
670                 DEV_MESSAGE(KERN_WARNING, device,
671                             " dasd_ccw_req 0x%08x magic doesn't match"
672                             " discipline 0x%08x",
673                             cqr->magic,
674                             *(unsigned int *) device->discipline->name);
675                 return -EINVAL;
676         }
677         return 0;
678 }
679
680 /*
681  * Terminate the current i/o and set the request to clear_pending.
682  * Timer keeps the device running.
683  * ccw_device_clear can fail if the i/o subsystem
684  * is in a bad mood.
685  */
686 int
687 dasd_term_IO(struct dasd_ccw_req * cqr)
688 {
689         struct dasd_device *device;
690         int retries, rc;
691
692         /* Check the cqr */
693         rc = dasd_check_cqr(cqr);
694         if (rc)
695                 return rc;
696         retries = 0;
697         device = (struct dasd_device *) cqr->device;
698         while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
699                 rc = ccw_device_clear(device->cdev, (long) cqr);
700                 switch (rc) {
701                 case 0: /* termination successful */
702                         cqr->retries--;
703                         cqr->status = DASD_CQR_CLEAR;
704                         cqr->stopclk = get_clock();
705                         DBF_DEV_EVENT(DBF_DEBUG, device,
706                                       "terminate cqr %p successful",
707                                       cqr);
708                         break;
709                 case -ENODEV:
710                         DBF_DEV_EVENT(DBF_ERR, device, "%s",
711                                       "device gone, retry");
712                         break;
713                 case -EIO:
714                         DBF_DEV_EVENT(DBF_ERR, device, "%s",
715                                       "I/O error, retry");
716                         break;
717                 case -EINVAL:
718                 case -EBUSY:
719                         DBF_DEV_EVENT(DBF_ERR, device, "%s",
720                                       "device busy, retry later");
721                         break;
722                 default:
723                         DEV_MESSAGE(KERN_ERR, device,
724                                     "line %d unknown RC=%d, please "
725                                     "report to linux390@de.ibm.com",
726                                     __LINE__, rc);
727                         BUG();
728                         break;
729                 }
730                 retries++;
731         }
732         dasd_schedule_bh(device);
733         return rc;
734 }
735
736 /*
737  * Start the i/o. This start_IO can fail if the channel is really busy.
738  * In that case set up a timer to start the request later.
739  */
740 int
741 dasd_start_IO(struct dasd_ccw_req * cqr)
742 {
743         struct dasd_device *device;
744         int rc;
745
746         /* Check the cqr */
747         rc = dasd_check_cqr(cqr);
748         if (rc)
749                 return rc;
750         device = (struct dasd_device *) cqr->device;
751         if (cqr->retries < 0) {
752                 DEV_MESSAGE(KERN_DEBUG, device,
753                             "start_IO: request %p (%02x/%i) - no retry left.",
754                             cqr, cqr->status, cqr->retries);
755                 cqr->status = DASD_CQR_FAILED;
756                 return -EIO;
757         }
758         cqr->startclk = get_clock();
759         cqr->starttime = jiffies;
760         cqr->retries--;
761         rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
762                               cqr->lpm, 0);
763         switch (rc) {
764         case 0:
765                 cqr->status = DASD_CQR_IN_IO;
766                 DBF_DEV_EVENT(DBF_DEBUG, device,
767                               "start_IO: request %p started successful",
768                               cqr);
769                 break;
770         case -EBUSY:
771                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
772                               "start_IO: device busy, retry later");
773                 break;
774         case -ETIMEDOUT:
775                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
776                               "start_IO: request timeout, retry later");
777                 break;
778         case -EACCES:
779                 /* -EACCES indicates that the request used only a
780                  * subset of the available paths and all these
781                  * paths are gone.
782                  * Do a retry with all available paths.
783                  */
784                 cqr->lpm = LPM_ANYPATH;
785                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
786                               "start_IO: selected paths gone,"
787                               " retry on all paths");
788                 break;
789         case -ENODEV:
790         case -EIO:
791                 DBF_DEV_EVENT(DBF_ERR, device, "%s",
792                               "start_IO: device gone, retry");
793                 break;
794         default:
795                 DEV_MESSAGE(KERN_ERR, device,
796                             "line %d unknown RC=%d, please report"
797                             " to linux390@de.ibm.com", __LINE__, rc);
798                 BUG();
799                 break;
800         }
801         return rc;
802 }
803
804 /*
805  * Timeout function for dasd devices. This is used for different purposes
806  *  1) missing interrupt handler for normal operation
807  *  2) delayed start of request where start_IO failed with -EBUSY
808  *  3) timeout for missing state change interrupts
809  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
810  * DASD_CQR_QUEUED for 2) and 3).
811  */
812 static void
813 dasd_timeout_device(unsigned long ptr)
814 {
815         unsigned long flags;
816         struct dasd_device *device;
817
818         device = (struct dasd_device *) ptr;
819         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
820         /* re-activate request queue */
821         device->stopped &= ~DASD_STOPPED_PENDING;
822         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
823         dasd_schedule_bh(device);
824 }
825
826 /*
827  * Setup timeout for a device in jiffies.
828  */
829 void
830 dasd_set_timer(struct dasd_device *device, int expires)
831 {
832         if (expires == 0) {
833                 if (timer_pending(&device->timer))
834                         del_timer(&device->timer);
835                 return;
836         }
837         if (timer_pending(&device->timer)) {
838                 if (mod_timer(&device->timer, jiffies + expires))
839                         return;
840         }
841         device->timer.function = dasd_timeout_device;
842         device->timer.data = (unsigned long) device;
843         device->timer.expires = jiffies + expires;
844         add_timer(&device->timer);
845 }
846
847 /*
848  * Clear timeout for a device.
849  */
850 void
851 dasd_clear_timer(struct dasd_device *device)
852 {
853         if (timer_pending(&device->timer))
854                 del_timer(&device->timer);
855 }
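
/*
 * Usage note (illustrative): the expires argument is in jiffies, so
 * dasd_set_timer(device, 50) fires dasd_timeout_device after roughly
 * half a second on a HZ=100 kernel, while dasd_set_timer(device, 0)
 * simply cancels a pending timer.
 */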
856
857 static void
858 dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
859 {
860         struct dasd_ccw_req *cqr;
861         struct dasd_device *device;
862
863         cqr = (struct dasd_ccw_req *) intparm;
864         if (cqr->status != DASD_CQR_IN_IO) {
865                 MESSAGE(KERN_DEBUG,
866                         "invalid status in handle_killed_request: "
867                         "bus_id %s, status %02x",
868                         cdev->dev.bus_id, cqr->status);
869                 return;
870         }
871
872         device = (struct dasd_device *) cqr->device;
873         if (device == NULL ||
874             device != dasd_device_from_cdev(cdev) ||
875             strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
876                 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
877                         cdev->dev.bus_id);
878                 return;
879         }
880
881         /* Schedule request to be retried. */
882         cqr->status = DASD_CQR_QUEUED;
883
884         dasd_clear_timer(device);
885         dasd_schedule_bh(device);
886         dasd_put_device(device);
887 }
888
889 static void
890 dasd_handle_state_change_pending(struct dasd_device *device)
891 {
892         struct dasd_ccw_req *cqr;
893         struct list_head *l, *n;
894
895         device->stopped &= ~DASD_STOPPED_PENDING;
896
897         /* restart all 'running' IO on queue */
898         list_for_each_safe(l, n, &device->ccw_queue) {
899                 cqr = list_entry(l, struct dasd_ccw_req, list);
900                 if (cqr->status == DASD_CQR_IN_IO) {
901                         cqr->status = DASD_CQR_QUEUED;
902                 }
903         }
904         dasd_clear_timer(device);
905         dasd_schedule_bh(device);
906 }
907
908 /*
909  * Interrupt handler for "normal" ssch-io based dasd devices.
910  */
911 void
912 dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
913                  struct irb *irb)
914 {
915         struct dasd_ccw_req *cqr, *next;
916         struct dasd_device *device;
917         unsigned long long now;
918         int expires;
919         dasd_era_t era;
920         char mask;
921
922         if (IS_ERR(irb)) {
923                 switch (PTR_ERR(irb)) {
924                 case -EIO:
925                         dasd_handle_killed_request(cdev, intparm);
926                         break;
927                 case -ETIMEDOUT:
928                         printk(KERN_WARNING"%s(%s): request timed out\n",
929                                __FUNCTION__, cdev->dev.bus_id);
930                         //FIXME - dasd uses own timeout interface...
931                         break;
932                 default:
933                         printk(KERN_WARNING"%s(%s): unknown error %ld\n",
934                                __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
935                 }
936                 return;
937         }
938
939         now = get_clock();
940
941         DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
942                   cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
943                   (unsigned int) intparm);
944
945         /* first of all check for state change pending interrupt */
946         mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
947         if ((irb->scsw.dstat & mask) == mask) {
948                 device = dasd_device_from_cdev(cdev);
949                 if (!IS_ERR(device)) {
950                         dasd_handle_state_change_pending(device);
951                         dasd_put_device(device);
952                 }
953                 return;
954         }
955
956         cqr = (struct dasd_ccw_req *) intparm;
957
958         /* check for unsolicited interrupts */
959         if (cqr == NULL) {
960                 MESSAGE(KERN_DEBUG,
961                         "unsolicited interrupt received: bus_id %s",
962                         cdev->dev.bus_id);
963                 return;
964         }
965
966         device = (struct dasd_device *) cqr->device;
967         if (device == NULL ||
968             strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
969                 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
970                         cdev->dev.bus_id);
971                 return;
972         }
973
974         /* Check for clear pending */
975         if (cqr->status == DASD_CQR_CLEAR &&
976             irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
977                 cqr->status = DASD_CQR_QUEUED;
978                 dasd_clear_timer(device);
979                 dasd_schedule_bh(device);
980                 return;
981         }
982
983         /* check status - the request might have been killed by dyn detach */
984         if (cqr->status != DASD_CQR_IN_IO) {
985                 MESSAGE(KERN_DEBUG,
986                         "invalid status: bus_id %s, status %02x",
987                         cdev->dev.bus_id, cqr->status);
988                 return;
989         }
990         DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
991                       ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
992
993         /* Find out the appropriate era_action. */
994         if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) 
995                 era = dasd_era_fatal;
996         else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
997                  irb->scsw.cstat == 0 &&
998                  !irb->esw.esw0.erw.cons)
999                 era = dasd_era_none;
1000         else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
1001                 era = dasd_era_fatal; /* don't recover this request */
1002         else if (irb->esw.esw0.erw.cons)
1003                 era = device->discipline->examine_error(cqr, irb);
1004         else 
1005                 era = dasd_era_recover;
1006
1007         DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
1008         expires = 0;
1009         if (era == dasd_era_none) {
1010                 cqr->status = DASD_CQR_DONE;
1011                 cqr->stopclk = now;
1012                 /* Start first request on queue if possible -> fast_io. */
1013                 if (cqr->list.next != &device->ccw_queue) {
1014                         next = list_entry(cqr->list.next,
1015                                           struct dasd_ccw_req, list);
1016                         if ((next->status == DASD_CQR_QUEUED) &&
1017                             (!device->stopped)) {
1018                                 if (device->discipline->start_IO(next) == 0)
1019                                         expires = next->expires;
1020                                 else
1021                                         DEV_MESSAGE(KERN_DEBUG, device, "%s",
1022                                                     "Interrupt fastpath "
1023                                                     "failed!");
1024                         }
1025                 }
1026         } else {                /* error */
1027                 memcpy(&cqr->irb, irb, sizeof (struct irb));
1028 #ifdef ERP_DEBUG
1029                 /* dump sense data */
1030                 dasd_log_sense(cqr, irb);
1031 #endif
1032                 switch (era) {
1033                 case dasd_era_fatal:
1034                         cqr->status = DASD_CQR_FAILED;
1035                         cqr->stopclk = now;
1036                         break;
1037                 case dasd_era_recover:
1038                         cqr->status = DASD_CQR_ERROR;
1039                         break;
1040                 default:
1041                         BUG();
1042                 }
1043         }
1044         if (expires != 0)
1045                 dasd_set_timer(device, expires);
1046         else
1047                 dasd_clear_timer(device);
1048         dasd_schedule_bh(device);
1049 }
1050
1051 /*
1052  * posts the buffer_cache about a finalized request
1053  */
1054 static inline void
1055 dasd_end_request(struct request *req, int uptodate)
1056 {
1057         if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1058                 BUG();
1059         add_disk_randomness(req->rq_disk);
1060         end_that_request_last(req, uptodate);
1061 }
1062
1063 /*
1064  * Process finished error recovery ccw.
1065  */
1066 static inline void
1067 __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1068 {
1069         dasd_erp_fn_t erp_fn;
1070
1071         if (cqr->status == DASD_CQR_DONE)
1072                 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1073         else
1074                 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1075         erp_fn = device->discipline->erp_postaction(cqr);
1076         erp_fn(cqr);
1077 }
1078
1079 /*
1080  * Process ccw request queue.
1081  */
1082 static inline void
1083 __dasd_process_ccw_queue(struct dasd_device * device,
1084                          struct list_head *final_queue)
1085 {
1086         struct list_head *l, *n;
1087         struct dasd_ccw_req *cqr;
1088         dasd_erp_fn_t erp_fn;
1089
1090 restart:
1091         /* Process request with final status. */
1092         list_for_each_safe(l, n, &device->ccw_queue) {
1093                 cqr = list_entry(l, struct dasd_ccw_req, list);
1094                 /* Stop list processing at the first non-final request. */
1095                 if (cqr->status != DASD_CQR_DONE &&
1096                     cqr->status != DASD_CQR_FAILED &&
1097                     cqr->status != DASD_CQR_ERROR)
1098                         break;
1099                 /*  Process requests with DASD_CQR_ERROR */
1100                 if (cqr->status == DASD_CQR_ERROR) {
1101                         if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
1102                                 cqr->status = DASD_CQR_FAILED;
1103                                 cqr->stopclk = get_clock();
1104                         } else {
1105                                 if (cqr->irb.esw.esw0.erw.cons) {
1106                                         erp_fn = device->discipline->
1107                                                 erp_action(cqr);
1108                                         erp_fn(cqr);
1109                                 } else
1110                                         dasd_default_erp_action(cqr);
1111                         }
1112                         goto restart;
1113                 }
1114                 /* Process finished ERP request. */
1115                 if (cqr->refers) {
1116                         __dasd_process_erp(device, cqr);
1117                         goto restart;
1118                 }
1119
1120                 /* Rechain finished requests to final queue */
1121                 cqr->endclk = get_clock();
1122                 list_move_tail(&cqr->list, final_queue);
1123         }
1124 }
1125
1126 static void
1127 dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1128 {
1129         struct request *req;
1130         struct dasd_device *device;
1131         int status;
1132
1133         req = (struct request *) data;
1134         device = cqr->device;
1135         dasd_profile_end(device, cqr, req);
1136         status = cqr->device->discipline->free_cp(cqr,req);
1137         spin_lock_irq(&device->request_queue_lock);
1138         dasd_end_request(req, status);
1139         spin_unlock_irq(&device->request_queue_lock);
1140 }
1141
1142
1143 /*
1144  * Fetch requests from the block device queue.
1145  */
1146 static inline void
1147 __dasd_process_blk_queue(struct dasd_device * device)
1148 {
1149         request_queue_t *queue;
1150         struct request *req;
1151         struct dasd_ccw_req *cqr;
1152         int nr_queued;
1153
1154         queue = device->request_queue;
1155         /* No queue ? Then there is nothing to do. */
1156         if (queue == NULL)
1157                 return;
1158
1159         /*
1160          * We requeue requests from the block device queue to the ccw
1161          * queue only in two states. In state DASD_STATE_READY the
1162          * partition detection is done and we need to requeue requests
1163          * for that. State DASD_STATE_ONLINE is normal block device
1164          * operation.
1165          */
1166         if (device->state != DASD_STATE_READY &&
1167             device->state != DASD_STATE_ONLINE)
1168                 return;
1169         nr_queued = 0;
1170         /* Now we try to fetch requests from the request queue */
1171         list_for_each_entry(cqr, &device->ccw_queue, list)
1172                 if (cqr->status == DASD_CQR_QUEUED)
1173                         nr_queued++;
1174         while (!blk_queue_plugged(queue) &&
1175                elv_next_request(queue) &&
1176                 nr_queued < DASD_CHANQ_MAX_SIZE) {
1177                 req = elv_next_request(queue);
1178
1179                 if (device->features & DASD_FEATURE_READONLY &&
1180                     rq_data_dir(req) == WRITE) {
1181                         DBF_DEV_EVENT(DBF_ERR, device,
1182                                       "Rejecting write request %p",
1183                                       req);
1184                         blkdev_dequeue_request(req);
1185                         dasd_end_request(req, 0);
1186                         continue;
1187                 }
1188                 if (device->stopped & DASD_STOPPED_DC_EIO) {
1189                         blkdev_dequeue_request(req);
1190                         dasd_end_request(req, 0);
1191                         continue;
1192                 }
1193                 cqr = device->discipline->build_cp(device, req);
1194                 if (IS_ERR(cqr)) {
1195                         if (PTR_ERR(cqr) == -ENOMEM)
1196                                 break;  /* terminate request queue loop */
1197                         DBF_DEV_EVENT(DBF_ERR, device,
1198                                       "CCW creation failed (rc=%ld) "
1199                                       "on request %p",
1200                                       PTR_ERR(cqr), req);
1201                         blkdev_dequeue_request(req);
1202                         dasd_end_request(req, 0);
1203                         continue;
1204                 }
1205                 cqr->callback = dasd_end_request_cb;
1206                 cqr->callback_data = (void *) req;
1207                 cqr->status = DASD_CQR_QUEUED;
1208                 blkdev_dequeue_request(req);
1209                 list_add_tail(&cqr->list, &device->ccw_queue);
1210                 dasd_profile_start(device, cqr, req);
1211                 nr_queued++;
1212         }
1213 }
1214
1215 /*
1216  * Take a look at the first request on the ccw queue and check
1217  * if it reached its expire time. If so, terminate the IO.
1218  */
1219 static inline void
1220 __dasd_check_expire(struct dasd_device * device)
1221 {
1222         struct dasd_ccw_req *cqr;
1223
1224         if (list_empty(&device->ccw_queue))
1225                 return;
1226         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1227         if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
1228                 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
1229                         if (device->discipline->term_IO(cqr) != 0)
1230                                 /* Hmpf, try again in 1/10 sec */
1231                                 dasd_set_timer(device, 10);
1232                 }
1233         }
1234 }
1235
1236 /*
1237  * Take a look at the first request on the ccw queue and check
1238  * if it needs to be started.
1239  */
1240 static inline void
1241 __dasd_start_head(struct dasd_device * device)
1242 {
1243         struct dasd_ccw_req *cqr;
1244         int rc;
1245
1246         if (list_empty(&device->ccw_queue))
1247                 return;
1248         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1249         /* check FAILFAST */
1250         if (device->stopped & ~DASD_STOPPED_PENDING &&
1251             test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
1252                 cqr->status = DASD_CQR_FAILED;
1253                 dasd_schedule_bh(device);
1254         }
1255         if ((cqr->status == DASD_CQR_QUEUED) &&
1256             (!device->stopped)) {
1257                 /* try to start the first I/O that can be started */
1258                 rc = device->discipline->start_IO(cqr);
1259                 if (rc == 0)
1260                         dasd_set_timer(device, cqr->expires);
1261                 else if (rc == -EACCES) {
1262                         dasd_schedule_bh(device);
1263                 } else
1264                         /* Hmpf, try again in 1/2 sec */
1265                         dasd_set_timer(device, 50);
1266         }
1267 }
1268
1269 /*
1270  * Remove requests from the ccw queue. 
1271  */
1272 static void
1273 dasd_flush_ccw_queue(struct dasd_device * device, int all)
1274 {
1275         struct list_head flush_queue;
1276         struct list_head *l, *n;
1277         struct dasd_ccw_req *cqr;
1278
1279         INIT_LIST_HEAD(&flush_queue);
1280         spin_lock_irq(get_ccwdev_lock(device->cdev));
1281         list_for_each_safe(l, n, &device->ccw_queue) {
1282                 cqr = list_entry(l, struct dasd_ccw_req, list);
1283                 /* Flush all requests or only block device requests? */
1284                 if (all == 0 && cqr->callback == dasd_end_request_cb)
1285                         continue;
1286                 if (cqr->status == DASD_CQR_IN_IO)
1287                         device->discipline->term_IO(cqr);
1288                 if (cqr->status != DASD_CQR_DONE &&
1289                     cqr->status != DASD_CQR_FAILED) {
1290                         cqr->status = DASD_CQR_FAILED;
1291                         cqr->stopclk = get_clock();
1292                 }
1293                 /* Process finished ERP request. */
1294                 if (cqr->refers) {
1295                         __dasd_process_erp(device, cqr);
1296                         continue;
1297                 }
1298                 /* Rechain request onto the local flush queue */
1299                 cqr->endclk = get_clock();
1300                 list_move_tail(&cqr->list, &flush_queue);
1301         }
1302         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1303         /* Now call the callback function of flushed requests */
1304         list_for_each_safe(l, n, &flush_queue) {
1305                 cqr = list_entry(l, struct dasd_ccw_req, list);
1306                 if (cqr->callback != NULL)
1307                         (cqr->callback)(cqr, cqr->callback_data);
1308         }
1309 }
1310
1311 /*
1312  * Acquire the device lock and process queues for the device.
1313  */
1314 static void
1315 dasd_tasklet(struct dasd_device * device)
1316 {
1317         struct list_head final_queue;
1318         struct list_head *l, *n;
1319         struct dasd_ccw_req *cqr;
1320
1321         atomic_set (&device->tasklet_scheduled, 0);
1322         INIT_LIST_HEAD(&final_queue);
1323         spin_lock_irq(get_ccwdev_lock(device->cdev));
1324         /* Check expire time of first request on the ccw queue. */
1325         __dasd_check_expire(device);
1326         /* Finish off requests on ccw queue */
1327         __dasd_process_ccw_queue(device, &final_queue);
1328         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1329         /* Now call the callback function of requests with final status */
1330         list_for_each_safe(l, n, &final_queue) {
1331                 cqr = list_entry(l, struct dasd_ccw_req, list);
1332                 list_del_init(&cqr->list);
1333                 if (cqr->callback != NULL)
1334                         (cqr->callback)(cqr, cqr->callback_data);
1335         }
1336         spin_lock_irq(&device->request_queue_lock);
1337         spin_lock(get_ccwdev_lock(device->cdev));
1338         /* Get new request from the block device request queue */
1339         __dasd_process_blk_queue(device);
1340         /* Now check if the head of the ccw queue needs to be started. */
1341         __dasd_start_head(device);
1342         spin_unlock(get_ccwdev_lock(device->cdev));
1343         spin_unlock_irq(&device->request_queue_lock);
1344         dasd_put_device(device);
1345 }
1346
1347 /*
1348  * Schedules a call to dasd_tasklet over the device tasklet.
1349  */
1350 void
1351 dasd_schedule_bh(struct dasd_device * device)
1352 {
1353         /* Protect against rescheduling. */
1354         if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1355                 return;
1356         dasd_get_device(device);
1357         tasklet_hi_schedule(&device->tasklet);
1358 }
1359
1360 /*
1361  * Queue a request to the head of the ccw_queue. Start the I/O if
1362  * possible.
1363  */
1364 void
1365 dasd_add_request_head(struct dasd_ccw_req *req)
1366 {
1367         struct dasd_device *device;
1368         unsigned long flags;
1369
1370         device = req->device;
1371         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1372         req->status = DASD_CQR_QUEUED;
1373         req->device = device;
1374         list_add(&req->list, &device->ccw_queue);
1375         /* let the bh start the request to keep them in order */
1376         dasd_schedule_bh(device);
1377         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1378 }
1379
1380 /*
1381  * Queue a request to the tail of the ccw_queue. Start the I/O if
1382  * possible.
1383  */
1384 void
1385 dasd_add_request_tail(struct dasd_ccw_req *req)
1386 {
1387         struct dasd_device *device;
1388         unsigned long flags;
1389
1390         device = req->device;
1391         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1392         req->status = DASD_CQR_QUEUED;
1393         req->device = device;
1394         list_add_tail(&req->list, &device->ccw_queue);
1395         /* let the bh start the request to keep them in order */
1396         dasd_schedule_bh(device);
1397         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1398 }
1399
1400 /*
1401  * Wakeup callback.
1402  */
1403 static void
1404 dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1405 {
1406         wake_up((wait_queue_head_t *) data);
1407 }
1408
1409 static inline int
1410 _wait_for_wakeup(struct dasd_ccw_req *cqr)
1411 {
1412         struct dasd_device *device;
1413         int rc;
1414
1415         device = cqr->device;
1416         spin_lock_irq(get_ccwdev_lock(device->cdev));
1417         rc = ((cqr->status == DASD_CQR_DONE ||
1418                cqr->status == DASD_CQR_FAILED) &&
1419               list_empty(&cqr->list));
1420         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1421         return rc;
1422 }
1423
1424 /*
1425  * Attempts to start a special ccw queue and waits for its completion.
1426  */
1427 int
1428 dasd_sleep_on(struct dasd_ccw_req * cqr)
1429 {
1430         wait_queue_head_t wait_q;
1431         struct dasd_device *device;
1432         int rc;
1433         
1434         device = cqr->device;
1435         spin_lock_irq(get_ccwdev_lock(device->cdev));
1436         
1437         init_waitqueue_head (&wait_q);
1438         cqr->callback = dasd_wakeup_cb;
1439         cqr->callback_data = (void *) &wait_q;
1440         cqr->status = DASD_CQR_QUEUED;
1441         list_add_tail(&cqr->list, &device->ccw_queue);
1442         
1443         /* let the bh start the request to keep them in order */
1444         dasd_schedule_bh(device);
1445         
1446         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1447
1448         wait_event(wait_q, _wait_for_wakeup(cqr));
1449         
1450         /* Request status is either done or failed. */
1451         rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1452         return rc;
1453 }
1454
1455 /*
1456  * Attempts to start a special ccw queue and waits interruptibly
1457  * for its completion.
1458  */
1459 int
1460 dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1461 {
1462         wait_queue_head_t wait_q;
1463         struct dasd_device *device;
1464         int rc, finished;
1465
1466         device = cqr->device;
1467         spin_lock_irq(get_ccwdev_lock(device->cdev));
1468
1469         init_waitqueue_head (&wait_q);
1470         cqr->callback = dasd_wakeup_cb;
1471         cqr->callback_data = (void *) &wait_q;
1472         cqr->status = DASD_CQR_QUEUED;
1473         list_add_tail(&cqr->list, &device->ccw_queue);
1474
1475         /* let the bh start the request to keep them in order */
1476         dasd_schedule_bh(device);
1477         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1478
1479         finished = 0;
1480         while (!finished) {
1481                 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1482                 if (rc != -ERESTARTSYS) {
1483                         /* Request is final (done or failed) */
1484                         rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1485                         break;
1486                 }
1487                 spin_lock_irq(get_ccwdev_lock(device->cdev));
1488                 switch (cqr->status) {
1489                 case DASD_CQR_IN_IO:
1490                         /* terminate running cqr */
1491                         if (device->discipline->term_IO) {
1492                                 cqr->retries = -1;
1493                                 device->discipline->term_IO(cqr);
1494                                 /*
1495                                  * wait (non-interruptible) for final status
1496                                  * because signal is still pending
1497                                  */
1498                                 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1499                                 wait_event(wait_q, _wait_for_wakeup(cqr));
1500                                 spin_lock_irq(get_ccwdev_lock(device->cdev));
1501                                 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1502                                 finished = 1;
1503                         }
1504                         break;
1505                 case DASD_CQR_QUEUED:
1506                         /* request was queued but never started - remove it and fail */
1507                         list_del_init(&cqr->list);
1508                         rc = -EIO;
1509                         finished = 1;
1510                         break;
1511                 default:
1512                 /* cqr with 'non-interruptible' status - just wait */
1513                         break;
1514                 }
1515                 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1516         }
1517         return rc;
1518 }
1519
1520 /*
1521  * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
1522  * for ECKD devices) the currently running request has to be terminated
1523  * and put back to queued status before the special request is added
1524  * to the head of the queue. The special request is then waited on normally.
1525  */
1526 static inline int
1527 _dasd_term_running_cqr(struct dasd_device *device)
1528 {
1529         struct dasd_ccw_req *cqr;
1530         int rc;
1531
1532         if (list_empty(&device->ccw_queue))
1533                 return 0;
1534         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1535         rc = device->discipline->term_IO(cqr);
1536         if (rc == 0) {
1537                 /* termination successful */
1538                 cqr->status = DASD_CQR_QUEUED;
1539                 cqr->startclk = cqr->stopclk = 0;
1540                 cqr->starttime = 0;
1541         }
1542         return rc;
1543 }
1544
1545 int
1546 dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1547 {
1548         wait_queue_head_t wait_q;
1549         struct dasd_device *device;
1550         int rc;
1551         
1552         device = cqr->device;
1553         spin_lock_irq(get_ccwdev_lock(device->cdev));
1554         rc = _dasd_term_running_cqr(device);
1555         if (rc) {
1556                 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1557                 return rc;
1558         }
1559         
1560         init_waitqueue_head (&wait_q);
1561         cqr->callback = dasd_wakeup_cb;
1562         cqr->callback_data = (void *) &wait_q;
1563         cqr->status = DASD_CQR_QUEUED;
1564         list_add(&cqr->list, &device->ccw_queue);
1565         
1566         /* let the bh start the request to keep the queue in order */
1567         dasd_schedule_bh(device);
1568         
1569         spin_unlock_irq(get_ccwdev_lock(device->cdev));
1570
1571         wait_event(wait_q, _wait_for_wakeup(cqr));
1572         
1573         /* Request status is either done or failed. */
1574         rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1575         return rc;
1576 }
1577
1578 /*
1579  * Cancels a request that was started with dasd_sleep_on_req.
1580  * This is useful for timing out requests. The request will be
1581  * terminated if it is currently in I/O.
1582  * Returns 1 if the request has been terminated.
1583  */
1584 int
1585 dasd_cancel_req(struct dasd_ccw_req *cqr)
1586 {
1587         struct dasd_device *device = cqr->device;
1588         unsigned long flags;
1589         int rc;
1590
1591         rc = 0;
1592         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1593         switch (cqr->status) {
1594         case DASD_CQR_QUEUED:
1595                 /* request was not started - just set to failed */
1596                 cqr->status = DASD_CQR_FAILED;
1597                 break;
1598         case DASD_CQR_IN_IO:
1599                 /* request in IO - terminate IO and release again */
1600                 if (device->discipline->term_IO(cqr) != 0)
1601                 /* unable to terminate the request (e.g. it is no
1602                    longer _IN_IO) - mark it failed anyway */
1603                         cqr->status = DASD_CQR_FAILED;
1604                 cqr->stopclk = get_clock();
1605                 rc = 1;
1606                 break;
1607         case DASD_CQR_DONE:
1608         case DASD_CQR_FAILED:
1609                 /* already finished - do nothing */
1610                 break;
1611         default:
1612                 DEV_MESSAGE(KERN_ALERT, device,
1613                             "invalid status %02x in request",
1614                             cqr->status);
1615                 BUG();
1616
1617         }
1618         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1619         dasd_schedule_bh(device);
1620         return rc;
1621 }
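
/*
 * Illustrative sketch only, not compiled into the driver: cancelling a
 * request that was queued asynchronously (e.g. with dasd_add_request_tail)
 * once the caller gives up on it, for instance from a timeout handler.
 * example_cancel is a hypothetical name.
 */
#if 0
static void
example_cancel(struct dasd_ccw_req *cqr)
{
        /* dasd_cancel_req returns 1 if the request was terminated while
         * in I/O, 0 if it had not been started or was already final. */
        if (dasd_cancel_req(cqr))
                DEV_MESSAGE(KERN_INFO, cqr->device, "%s",
                            "request terminated while in I/O");
        /* dasd_cancel_req has already scheduled the bh; final processing
         * of the cqr happens in the normal completion path. */
}
#endif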
1622
1623 /*
1624  * SECTION: Block device operations (request queue, partitions, open, release).
1625  */
1626
1627 /*
1628  * Dasd request queue function. Called from ll_rw_blk.c
1629  */
1630 static void
1631 do_dasd_request(request_queue_t * queue)
1632 {
1633         struct dasd_device *device;
1634
1635         device = (struct dasd_device *) queue->queuedata;
1636         spin_lock(get_ccwdev_lock(device->cdev));
1637         /* Get new request from the block device request queue */
1638         __dasd_process_blk_queue(device);
1639         /* Now check if the head of the ccw queue needs to be started. */
1640         __dasd_start_head(device);
1641         spin_unlock(get_ccwdev_lock(device->cdev));
1642 }
1643
1644 /*
1645  * Allocate and initialize request queue and default I/O scheduler.
1646  */
1647 static int
1648 dasd_alloc_queue(struct dasd_device * device)
1649 {
1650         int rc;
1651
1652         device->request_queue = blk_init_queue(do_dasd_request,
1653                                                &device->request_queue_lock);
1654         if (device->request_queue == NULL)
1655                 return -ENOMEM;
1656
1657         device->request_queue->queuedata = device;
1658
1659         elevator_exit(device->request_queue->elevator);
1660         rc = elevator_init(device->request_queue, "deadline");
1661         if (rc) {
1662                 blk_cleanup_queue(device->request_queue);
1663                 return rc;
1664         }
1665         return 0;
1666 }
1667
1668 /*
1669  * Set block size, request size limits and ordered mode on the request queue.
1670  */
1671 static void
1672 dasd_setup_queue(struct dasd_device * device)
1673 {
1674         int max;
1675
1676         blk_queue_hardsect_size(device->request_queue, device->bp_block);
1677         max = device->discipline->max_blocks << device->s2b_shift;
1678         blk_queue_max_sectors(device->request_queue, max);
1679         blk_queue_max_phys_segments(device->request_queue, -1L);
1680         blk_queue_max_hw_segments(device->request_queue, -1L);
1681         blk_queue_max_segment_size(device->request_queue, -1L);
1682         blk_queue_segment_boundary(device->request_queue, -1L);
1683         blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
1684 }
1685
1686 /*
1687  * Deactivate and free request queue.
1688  */
1689 static void
1690 dasd_free_queue(struct dasd_device * device)
1691 {
1692         if (device->request_queue) {
1693                 blk_cleanup_queue(device->request_queue);
1694                 device->request_queue = NULL;
1695         }
1696 }
1697
1698 /*
1699  * Flush all requests from the request queue.
1700  */
1701 static void
1702 dasd_flush_request_queue(struct dasd_device * device)
1703 {
1704         struct request *req;
1705
1706         if (!device->request_queue)
1707                 return;
1708         
1709         spin_lock_irq(&device->request_queue_lock);
1710         while (!list_empty(&device->request_queue->queue_head)) {
1711                 req = elv_next_request(device->request_queue);
1712                 if (req == NULL)
1713                         break;
1714                 dasd_end_request(req, 0);
1715                 blkdev_dequeue_request(req);
1716         }
1717         spin_unlock_irq(&device->request_queue_lock);
1718 }
1719
1720 static int
1721 dasd_open(struct inode *inp, struct file *filp)
1722 {
1723         struct gendisk *disk = inp->i_bdev->bd_disk;
1724         struct dasd_device *device = disk->private_data;
1725         int rc;
1726
1727         atomic_inc(&device->open_count);
1728         if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1729                 rc = -ENODEV;
1730                 goto unlock;
1731         }
1732
1733         if (!try_module_get(device->discipline->owner)) {
1734                 rc = -EINVAL;
1735                 goto unlock;
1736         }
1737
1738         if (dasd_probeonly) {
1739                 DEV_MESSAGE(KERN_INFO, device, "%s",
1740                             "No access to device due to probeonly mode");
1741                 rc = -EPERM;
1742                 goto out;
1743         }
1744
1745         if (device->state <= DASD_STATE_BASIC) {
1746                 DBF_DEV_EVENT(DBF_ERR, device, " %s",
1747                               " Cannot open unrecognized device");
1748                 rc = -ENODEV;
1749                 goto out;
1750         }
1751
1752         return 0;
1753
1754 out:
1755         module_put(device->discipline->owner);
1756 unlock:
1757         atomic_dec(&device->open_count);
1758         return rc;
1759 }
1760
1761 static int
1762 dasd_release(struct inode *inp, struct file *filp)
1763 {
1764         struct gendisk *disk = inp->i_bdev->bd_disk;
1765         struct dasd_device *device = disk->private_data;
1766
1767         atomic_dec(&device->open_count);
1768         module_put(device->discipline->owner);
1769         return 0;
1770 }
1771
1772 /*
1773  * Return disk geometry.
1774  */
1775 static int
1776 dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1777 {
1778         struct dasd_device *device;
1779
1780         device = bdev->bd_disk->private_data;
1781         if (!device)
1782                 return -ENODEV;
1783
1784         if (!device->discipline ||
1785             !device->discipline->fill_geometry)
1786                 return -EINVAL;
1787
1788         device->discipline->fill_geometry(device, geo);
1789         geo->start = get_start_sect(bdev) >> device->s2b_shift;
1790         return 0;
1791 }
1792
1793 struct block_device_operations
1794 dasd_device_operations = {
1795         .owner          = THIS_MODULE,
1796         .open           = dasd_open,
1797         .release        = dasd_release,
1798         .ioctl          = dasd_ioctl,
1799         .compat_ioctl   = dasd_compat_ioctl,
1800         .getgeo         = dasd_getgeo,
1801 };
1802
1803
1804 static void
1805 dasd_exit(void)
1806 {
1807 #ifdef CONFIG_PROC_FS
1808         dasd_proc_exit();
1809 #endif
1810         dasd_ioctl_exit();
1811         if (dasd_page_cache != NULL) {
1812                 kmem_cache_destroy(dasd_page_cache);
1813                 dasd_page_cache = NULL;
1814         }
1815         dasd_gendisk_exit();
1816         dasd_devmap_exit();
1817         devfs_remove("dasd");
1818         if (dasd_debug_area != NULL) {
1819                 debug_unregister(dasd_debug_area);
1820                 dasd_debug_area = NULL;
1821         }
1822 }
1823
1824 /*
1825  * SECTION: common functions for ccw_driver use
1826  */
1827
1828 /*
1829  * Initial attempt at a probe function. This can be simplified once
1830  * the other detection code is gone.
1831  */
1832 int
1833 dasd_generic_probe (struct ccw_device *cdev,
1834                     struct dasd_discipline *discipline)
1835 {
1836         int ret;
1837
1838         ret = dasd_add_sysfs_files(cdev);
1839         if (ret) {
1840                 printk(KERN_WARNING
1841                        "dasd_generic_probe: could not add sysfs entries "
1842                        "for %s\n", cdev->dev.bus_id);
1843         } else {
1844                 cdev->handler = &dasd_int_handler;
1845         }
1846
1847         return ret;
1848 }
1849
1850 /*
1851  * This will one day be called from a global not_oper handler.
1852  * It is also used by driver_unregister during module unload.
1853  */
1854 void
1855 dasd_generic_remove (struct ccw_device *cdev)
1856 {
1857         struct dasd_device *device;
1858
1859         cdev->handler = NULL;
1860
1861         dasd_remove_sysfs_files(cdev);
1862         device = dasd_device_from_cdev(cdev);
1863         if (IS_ERR(device))
1864                 return;
1865         if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1866                 /* Already doing offline processing */
1867                 dasd_put_device(device);
1868                 return;
1869         }
1870         /*
1871          * This device is removed unconditionally. Set offline
1872          * flag to prevent dasd_open from opening it while it is
1873          * not quite down yet.
1874          */
1875         dasd_set_target_state(device, DASD_STATE_NEW);
1876         /* dasd_delete_device destroys the device reference. */
1877         dasd_delete_device(device);
1878 }
1879
1880 /*
1881  * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
1882  * the device is detected for the first time and is supposed to be used,
1883  * or the user has started activation through sysfs.
1884  */
1885 int
1886 dasd_generic_set_online (struct ccw_device *cdev,
1887                          struct dasd_discipline *base_discipline)
1888
1889 {
1890         struct dasd_discipline *discipline;
1891         struct dasd_device *device;
1892         int rc;
1893
1894         device = dasd_create_device(cdev);
1895         if (IS_ERR(device))
1896                 return PTR_ERR(device);
1897
1898         discipline = base_discipline;
1899         if (device->features & DASD_FEATURE_USEDIAG) {
1900                 if (!dasd_diag_discipline_pointer) {
1901                         printk (KERN_WARNING
1902                                 "dasd_generic couldn't online device %s "
1903                                 "- discipline DIAG not available\n",
1904                                 cdev->dev.bus_id);
1905                         dasd_delete_device(device);
1906                         return -ENODEV;
1907                 }
1908                 discipline = dasd_diag_discipline_pointer;
1909         }
1910         if (!try_module_get(base_discipline->owner)) {
1911                 dasd_delete_device(device);
1912                 return -EINVAL;
1913         }
1914         if (!try_module_get(discipline->owner)) {
1915                 module_put(base_discipline->owner);
1916                 dasd_delete_device(device);
1917                 return -EINVAL;
1918         }
1919         device->base_discipline = base_discipline;
1920         device->discipline = discipline;
1921
1922         rc = discipline->check_device(device);
1923         if (rc) {
1924                 printk (KERN_WARNING
1925                         "dasd_generic couldn't online device %s "
1926                         "with discipline %s rc=%i\n",
1927                         cdev->dev.bus_id, discipline->name, rc);
1928                 module_put(discipline->owner);
1929                 module_put(base_discipline->owner);
1930                 dasd_delete_device(device);
1931                 return rc;
1932         }
1933
1934         dasd_set_target_state(device, DASD_STATE_ONLINE);
1935         if (device->state <= DASD_STATE_KNOWN) {
1936                 printk (KERN_WARNING
1937                         "dasd_generic discipline not found for %s\n",
1938                         cdev->dev.bus_id);
1939                 rc = -ENODEV;
1940                 dasd_set_target_state(device, DASD_STATE_NEW);
1941                 dasd_delete_device(device);
1942         } else
1943                 pr_debug("dasd_generic device %s found\n",
1944                                 cdev->dev.bus_id);
1945
1946         /* FIXME: we have to wait for the root device, but we don't want
1947          * to wait for each device individually - rather for all at once. */
1948         wait_event(dasd_init_waitq, _wait_for_device(device));
1949
1950         dasd_put_device(device);
1951
1952         return rc;
1953 }
1954
1955 int
1956 dasd_generic_set_offline (struct ccw_device *cdev)
1957 {
1958         struct dasd_device *device;
1959         int max_count;
1960
1961         device = dasd_device_from_cdev(cdev);
1962         if (IS_ERR(device))
1963                 return PTR_ERR(device);
1964         if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1965                 /* Already doing offline processing */
1966                 dasd_put_device(device);
1967                 return 0;
1968         }
1969         /*
1970          * We must make sure that this device is currently not in use.
1971          * The open_count is increased for every opener, which includes
1972          * the blkdev_get in dasd_scan_partitions. We are only interested
1973          * in the other openers.
1974          */
1975         max_count = device->bdev ? 0 : -1;
1976         if (atomic_read(&device->open_count) > max_count) {
1977                 printk (KERN_WARNING "Can't offline dasd device with open"
1978                         " count = %i.\n",
1979                         atomic_read(&device->open_count));
1980                 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
1981                 dasd_put_device(device);
1982                 return -EBUSY;
1983         }
1984         dasd_set_target_state(device, DASD_STATE_NEW);
1985         /* dasd_delete_device destroys the device reference. */
1986         dasd_delete_device(device);
1987
1988         return 0;
1989 }
1990
1991 int
1992 dasd_generic_notify(struct ccw_device *cdev, int event)
1993 {
1994         struct dasd_device *device;
1995         struct dasd_ccw_req *cqr;
1996         unsigned long flags;
1997         int ret;
1998
1999         device = dasd_device_from_cdev(cdev);
2000         if (IS_ERR(device))
2001                 return 0;
2002         spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
2003         ret = 0;
2004         switch (event) {
2005         case CIO_GONE:
2006         case CIO_NO_PATH:
2007                 if (device->state < DASD_STATE_BASIC)
2008                         break;
2009                 /* Device is active. We want to keep it. */
2010                 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
2011                         list_for_each_entry(cqr, &device->ccw_queue, list)
2012                                 if (cqr->status == DASD_CQR_IN_IO)
2013                                         cqr->status = DASD_CQR_FAILED;
2014                         device->stopped |= DASD_STOPPED_DC_EIO;
2015                 } else {
2016                         list_for_each_entry(cqr, &device->ccw_queue, list)
2017                                 if (cqr->status == DASD_CQR_IN_IO) {
2018                                         cqr->status = DASD_CQR_QUEUED;
2019                                         cqr->retries++;
2020                                 }
2021                         device->stopped |= DASD_STOPPED_DC_WAIT;
2022                         dasd_set_timer(device, 0);
2023                 }
2024                 dasd_schedule_bh(device);
2025                 ret = 1;
2026                 break;
2027         case CIO_OPER:
2028                 /* FIXME: add a sanity check. */
2029                 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
2030                 dasd_schedule_bh(device);
2031                 ret = 1;
2032                 break;
2033         }
2034         spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2035         dasd_put_device(device);
2036         return ret;
2037 }
2038
2039 /*
2040  * Automatically online either all dasd devices (dasd_autodetect) or
2041  * all devices specified with dasd= parameters.
2042  */
2043 static int
2044 __dasd_auto_online(struct device *dev, void *data)
2045 {
2046         struct ccw_device *cdev;
2047
2048         cdev = to_ccwdev(dev);
2049         if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
2050                 ccw_device_set_online(cdev);
2051         return 0;
2052 }
2053
2054 void
2055 dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
2056 {
2057         struct device_driver *drv;
2058
2059         drv = get_driver(&dasd_discipline_driver->driver);
2060         driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
2061         put_driver(drv);
2062 }
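
/*
 * Illustrative sketch only, not compiled into the driver: how a discipline
 * module (cf. dasd_eckd/dasd_fba) typically wires the dasd_generic_* helpers
 * into its ccw_driver and enables auto-online at module load. The driver
 * name, the device id table (example_ids), the discipline structure
 * (example_discipline) and the wrapper functions are hypothetical.
 */
#if 0
static int
example_probe(struct ccw_device *cdev)
{
        return dasd_generic_probe(cdev, &example_discipline);
}

static int
example_set_online(struct ccw_device *cdev)
{
        return dasd_generic_set_online(cdev, &example_discipline);
}

static struct ccw_driver example_dasd_driver = {
        .name        = "dasd-example",
        .owner       = THIS_MODULE,
        .ids         = example_ids,
        .probe       = example_probe,
        .remove      = dasd_generic_remove,
        .set_online  = example_set_online,
        .set_offline = dasd_generic_set_offline,
        .notify      = dasd_generic_notify,
};

static int __init
example_driver_init(void)
{
        int ret;

        ret = ccw_driver_register(&example_dasd_driver);
        if (!ret)
                dasd_generic_auto_online(&example_dasd_driver);
        return ret;
}
#endif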
2063
2064 static int __init
2065 dasd_init(void)
2066 {
2067         int rc;
2068
2069         init_waitqueue_head(&dasd_init_waitq);
2070
2071         /* register 'common' DASD debug area, used for all DBF_XXX calls */
2072         dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
2073         if (dasd_debug_area == NULL) {
2074                 rc = -ENOMEM;
2075                 goto failed;
2076         }
2077         debug_register_view(dasd_debug_area, &debug_sprintf_view);
2078         debug_set_level(dasd_debug_area, DBF_EMERG);
2079
2080         DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2081
2082         dasd_diag_discipline_pointer = NULL;
2083
2084         rc = devfs_mk_dir("dasd");
2085         if (rc)
2086                 goto failed;
2087         rc = dasd_devmap_init();
2088         if (rc)
2089                 goto failed;
2090         rc = dasd_gendisk_init();
2091         if (rc)
2092                 goto failed;
2093         rc = dasd_parse();
2094         if (rc)
2095                 goto failed;
2096         rc = dasd_ioctl_init();
2097         if (rc)
2098                 goto failed;
2099 #ifdef CONFIG_PROC_FS
2100         rc = dasd_proc_init();
2101         if (rc)
2102                 goto failed;
2103 #endif
2104
2105         return 0;
2106 failed:
2107         MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
2108         dasd_exit();
2109         return rc;
2110 }
2111
2112 module_init(dasd_init);
2113 module_exit(dasd_exit);
2114
2115 EXPORT_SYMBOL(dasd_debug_area);
2116 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2117
2118 EXPORT_SYMBOL(dasd_add_request_head);
2119 EXPORT_SYMBOL(dasd_add_request_tail);
2120 EXPORT_SYMBOL(dasd_cancel_req);
2121 EXPORT_SYMBOL(dasd_clear_timer);
2122 EXPORT_SYMBOL(dasd_enable_device);
2123 EXPORT_SYMBOL(dasd_int_handler);
2124 EXPORT_SYMBOL(dasd_kfree_request);
2125 EXPORT_SYMBOL(dasd_kick_device);
2126 EXPORT_SYMBOL(dasd_kmalloc_request);
2127 EXPORT_SYMBOL(dasd_schedule_bh);
2128 EXPORT_SYMBOL(dasd_set_target_state);
2129 EXPORT_SYMBOL(dasd_set_timer);
2130 EXPORT_SYMBOL(dasd_sfree_request);
2131 EXPORT_SYMBOL(dasd_sleep_on);
2132 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2133 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2134 EXPORT_SYMBOL(dasd_smalloc_request);
2135 EXPORT_SYMBOL(dasd_start_IO);
2136 EXPORT_SYMBOL(dasd_term_IO);
2137
2138 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2139 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2140 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2141 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2142 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2143 EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
2144
2145 /*
2146  * Overrides for Emacs so that we follow Linus's tabbing style.
2147  * Emacs will notice this stuff at the end of the file and automatically
2148  * adjust the settings for this buffer only.  This must remain at the end
2149  * of the file.
2150  * ---------------------------------------------------------------------------
2151  * Local variables:
2152  * c-indent-level: 4
2153  * c-brace-imaginary-offset: 0
2154  * c-brace-offset: -4
2155  * c-argdecl-indent: 4
2156  * c-label-offset: -4
2157  * c-continued-statement-offset: 4
2158  * c-continued-brace-offset: 0
2159  * indent-tabs-mode: 1
2160  * tab-width: 8
2161  * End:
2162  */