drivers/message/i2o/i2o_block.c
/*
 *      Block OSM
 *
 *      Copyright (C) 1999-2002 Red Hat Software
 *
 *      Written by Alan Cox, Building Number Three Ltd
 *
 *      This program is free software; you can redistribute it and/or modify it
 *      under the terms of the GNU General Public License as published by the
 *      Free Software Foundation; either version 2 of the License, or (at your
 *      option) any later version.
 *
 *      This program is distributed in the hope that it will be useful, but
 *      WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *      General Public License for more details.
 *
 *      For the purpose of avoiding doubt the preferred form of the work
 *      for making modifications shall be a standards compliant form such
 *      as a gzipped tar and not one requiring a proprietary or patent
 *      encumbered tool to unpack.
 *
 *      Fixes/additions:
 *              Steve Ralston:
 *                      Multiple device handling error fixes,
 *                      Added a queue depth.
 *              Alan Cox:
 *                      FC920 has an rmw bug. Don't OR in the end marker.
 *                      Removed queue walk, fixed for 64bitness.
 *                      Rewrote much of the code over time
 *                      Added indirect block lists
 *                      Handle 64K limits on many controllers
 *                      Don't use indirects on the Promise (breaks)
 *                      Heavily chop down the queue depths
 *              Deepak Saxena:
 *                      Independent queues per IOP
 *                      Support for dynamic device creation/deletion
 *                      Code cleanup
 *                      Support for larger I/Os through merge* functions
 *                      (taken from DAC960 driver)
 *              Boji T Kannanthanam:
 *                      Set the I2O Block devices to be detected in increasing
 *                      order of TIDs during boot.
 *                      Search and set the I2O block device that we boot off
 *                      from as the first device to be claimed (as /dev/i2o/hda)
 *                      Properly attach/detach I2O gendisk structure from the
 *                      system gendisk list. The I2O block devices now appear in
 *                      /proc/partitions.
 *              Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *                      Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/i2o.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include "i2o_block.h"

#define OSM_NAME        "block-osm"
#define OSM_VERSION     "$Rev$"
#define OSM_DESCRIPTION "I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
        {I2O_CLASS_RANDOM_BLOCK_STORAGE},
        {I2O_CLASS_END}
};

/**
 *      i2o_block_device_free - free the memory of the I2O Block device
 *      @dev: I2O Block device which should be cleaned up
 *
 *      Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
        blk_cleanup_queue(dev->gd->queue);

        put_disk(dev->gd);

        kfree(dev);
};

/**
 *      i2o_block_remove - remove the I2O Block device from the system
 *      @dev: I2O Block device which should be removed
 *
 *      Remove gendisk from system and free all allocated memory.
 *
 *      Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

        osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name);

        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

        del_gendisk(i2o_blk_dev->gd);

        dev_set_drvdata(dev, NULL);

        i2o_device_claim_release(i2o_dev);

        i2o_block_device_free(i2o_blk_dev);

        return 0;
};

/**
 *      i2o_block_device_flush - Flush all dirty data of I2O device dev
 *      @dev: I2O device which should be flushed
 *
 *      Flushes all dirty data on device dev.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(60 << 16, &msg->body[0]);
        osm_debug("Flushing...\n");

        return i2o_msg_post_wait(dev->iop, m, 60);
};
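
/*
 * Illustration only (not compiled): every request built in this file packs
 * message header word 1 the same way -- the I2O function code in bits 31-24,
 * the initiator TID in bits 23-12 and the target TID in bits 11-0. Assuming
 * HOST_TID is 1 and I2O_CMD_BLOCK_CFLUSH is 0x37, a flush sent to a device
 * with TID 0x012 encodes as:
 *
 *      0x37 << 24 | 1 << 12 | 0x012 == 0x37001012
 */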

/**
 *      i2o_block_device_mount - Mount (load) the media of device dev
 *      @dev: I2O device which should receive the mount request
 *      @media_id: Media Identifier
 *
 *      Load the media into the drive. The identifier should be set to -1,
 *      because the spec does not support any other value.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(-1, &msg->body[0]);
        writel(0, &msg->body[1]);
        osm_debug("Mounting...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 *      i2o_block_device_lock - Locks the media of device dev
 *      @dev: I2O device which should receive the lock request
 *      @media_id: Media Identifier
 *
 *      Lock media of device dev to prevent removal. The media identifier
 *      should be set to -1, because the spec does not support any other value.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(-1, &msg->body[0]);
        osm_debug("Locking...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 *      i2o_block_device_unlock - Unlocks the media of device dev
 *      @dev: I2O device which should receive the unlock request
 *      @media_id: Media Identifier
 *
 *      Unlocks the media in device dev. The media identifier should be set to
 *      -1, because the spec does not support any other value.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(media_id, &msg->body[0]);
        osm_debug("Unlocking...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 *      i2o_block_device_power - Power management for device dev
 *      @dev: I2O device which should receive the power management request
 *      @op: Operation to send
 *
 *      Send a power management request to the device dev.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
        struct i2o_device *i2o_dev = dev->i2o_dev;
        struct i2o_controller *c = i2o_dev->iop;
        struct i2o_message __iomem *msg;
        u32 m;
        int rc;

        m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
               i2o_dev->lct_data.tid, &msg->u.head[1]);
        writel(op << 24, &msg->body[0]);
        osm_debug("Power...\n");

        rc = i2o_msg_post_wait(c, m, 60);
        if (!rc)
                dev->power = op;

        return rc;
};
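
/*
 * For reference: the power operations used in this file are 0x02, sent from
 * i2o_block_open when the device is in a low power state (dev->power >
 * 0x1f), and 0x21 / 0x24, sent from i2o_block_release to power down
 * removable / non-removable media respectively.
 */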

/**
 *      i2o_block_request_alloc - Allocate an I2O block request struct
 *
 *      Allocates an I2O block request struct and initializes the list.
 *
 *      Returns an i2o_block_request pointer on success or negative error code
 *      on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
        struct i2o_block_request *ireq;

        ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
        if (!ireq)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ireq->queue);

        return ireq;
};

/**
 *      i2o_block_request_free - Frees an I2O block request
 *      @ireq: I2O block request which should be freed
 *
 *      Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
        mempool_free(ireq, i2o_blk_req_pool.pool);
};

/**
 *      i2o_block_sglist_alloc - Allocate the SG list and map it
 *      @ireq: I2O block request
 *
 *      Builds the SG list and maps it so it is accessible by the controller.
 *
 *      Returns the number of elements in the SG list or 0 on failure.
 */
static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
{
        struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
        int nents;

        nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

        if (rq_data_dir(ireq->req) == READ)
                ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
        else
                ireq->sg_dma_direction = PCI_DMA_TODEVICE;

        ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
                                    ireq->sg_dma_direction);

        return ireq->sg_nents;
};

/**
 *      i2o_block_sglist_free - Frees the SG list
 *      @ireq: I2O block request from which the SG list should be freed
 *
 *      Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
        struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;

        dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
                     ireq->sg_dma_direction);
};

/**
 *      i2o_block_prep_req_fn - Allocates I2O block device specific struct
 *      @q: request queue for the request
 *      @req: the request to prepare
 *
 *      Allocate the necessary i2o_block_request struct and connect it to
 *      the request. This is needed so that we do not lose the SG list later
 *      on.
 *
 *      Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
        struct i2o_block_device *i2o_blk_dev = q->queuedata;
        struct i2o_block_request *ireq;

        /* request is already processed by us, so return */
        if (req->flags & REQ_SPECIAL) {
                osm_debug("REQ_SPECIAL already set!\n");
                req->flags |= REQ_DONTPREP;
                return BLKPREP_OK;
        }

        /* connect the i2o_block_request to the request */
        if (!req->special) {
                ireq = i2o_block_request_alloc();
                if (unlikely(IS_ERR(ireq))) {
                        osm_debug("unable to allocate i2o_block_request!\n");
                        return BLKPREP_DEFER;
                }

                ireq->i2o_blk_dev = i2o_blk_dev;
                req->special = ireq;
                ireq->req = req;
        } else
                ireq = req->special;

        /* do not come back here */
        req->flags |= REQ_DONTPREP | REQ_SPECIAL;

        return BLKPREP_OK;
};

/**
 *      i2o_block_delayed_request_fn - delayed request queue function
 *      @delayed_request: the delayed request with the queue to start
 *
 *      If the request queue is stopped for a disk, and there is no open
 *      request, a new event is created, which calls this function to start
 *      the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue would
 *      never be started again.
 */
static void i2o_block_delayed_request_fn(void *delayed_request)
{
        struct i2o_block_delayed_request *dreq = delayed_request;
        struct request_queue *q = dreq->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
        kfree(dreq);
};

/**
 *      i2o_block_reply - Block OSM reply handler
 *      @c: I2O controller from which the message arrives
 *      @m: message id of reply
 *      @msg: the actual I2O message reply
 *
 *      This function gets all the message replies.
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
                           struct i2o_message *msg)
{
        struct i2o_block_request *ireq;
        struct request *req;
        struct i2o_block_device *dev;
        struct request_queue *q;
        u8 st;
        unsigned long flags;

        /* FAILed message */
        if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
                struct i2o_message *pmsg;
                u32 pm;

                /*
                 * FAILed message from controller
                 * We increment the error count and abort it
                 *
                 * In theory this will never happen.  The I2O block class
                 * specification states that block devices never return
                 * FAILs but instead use the REQ status field...but
                 * better be on the safe side since no one really follows
                 * the spec to the letter :)
                 */
                pm = le32_to_cpu(msg->body[3]);
                pmsg = i2o_msg_in_to_virt(c, pm);

                req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
                if (unlikely(!req)) {
                        osm_err("NULL reply received!\n");
                        return -1;
                }

                ireq = req->special;
                dev = ireq->i2o_blk_dev;
                q = dev->gd->queue;

                req->errors++;

                spin_lock_irqsave(q->queue_lock, flags);

                while (end_that_request_chunk(req, !req->errors,
                                              le32_to_cpu(pmsg->body[1])))
                        ;
                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);

                /* Now flush the message by making it a NOP */
                i2o_msg_nop(c, pm);

                return -1;
        }

        req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
        if (unlikely(!req)) {
                osm_err("NULL reply received!\n");
                return -1;
        }

        ireq = req->special;
        dev = ireq->i2o_blk_dev;
        q = dev->gd->queue;

        if (unlikely(!dev->i2o_dev)) {
                /*
                 * This is a HACK, but Intel Integrated RAID allows a user
                 * to delete a volume that is claimed, locked, and in use
                 * by the OS. We have to check for a reply from a
                 * non-existent device and flag it as an error or the system
                 * goes kaput...
                 */
                req->errors++;
                osm_warn("Data transfer to deleted device!\n");
                spin_lock_irqsave(q->queue_lock, flags);
                while (end_that_request_chunk(req, !req->errors,
                                              le32_to_cpu(msg->body[1])))
                        ;
                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);
                return -1;
        }

        /*
         *      Let's see what is cooking. We stuffed the
         *      request in the context.
         */

        st = le32_to_cpu(msg->body[0]) >> 24;

        if (st != 0) {
                int err;
                char *bsa_errors[] = {
                        "Success",
                        "Media Error",
                        "Failure communicating to device",
                        "Device Failure",
                        "Device is not ready",
                        "Media not present",
                        "Media is locked by another user",
                        "Media has failed",
                        "Failure communicating to device",
                        "Device bus failure",
                        "Device is locked by another user",
                        "Device is write protected",
                        "Device has reset",
                        "Volume has changed, waiting for acknowledgement"
                };

                err = le32_to_cpu(msg->body[0]) & 0xffff;

                /*
                 *      Device not ready means two things. One is that the
                 *      device went offline (but not removable media).
                 *
                 *      The second is that you have a SuperTrak 100 and the
                 *      firmware got constipated. Unlike standard i2o card
                 *      setups the supertrak returns an error rather than
                 *      blocking for the timeout in these cases.
                 *
                 *      Don't stick a supertrak100 into cache aggressive modes
                 */

                osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name,
                        bsa_errors[err]);
                if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
                        printk(KERN_ERR " - DDM attempted %d retries",
                               (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
                printk(KERN_ERR ".\n");
                req->errors++;
        } else
                req->errors = 0;

        if (!end_that_request_chunk(req, !req->errors,
                                    le32_to_cpu(msg->body[1]))) {
                add_disk_randomness(req->rq_disk);
                spin_lock_irqsave(q->queue_lock, flags);

                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);

                i2o_block_sglist_free(ireq);
                i2o_block_request_free(ireq);
        } else
                osm_err("still remaining chunks\n");

        return 1;
};
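
/*
 * Illustration only (not compiled): the reply status word msg->body[0] is
 * decoded above as three fields -- the request status in bits 31-24
 * (non-zero means failure), the DDM retry count in bits 23-16 and the
 * detailed BSA error code in bits 15-0, which indexes bsa_errors[]:
 *
 *      u32 w = le32_to_cpu(msg->body[0]);
 *      u8 status = w >> 24;
 *      u8 retries = (w >> 16) & 0xff;
 *      u16 error = w & 0xffff;
 */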

static void i2o_block_event(struct i2o_event *evt)
{
        osm_info("block-osm: event received\n");
        kfree(evt);
};

/*
 *      SCSI-CAM for ioctl geometry mapping
 *      Duplicated with SCSI - this should be moved into somewhere common
 *      perhaps genhd ?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size       | Sectors | Heads | Cylinders
 * ----------------+---------+-------+--------------------
 * 1 < X <= 528M   | 63      | 16    | X/(63 * 16 * 512)
 * 528M < X <= 1G  | 63      | 32    | X/(63 * 32 * 512)
 * 1G < X <= 21G   | 63      | 64    | X/(63 * 64 * 512)
 * 21G < X <= 42G  | 63      | 128   | X/(63 * 128 * 512)
 * 42G < X         | 63      | 255   | X/(63 * 255 * 512)
 *
 */
#define BLOCK_SIZE_528M         1081344
#define BLOCK_SIZE_1G           2097152
#define BLOCK_SIZE_21G          4403200
#define BLOCK_SIZE_42G          8806400
#define BLOCK_SIZE_84G          17612800

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
                                unsigned char *hds, unsigned char *secs)
{
        unsigned long heads, sectors, cylinders;

        sectors = 63L;          /* Maximize sectors per track */
        if (capacity <= BLOCK_SIZE_528M)
                heads = 16;
        else if (capacity <= BLOCK_SIZE_1G)
                heads = 32;
        else if (capacity <= BLOCK_SIZE_21G)
                heads = 64;
        else if (capacity <= BLOCK_SIZE_42G)
                heads = 128;
        else
                heads = 255;

        cylinders = (unsigned long)capacity / (heads * sectors);

        *cyls = (unsigned short)cylinders;      /* Stuff return values */
        *secs = (unsigned char)sectors;
        *hds = (unsigned char)heads;
}
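
/*
 * Worked example (illustration only, not compiled): get_capacity() reports
 * the size in 512-byte sectors, so a 1 GB disk has capacity == 2097152.
 * That falls into the 32-head bracket above, giving
 *
 *      cylinders = 2097152 / (32 * 63) = 1040
 *
 * i.e. HDIO_GETGEO below reports the geometry C/H/S = 1040/32/63.
 */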

/**
 *      i2o_block_open - Open the block device
 *      @inode: inode of the block device being opened
 *      @file: file which is used to access the block device
 *
 *      Power up the device, mount and lock the media. This function is
 *      called when the block device is opened for access.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct inode *inode, struct file *file)
{
        struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;

        if (!dev->i2o_dev)
                return -ENODEV;

        if (dev->power > 0x1f)
                i2o_block_device_power(dev, 0x02);

        i2o_block_device_mount(dev->i2o_dev, -1);

        i2o_block_device_lock(dev->i2o_dev, -1);

        osm_debug("Ready.\n");

        return 0;
};

/**
 *      i2o_block_release - Release the I2O block device
 *      @inode: inode of the block device being released
 *      @file: file which was used to access the block device
 *
 *      Unlock and unmount the media, and power down the device. Gets called
 *      if the block device is closed.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct inode *inode, struct file *file)
{
        struct gendisk *disk = inode->i_bdev->bd_disk;
        struct i2o_block_device *dev = disk->private_data;
        u8 operation;

        /*
         * This is to deal with the case of an application
         * opening a device and then the device disappears while
         * it's in use, and then the application tries to release
         * it.  ex: Unmounting a deleted RAID volume at reboot.
         * If we send messages, it will just cause FAILs since
         * the TID no longer exists.
         */
        if (!dev->i2o_dev)
                return 0;

        i2o_block_device_flush(dev->i2o_dev);

        i2o_block_device_unlock(dev->i2o_dev, -1);

        if (dev->flags & (1 << 3 | 1 << 4))     /* Removable */
                operation = 0x21;
        else
                operation = 0x24;

        i2o_block_device_power(dev, operation);

        return 0;
}

/**
 *      i2o_block_ioctl - Issue device specific ioctl calls
 *      @inode: inode of the block device
 *      @file: file which is used to access the block device
 *      @cmd: ioctl command
 *      @arg: arg
 *
 *      Handles ioctl request for the block device.
 *
 *      Returns 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct inode *inode, struct file *file,
                           unsigned int cmd, unsigned long arg)
{
        struct gendisk *disk = inode->i_bdev->bd_disk;
        struct i2o_block_device *dev = disk->private_data;
        void __user *argp = (void __user *)arg;

        /* Anyone capable of this syscall can do *real bad* things */

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case HDIO_GETGEO:
                {
                        struct hd_geometry g;
                        i2o_block_biosparam(get_capacity(disk),
                                            &g.cylinders, &g.heads, &g.sectors);
                        g.start = get_start_sect(inode->i_bdev);
                        return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
                }

        case BLKI2OGRSTRAT:
                return put_user(dev->rcache, (int __user *)arg);
        case BLKI2OGWSTRAT:
                return put_user(dev->wcache, (int __user *)arg);
        case BLKI2OSRSTRAT:
                if (arg < 0 || arg > CACHE_SMARTFETCH)
                        return -EINVAL;
                dev->rcache = arg;
                return 0;
        case BLKI2OSWSTRAT:
                if (arg != 0
                    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
                        return -EINVAL;
                dev->wcache = arg;
                return 0;
        }
        return -ENOTTY;
};
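
/*
 * Hypothetical user-space usage of the cache strategy ioctls above (the
 * device node name is an assumption):
 *
 *      int fd = open("/dev/i2o/hda", O_RDONLY);
 *      int strategy;
 *
 *      ioctl(fd, BLKI2OGRSTRAT, &strategy);       -- get the read cache policy
 *      ioctl(fd, BLKI2OSRSTRAT, CACHE_PREFETCH);  -- set the read cache policy
 */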

/**
 *      i2o_block_media_changed - Have we seen a media change?
 *      @disk: gendisk which should be verified
 *
 *      Verifies if the media has changed.
 *
 *      Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
        struct i2o_block_device *p = disk->private_data;

        if (p->media_change_flag) {
                p->media_change_flag = 0;
                return 1;
        }
        return 0;
}

/**
 *      i2o_block_transfer - Transfer a request to/from the I2O controller
 *      @req: the request which should be transferred
 *
 *      This function converts the request into an I2O message. The necessary
 *      DMA buffers are allocated and after everything is set up the message
 *      is posted to the I2O controller. No cleanup is done by this function.
 *      It is done on the interrupt side when the reply arrives.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
        struct i2o_block_device *dev = req->rq_disk->private_data;
        struct i2o_controller *c = dev->i2o_dev->iop;
        int tid = dev->i2o_dev->lct_data.tid;
        struct i2o_message __iomem *msg;
        void __iomem *mptr;
        struct i2o_block_request *ireq = req->special;
        struct scatterlist *sg;
        int sgnum;
        int i;
        u32 m;
        u32 tcntxt;
        u32 sg_flags;
        int rc;

        m = i2o_msg_get(c, &msg);
        if (m == I2O_QUEUE_EMPTY) {
                rc = -EBUSY;
                goto exit;
        }

        tcntxt = i2o_cntxt_list_add(c, req);
        if (!tcntxt) {
                rc = -ENOMEM;
                goto nop_msg;
        }

        sgnum = i2o_block_sglist_alloc(ireq);
        if (sgnum <= 0) {
                rc = -ENOMEM;
                goto context_remove;
        }

        /* Build the message based on the request. */
        writel(i2o_block_driver.context, &msg->u.s.icntxt);
        writel(tcntxt, &msg->u.s.tcntxt);
        writel(req->nr_sectors << 9, &msg->body[1]);

        writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
        writel(req->sector >> 23, &msg->body[3]);

        mptr = &msg->body[4];

        sg = ireq->sg_table;

        if (rq_data_dir(req) == READ) {
                writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
                       &msg->u.head[1]);
                sg_flags = 0x10000000;
                switch (dev->rcache) {
                case CACHE_NULL:
                        writel(0, &msg->body[0]);
                        break;
                case CACHE_PREFETCH:
                        writel(0x201F0008, &msg->body[0]);
                        break;
                case CACHE_SMARTFETCH:
                        if (req->nr_sectors > 16)
                                writel(0x201F0008, &msg->body[0]);
                        else
                                writel(0x001F0000, &msg->body[0]);
                        break;
                }
        } else {
                writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
                       &msg->u.head[1]);
                sg_flags = 0x14000000;
                switch (dev->wcache) {
                case CACHE_NULL:
                        writel(0, &msg->body[0]);
                        break;
                case CACHE_WRITETHROUGH:
                        writel(0x001F0008, &msg->body[0]);
                        break;
                case CACHE_WRITEBACK:
                        writel(0x001F0010, &msg->body[0]);
                        break;
                case CACHE_SMARTBACK:
                        if (req->nr_sectors > 16)
                                writel(0x001F0004, &msg->body[0]);
                        else
                                writel(0x001F0010, &msg->body[0]);
                        break;
                case CACHE_SMARTTHROUGH:
                        if (req->nr_sectors > 16)
                                writel(0x001F0004, &msg->body[0]);
                        else
                                writel(0x001F0010, &msg->body[0]);
                }
        }

        for (i = sgnum; i > 0; i--) {
                if (i == 1)
                        sg_flags |= 0x80000000; /* mark the last SG element */
                writel(sg_flags | sg_dma_len(sg), mptr);
                writel(sg_dma_address(sg), mptr + 4);
                mptr += 8;
                sg++;
        }

        writel(I2O_MESSAGE_SIZE
               (((unsigned long)mptr -
                 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
               &msg->u.head[0]);

        list_add_tail(&ireq->queue, &dev->open_queue);
        dev->open_queue_depth++;

        i2o_msg_post(c, m);

        return 0;

      context_remove:
        i2o_cntxt_list_remove(c, req);

      nop_msg:
        i2o_msg_nop(c, m);

      exit:
        return rc;
};
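
/*
 * Illustration only (not compiled): the 64-bit byte offset of a transfer is
 * derived from the 512-byte start sector and split across two message words
 * above. For req->sector == 0x12345678:
 *
 *      offset  = (u64)0x12345678 << 9  == 0x2468ACF000
 *      body[2] = offset & 0xffffffff   == 0x68ACF000
 *      body[3] = 0x12345678 >> 23      == 0x24
 *
 * (>> 23 yields the upper 32 bits, since it equals << 9 followed by >> 32.)
 */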

/**
 *      i2o_block_request_fn - request queue handling function
 *      @q: request queue from which the request could be fetched
 *
 *      Takes the next request from the queue, transfers it and if no error
 *      occurs dequeues it from the queue. On arrival of the reply the message
 *      will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
        struct request *req;

        while (!blk_queue_plugged(q)) {
                req = elv_next_request(q);
                if (!req)
                        break;

                if (blk_fs_request(req)) {
                        struct i2o_block_delayed_request *dreq;
                        struct i2o_block_request *ireq = req->special;
                        unsigned int queue_depth;

                        queue_depth = ireq->i2o_blk_dev->open_queue_depth;

                        if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
                                if (!i2o_block_transfer(req)) {
                                        blkdev_dequeue_request(req);
                                        continue;
                                }

                        if (queue_depth)
                                break;

                        /* stop the queue and retry later */
                        dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
                        if (!dreq)
                                continue;

                        dreq->queue = q;
                        INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
                                  dreq);

                        osm_info("transfer error\n");
                        if (!queue_delayed_work(i2o_block_driver.event_queue,
                                                &dreq->work,
                                                I2O_BLOCK_RETRY_TIME))
                                kfree(dreq);
                        else {
                                blk_stop_queue(q);
                                break;
                        }
                } else
                        end_request(req, 0);
        }
};

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
        .owner = THIS_MODULE,
        .open = i2o_block_open,
        .release = i2o_block_release,
        .ioctl = i2o_block_ioctl,
        .media_changed = i2o_block_media_changed
};

/**
 *      i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 *      Allocate memory for the i2o_block_device struct, gendisk and request
 *      queue and initialize them as far as no additional information is
 *      needed.
 *
 *      Returns a pointer to the allocated I2O Block device on success or a
 *      negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
        struct i2o_block_device *dev;
        struct gendisk *gd;
        struct request_queue *queue;
        int rc;

        dev = kmalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                osm_err("Insufficient memory to allocate I2O Block disk.\n");
                rc = -ENOMEM;
                goto exit;
        }
        memset(dev, 0, sizeof(*dev));

        INIT_LIST_HEAD(&dev->open_queue);
        spin_lock_init(&dev->lock);
        dev->rcache = CACHE_PREFETCH;
        dev->wcache = CACHE_WRITEBACK;

        /* allocate a gendisk with 16 partitions */
        gd = alloc_disk(16);
        if (!gd) {
                osm_err("Insufficient memory to allocate gendisk.\n");
                rc = -ENOMEM;
                goto cleanup_dev;
        }

        /* initialize the request queue */
        queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
        if (!queue) {
                osm_err("Insufficient memory to allocate request queue.\n");
                rc = -ENOMEM;
                goto cleanup_queue;
        }

        blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

        gd->major = I2O_MAJOR;
        gd->queue = queue;
        gd->fops = &i2o_block_fops;
        gd->private_data = dev;

        dev->gd = gd;

        return dev;

      cleanup_queue:
        put_disk(gd);

      cleanup_dev:
        kfree(dev);

      exit:
        return ERR_PTR(rc);
};

/**
 *      i2o_block_probe - verify if dev is an I2O Block device and install it
 *      @dev: device to verify if it is an I2O Block device
 *
 *      We only verify if the user_tid of the device is 0xfff and then install
 *      the device. Otherwise it is used by some other device (e.g. RAID).
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_block_device *i2o_blk_dev;
        struct i2o_controller *c = i2o_dev->iop;
        struct gendisk *gd;
        struct request_queue *queue;
        static int unit = 0;
        int rc;
        u64 size;
        u32 blocksize;
        u16 power;
        u32 flags, status;
        int segments;

        /* skip devices which are used by IOP */
        if (i2o_dev->lct_data.user_tid != 0xfff) {
                osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
                return -ENODEV;
        }

        osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid);

        if (i2o_device_claim(i2o_dev)) {
                osm_warn("Unable to claim device. Installation aborted\n");
                rc = -EFAULT;
                goto exit;
        }

        i2o_blk_dev = i2o_block_device_alloc();
        if (IS_ERR(i2o_blk_dev)) {
                osm_err("could not alloc a new I2O block device");
                rc = PTR_ERR(i2o_blk_dev);
                goto claim_release;
        }

        i2o_blk_dev->i2o_dev = i2o_dev;
        dev_set_drvdata(dev, i2o_blk_dev);

        /* setup gendisk */
        gd = i2o_blk_dev->gd;
        gd->first_minor = unit << 4;
        sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
        sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
        gd->driverfs_dev = &i2o_dev->device;

        /* setup request queue */
        queue = gd->queue;
        queue->queuedata = i2o_blk_dev;

        blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
        blk_queue_max_sectors(queue, I2O_MAX_SECTORS);

        if (c->short_req)
                segments = 8;
        else {
                i2o_status_block *sb;

                sb = c->status_block.virt;

                segments = (sb->inbound_frame_size -
                            sizeof(struct i2o_message) / 4 - 4) / 2;
        }

        blk_queue_max_hw_segments(queue, segments);

        osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
        osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
        osm_debug("hw segments = %d\n", segments);

        /*
         *      Ask for the current media data. If that isn't supported
         *      then we ask for the device capacity data
         */
        if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
            || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
                i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
                i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
        }
        osm_debug("blocksize = %d\n", blocksize);

        if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
                power = 0;
        i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
        i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);

        set_capacity(gd, size >> 9);

        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

        add_disk(gd);

        unit++;

        return 0;

      claim_release:
        i2o_device_claim_release(i2o_dev);

      exit:
        return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
        .name = OSM_NAME,
        .event = i2o_block_event,
        .reply = i2o_block_reply,
        .classes = i2o_block_class_id,
        .driver = {
                   .probe = i2o_block_probe,
                   .remove = i2o_block_remove,
                   },
};

/**
 *      i2o_block_init - Block OSM initialization function
 *
 *      Allocate the slab and mempool for request structs, register the
 *      i2o_block block device and finally register the Block OSM in the
 *      I2O core.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
        int rc;
        int size;

        printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

        /* Allocate request mempool and slab */
        size = sizeof(struct i2o_block_request);
        i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
                                                  SLAB_HWCACHE_ALIGN, NULL,
                                                  NULL);
        if (!i2o_blk_req_pool.slab) {
                osm_err("can't init request slab\n");
                rc = -ENOMEM;
                goto exit;
        }

        i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
                                               mempool_alloc_slab,
                                               mempool_free_slab,
                                               i2o_blk_req_pool.slab);
        if (!i2o_blk_req_pool.pool) {
                osm_err("can't init request mempool\n");
                rc = -ENOMEM;
                goto free_slab;
        }

        /* Register the block device interfaces */
        rc = register_blkdev(I2O_MAJOR, "i2o_block");
        if (rc) {
                osm_err("unable to register block device\n");
                goto free_mempool;
        }
#ifdef MODULE
        osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

        /* Register Block OSM into I2O core */
        rc = i2o_driver_register(&i2o_block_driver);
        if (rc) {
                osm_err("Could not register Block driver\n");
                goto unregister_blkdev;
        }

        return 0;

      unregister_blkdev:
        unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
        mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
        kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
        return rc;
};

/**
 *      i2o_block_exit - Block OSM exit function
 *
 *      Unregisters Block OSM from I2O core, unregisters the i2o_block block
 *      device and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
        /* Unregister I2O Block OSM from I2O core */
        i2o_driver_unregister(&i2o_block_driver);

        /* Unregister block device */
        unregister_blkdev(I2O_MAJOR, "i2o_block");

        /* Free request mempool and slab */
        mempool_destroy(i2o_blk_req_pool.pool);
        kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);