/*
 *  linux/drivers/block/deadline-iosched.c
 *
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static int read_expire = HZ / 2;  /* max time before a read is submitted. */
static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static int writes_starved = 2;    /* max times reads can starve a write */
static int fifo_batch = 16;       /* # of sequential requests treated as one
				     by the above parameters. For throughput. */
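/*
 * These defaults seed the per-queue tunables set up in deadline_init_queue()
 * and exported through the sysfs attributes defined at the bottom of this
 * file.  With this scheduler active they can be inspected and changed at
 * run time, typically under /sys/block/<dev>/queue/iosched/ (the exact
 * path depends on the kernel version), e.g.:
 *
 *	echo 100 > /sys/block/hda/queue/iosched/read_expire
 *
 * read_expire and write_expire are shown and set in milliseconds and
 * converted to/from jiffies internally (see the SHOW/STORE macros below).
 */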

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)	((sec) >> 3)
#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
#define ON_HASH(drq)		(drq)->on_hash
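/*
 * The merge hash is keyed on a request's *end* sector (rq_hash_key), so a
 * bio that begins exactly where a queued request ends can be found for a
 * back merge with a single lookup.  For example, a request covering
 * sectors 100..107 (start sector 100, 8 sectors long) hashes under key
 * 108, the sector where a mergeable bio would begin.  DL_HASH_BLOCK folds
 * 8 adjacent sectors into one hash block before hashing into the
 * 2^5 = 32 bucket table.
 */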

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next request in sort order per direction; read, write, or both
	 * may be NULL
	 */
	struct deadline_rq *next_drq[2];
	struct list_head *dispatch;	/* driver dispatch queue */
	struct list_head *hash;		/* request hash */
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	mempool_t *drq_pool;
};

/*
 * per-request data.
 */
struct deadline_rq {
	/*
	 * rbtree index, key is the starting offset
	 */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	/*
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct list_head hash;
	char on_hash;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
	drq->on_hash = 0;
	list_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
	if (ON_HASH(drq))
		__deadline_del_drq_hash(drq);
}

static void
deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
{
	deadline_del_drq_hash(drq);

	if (q->last_merge == drq->request)
		q->last_merge = NULL;
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;

	BUG_ON(ON_HASH(drq));

	drq->on_hash = 1;
	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

	if (ON_HASH(drq) && drq->hash.prev != head) {
		list_del(&drq->hash);
		list_add(&drq->hash, head);
	}
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
	struct list_head *entry, *next = hash_list->next;

	while ((entry = next) != hash_list) {
		struct deadline_rq *drq = list_entry_hash(entry);
		struct request *__rq = drq->request;

		next = entry->next;

		BUG_ON(!ON_HASH(drq));

		if (!rq_mergeable(__rq)) {
			__deadline_del_drq_hash(drq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}

/*
 * rb tree support functions
 */
#define RB_NONE		(2)
#define RB_EMPTY(root)	((root)->rb_node == NULL)
#define ON_RB(node)	((node)->rb_color != RB_NONE)
#define RB_CLEAR(node)	((node)->rb_color = RB_NONE)
#define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)		(rq)->sector
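/*
 * ON_RB/RB_CLEAR overload the rb_node's rb_color field (which normally
 * holds only RB_RED or RB_BLACK, i.e. 0 or 1, in this era's rbtree
 * implementation) with the sentinel RB_NONE to mark a node that is not
 * currently linked into a sort tree, saving a separate flag in
 * struct deadline_rq.
 */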

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
	struct rb_node *parent = NULL;
	struct deadline_rq *__drq;

	while (*p) {
		parent = *p;
		__drq = rb_entry_drq(parent);

		if (drq->rb_key < __drq->rb_key)
			p = &(*p)->rb_left;
		else if (drq->rb_key > __drq->rb_key)
			p = &(*p)->rb_right;
		else
			return __drq;
	}

	rb_link_node(&drq->rb_node, parent, p);
	return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct deadline_rq *__alias;

	drq->rb_key = rq_rb_key(drq->request);

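	/*
	 * The sort tree cannot hold two requests with the same start
	 * sector: if __deadline_add_drq_rb() finds an alias, the existing
	 * request is moved to the dispatch queue to make room, and the
	 * insert is retried.
	 */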
retry:
	__alias = __deadline_add_drq_rb(dd, drq);
	if (!__alias) {
		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		return;
	}

	deadline_move_request(dd, __alias);
	goto retry;
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);

	if (dd->next_drq[data_dir] == drq) {
		struct rb_node *rbnext = rb_next(&drq->rb_node);

		dd->next_drq[data_dir] = NULL;
		if (rbnext)
			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
	}

	if (ON_RB(&drq->rb_node)) {
		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		RB_CLEAR(&drq->rb_node);
	}
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;
	struct deadline_rq *drq;

	while (n) {
		drq = rb_entry_drq(n);

		if (sector < drq->rb_key)
			n = n->rb_left;
		else if (sector > drq->rb_key)
			n = n->rb_right;
		else
			return drq->request;
	}

	return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
	struct rb_node *n = dd->sort_list[data_dir].rb_node;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_drq(n);

		n = n->rb_left;
	}
}

/*
 * add drq to rbtree and fifo
 */
static inline void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	const int data_dir = rq_data_dir(drq->request);

	deadline_add_drq_rb(dd, drq);
	/*
	 * set expire time and add to fifo list
	 */
	drq->expires = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

	if (rq_mergeable(rq)) {
		deadline_add_drq_hash(dd, drq);

		if (!q->last_merge)
			q->last_merge = rq;
	}
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);

	if (drq) {
		struct deadline_data *dd = q->elevator->elevator_data;

		list_del_init(&drq->fifo);
		deadline_remove_merge_hints(q, drq);
		deadline_del_drq_rb(dd, drq);
	}
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * try last_merge to avoid going to hash
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t rb_key = bio->bi_sector + bio_sectors(bio);

		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
		if (__rq) {
			BUG_ON(rb_key != rq_rb_key(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	q->last_merge = __rq;
out_insert:
	if (ret)
		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
	*req = __rq;
	return ret;
}

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);

	/*
	 * hash always needs to be repositioned, key is end sector
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}

	q->last_merge = req;
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
			 struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(req);
	struct deadline_rq *dnext = RQ_DATA(next);

	BUG_ON(!drq);
	BUG_ON(!dnext);

	/*
	 * reposition drq (this is the merged request) in hash, and in rbtree
	 * in case of a front merge
	 */
	deadline_del_drq_hash(drq);
	deadline_add_drq_hash(dd, drq);

	if (rq_rb_key(req) != drq->rb_key) {
		deadline_del_drq_rb(dd, drq);
		deadline_add_drq_rb(dd, drq);
	}

	/*
	 * if dnext expires before drq, assign its expire time to drq
	 * and move into dnext position (dnext will be deleted) in fifo
	 */
	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
		if (time_before(dnext->expires, drq->expires)) {
			list_move(&drq->fifo, &dnext->fifo);
			drq->expires = dnext->expires;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
	request_queue_t *q = drq->request->q;

	deadline_remove_request(q, drq->request);
	list_add_tail(&drq->request->queuelist, dd->dispatch);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	dd->next_drq[READ] = NULL;
	dd->next_drq[WRITE] = NULL;

	if (rbnext)
		dd->next_drq[data_dir] = rb_entry_drq(rbnext);

	dd->last_sector = drq->request->sector + drq->request->nr_sectors;

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo for the given direction, 1 otherwise. Requires
 * !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * drq is expired!
	 */
	if (time_after(jiffies, drq->expires))
		return 1;

	return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct deadline_data *dd)
{
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct deadline_rq *drq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
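	/*
	 * Selection order: (1) keep feeding the current batch while it is
	 * sequential and under fifo_batch; otherwise (2) prefer reads,
	 * unless writes have already been passed over writes_starved
	 * times; (3) within the chosen direction, serve the fifo head if
	 * its deadline has expired, else continue in sector order.
	 */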
	if (dd->next_drq[WRITE])
		drq = dd->next_drq[WRITE];
	else
		drq = dd->next_drq[READ];

	if (drq) {
		/* we have a "next request" */

		if (dd->last_sector != drq->request->sector)
			/* end the batch on a non sequential request */
			dd->batching += dd->fifo_batch;

		if (dd->batching < dd->fifo_batch)
			/* we are still entitled to batch */
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir)) {
		/* An expired request exists - satisfy it */
		dd->batching = 0;
		drq = list_entry_fifo(dd->fifo_list[data_dir].next);

	} else if (dd->next_drq[data_dir]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		drq = dd->next_drq[data_dir];
	} else {
		/*
		 * The last req was the other direction or we have run out of
		 * higher-sectored requests. Go back to the lowest sectored
		 * request (1 way elevator) and start a new batch.
		 */
		dd->batching = 0;
		drq = deadline_find_first_drq(dd, data_dir);
	}

dispatch_request:
	/*
	 * drq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, drq);

	return 1;
}

static struct request *deadline_next_request(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *rq;

	/*
	 * if there are still requests on the dispatch queue, grab the first one
	 */
	if (!list_empty(dd->dispatch)) {
dispatch:
		rq = list_entry_rq(dd->dispatch->next);
		return rq;
	}

	if (deadline_dispatch_requests(dd))
		goto dispatch;

	return NULL;
}

static void
deadline_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/* barriers must flush the reorder queue */
	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
			&& where == ELEVATOR_INSERT_SORT))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (deadline_dispatch_requests(dd))
				;
			list_add_tail(&rq->queuelist, dd->dispatch);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, dd->dispatch);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			deadline_add_request(q, rq);
			break;
		default:
			printk("%s: bad insert point %d\n", __FUNCTION__, where);
			return;
	}
}

static int deadline_queue_empty(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	if (!list_empty(&dd->fifo_list[WRITE])
	    || !list_empty(&dd->fifo_list[READ])
	    || !list_empty(dd->dispatch))
		return 0;

	return 1;
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&drq->rb_node);

	if (rbprev)
		return rb_entry_drq(rbprev)->request;

	return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&drq->rb_node);

	if (rbnext)
		return rb_entry_drq(rbnext)->request;

	return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	mempool_destroy(dd->drq_pool);
	kfree(dd->hash);
	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static int deadline_init_queue(request_queue_t *q, elevator_t *e)
{
	struct deadline_data *dd;
	int i;

	if (!drq_pool)
		return -ENOMEM;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		return -ENOMEM;
	memset(dd, 0, sizeof(*dd));

	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!dd->hash) {
		kfree(dd);
		return -ENOMEM;
	}

	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					mempool_free_slab, drq_pool, q->node);
	if (!dd->drq_pool) {
		kfree(dd->hash);
		kfree(dd);
		return -ENOMEM;
	}

	for (i = 0; i < DL_HASH_ENTRIES; i++)
		INIT_LIST_HEAD(&dd->hash[i]);

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->dispatch = &q->queue_head;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	e->elevator_data = dd;
	return 0;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	if (drq) {
		mempool_free(drq, dd->drq_pool);
		rq->elevator_private = NULL;
	}
}

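/*
 * allocate per-request data for rq; returns 0 on success, non-zero to
 * tell the block layer the allocation failed (in practice mempool_alloc()
 * can only fail here when gfp_mask does not allow waiting)
 */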
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		     int gfp_mask)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq;

	drq = mempool_alloc(dd->drq_pool, gfp_mask);
	if (drq) {
		memset(drq, 0, sizeof(*drq));
		RB_CLEAR(&drq->rb_node);
		drq->request = rq;

		INIT_LIST_HEAD(&drq->hash);
		drq->on_hash = 0;

		INIT_LIST_HEAD(&drq->fifo);

		rq->elevator_private = drq;
		return 0;
	}

	return 1;
}

/*
 * sysfs parts below
 */
struct deadline_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct deadline_data *, char *);
	ssize_t (*store)(struct deadline_data *, const char *, size_t);
};

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct deadline_data *dd, char *page)		\
{									\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
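/*
 * For illustration, SHOW_FUNCTION(deadline_readexpire_show,
 * dd->fifo_expire[READ], 1) expands to roughly:
 *
 *	static ssize_t deadline_readexpire_show(struct deadline_data *dd,
 *						char *page)
 *	{
 *		int __data = dd->fifo_expire[READ];
 *		__data = jiffies_to_msecs(__data);
 *		return deadline_var_show(__data, page);
 *	}
 *
 * i.e. expiry values are converted from jiffies to milliseconds on the
 * way out when __CONV is set.
 */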

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
{									\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
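/*
 * Stores are the mirror image: the written string is parsed as a decimal
 * integer, clamped to [MIN, MAX], and converted from milliseconds to
 * jiffies when __CONV is set, so e.g. "echo 50 > read_expire" (from the
 * iosched sysfs directory) sets a 50 ms read deadline.
 */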

static struct deadline_fs_entry deadline_readexpire_entry = {
	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_readexpire_show,
	.store = deadline_readexpire_store,
};
static struct deadline_fs_entry deadline_writeexpire_entry = {
	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_writeexpire_show,
	.store = deadline_writeexpire_store,
};
static struct deadline_fs_entry deadline_writesstarved_entry = {
	.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_writesstarved_show,
	.store = deadline_writesstarved_store,
};
static struct deadline_fs_entry deadline_frontmerges_entry = {
	.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_frontmerges_show,
	.store = deadline_frontmerges_store,
};
static struct deadline_fs_entry deadline_fifobatch_entry = {
	.attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
	.show = deadline_fifobatch_show,
	.store = deadline_fifobatch_store,
};

static struct attribute *default_attrs[] = {
	&deadline_readexpire_entry.attr,
	&deadline_writeexpire_entry.attr,
	&deadline_writesstarved_entry.attr,
	&deadline_frontmerges_entry.attr,
	&deadline_fifobatch_entry.attr,
	NULL,
};

#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)

static ssize_t
deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct deadline_fs_entry *entry = to_deadline(attr);

	if (!entry->show)
		return -EIO;

	return entry->show(e->elevator_data, page);
}

static ssize_t
deadline_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct deadline_fs_entry *entry = to_deadline(attr);

	if (!entry->store)
		return -EIO;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops deadline_sysfs_ops = {
	.show	= deadline_attr_show,
	.store	= deadline_attr_store,
};

static struct kobj_type deadline_ktype = {
	.sysfs_ops	= &deadline_sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_next_req_fn =		deadline_next_request,
		.elevator_add_req_fn =		deadline_insert_request,
		.elevator_remove_req_fn =	deadline_remove_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	deadline_former_request,
		.elevator_latter_req_fn =	deadline_latter_request,
		.elevator_set_req_fn =		deadline_set_request,
		.elevator_put_req_fn =		deadline_put_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_ktype = &deadline_ktype,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
	int ret;

	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
				     0, 0, NULL, NULL);

	if (!drq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_deadline);
	if (ret)
		kmem_cache_destroy(drq_pool);

	return ret;
}

static void __exit deadline_exit(void)
{
	/* unregister the elevator before tearing down its drq slab cache */
	elv_unregister(&iosched_deadline);
	kmem_cache_destroy(drq_pool);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");