1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished io
5  *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/sched/clock.h>
12 #include <linux/blkdev.h>
13 #include <linux/elevator.h>
14 #include <linux/ktime.h>
15 #include <linux/rbtree.h>
16 #include <linux/ioprio.h>
17 #include <linux/blktrace_api.h>
18 #include <linux/blk-cgroup.h>
19 #include "blk.h"
20 #include "blk-wbt.h"
21
22 /*
23  * tunables
24  */
25 /* max queue in one round of service */
26 static const int cfq_quantum = 8;
27 static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
28 /* maximum backwards seek, in KiB */
29 static const int cfq_back_max = 16 * 1024;
30 /* penalty of a backwards seek */
31 static const int cfq_back_penalty = 2;
32 static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
33 static u64 cfq_slice_async = NSEC_PER_SEC / 25;
34 static const int cfq_slice_async_rq = 2;
35 static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
36 static u64 cfq_group_idle = NSEC_PER_SEC / 125;
37 static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
38 static const int cfq_hist_divisor = 4;
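/*
 * For reference, these defaults correspond roughly to: 250ms/125ms FIFO
 * expiry for async/sync requests, 100ms sync and 40ms async time slices,
 * 8ms queue and group idle times, and a 300ms target latency.
 */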
39
40 /*
41  * offset from end of queue service tree for idle class
42  */
43 #define CFQ_IDLE_DELAY          (NSEC_PER_SEC / 5)
44 /* offset from end of group service tree under time slice mode */
45 #define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
46 /* offset from end of group service under IOPS mode */
47 #define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
48
49 /*
50  * below this threshold, we consider thinktime immediate
51  */
52 #define CFQ_MIN_TT              (2 * NSEC_PER_SEC / HZ)
53
54 #define CFQ_SLICE_SCALE         (5)
55 #define CFQ_HW_QUEUE_MIN        (5)
56 #define CFQ_SERVICE_SHIFT       12
57
58 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
59 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
60 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
61 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
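/*
 * For illustration: seek_history is effectively a 32-bit shift register
 * with one bit per recent request (a set bit meaning the request was far
 * from the previous one, roughly beyond CFQQ_SEEK_THR = 800 sectors).
 * CFQQ_SEEKY() therefore flags a queue once more than 32/8 = 4 of its
 * last 32 requests required such a seek.
 */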
62
63 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
64 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
65 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
66
67 static struct kmem_cache *cfq_pool;
68
69 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
70 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
71 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
72
73 #define sample_valid(samples)   ((samples) > 80)
74 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
75
76 /* blkio-related constants */
77 #define CFQ_WEIGHT_LEGACY_MIN   10
78 #define CFQ_WEIGHT_LEGACY_DFL   500
79 #define CFQ_WEIGHT_LEGACY_MAX   1000
80
81 struct cfq_ttime {
82         u64 last_end_request;
83
84         u64 ttime_total;
85         u64 ttime_mean;
86         unsigned long ttime_samples;
87 };
88
89 /*
90  * Most of our rbtree usage is for sorting with min extraction, so
91  * if we cache the leftmost node we don't have to walk down the tree
92  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
93  * move this into the elevator for the rq sorting as well.
94  */
95 struct cfq_rb_root {
96         struct rb_root rb;
97         struct rb_node *left;
98         unsigned count;
99         u64 min_vdisktime;
100         struct cfq_ttime ttime;
101 };
102 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
103                         .ttime = {.last_end_request = ktime_get_ns(),},}
104
105 /*
106  * Per process-grouping structure
107  */
108 struct cfq_queue {
109         /* reference count */
110         int ref;
111         /* various state flags, see below */
112         unsigned int flags;
113         /* parent cfq_data */
114         struct cfq_data *cfqd;
115         /* service_tree member */
116         struct rb_node rb_node;
117         /* service_tree key */
118         u64 rb_key;
119         /* prio tree member */
120         struct rb_node p_node;
121         /* prio tree root we belong to, if any */
122         struct rb_root *p_root;
123         /* sorted list of pending requests */
124         struct rb_root sort_list;
125         /* if fifo isn't expired, next request to serve */
126         struct request *next_rq;
127         /* requests queued in sort_list */
128         int queued[2];
129         /* currently allocated requests */
130         int allocated[2];
131         /* fifo list of requests in sort_list */
132         struct list_head fifo;
133
134         /* time when queue got scheduled in to dispatch first request. */
135         u64 dispatch_start;
136         u64 allocated_slice;
137         u64 slice_dispatch;
138         /* time when first request from queue completed and slice started. */
139         u64 slice_start;
140         u64 slice_end;
141         s64 slice_resid;
142
143         /* pending priority requests */
144         int prio_pending;
145         /* number of requests that are on the dispatch list or inside driver */
146         int dispatched;
147
148         /* io prio of this group */
149         unsigned short ioprio, org_ioprio;
150         unsigned short ioprio_class, org_ioprio_class;
151
152         pid_t pid;
153
154         u32 seek_history;
155         sector_t last_request_pos;
156
157         struct cfq_rb_root *service_tree;
158         struct cfq_queue *new_cfqq;
159         struct cfq_group *cfqg;
160         /* Number of sectors dispatched from queue in single dispatch round */
161         unsigned long nr_sectors;
162 };
163
164 /*
165  * First index in the service_trees.
166  * IDLE is handled separately, so it has negative index
167  */
168 enum wl_class_t {
169         BE_WORKLOAD = 0,
170         RT_WORKLOAD = 1,
171         IDLE_WORKLOAD = 2,
172         CFQ_PRIO_NR,
173 };
174
175 /*
176  * Second index in the service_trees.
177  */
178 enum wl_type_t {
179         ASYNC_WORKLOAD = 0,
180         SYNC_NOIDLE_WORKLOAD = 1,
181         SYNC_WORKLOAD = 2
182 };
183
184 struct cfqg_stats {
185 #ifdef CONFIG_CFQ_GROUP_IOSCHED
186         /* number of ios merged */
187         struct blkg_rwstat              merged;
188         /* total time spent on device in ns, may not be accurate w/ queueing */
189         struct blkg_rwstat              service_time;
190         /* total time spent waiting in scheduler queue in ns */
191         struct blkg_rwstat              wait_time;
192         /* number of IOs queued up */
193         struct blkg_rwstat              queued;
194         /* total disk time and nr sectors dispatched by this group */
195         struct blkg_stat                time;
196 #ifdef CONFIG_DEBUG_BLK_CGROUP
197         /* time not charged to this cgroup */
198         struct blkg_stat                unaccounted_time;
199         /* sum of number of ios queued across all samples */
200         struct blkg_stat                avg_queue_size_sum;
201         /* count of samples taken for average */
202         struct blkg_stat                avg_queue_size_samples;
203         /* how many times this group has been removed from service tree */
204         struct blkg_stat                dequeue;
205         /* total time spent waiting for it to be assigned a timeslice. */
206         struct blkg_stat                group_wait_time;
207         /* time spent idling for this blkcg_gq */
208         struct blkg_stat                idle_time;
209         /* total time the active queue was empty while other requests were queued */
210         struct blkg_stat                empty_time;
211         /* fields after this shouldn't be cleared on stat reset */
212         uint64_t                        start_group_wait_time;
213         uint64_t                        start_idle_time;
214         uint64_t                        start_empty_time;
215         uint16_t                        flags;
216 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
217 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
218 };
219
220 /* Per-cgroup data */
221 struct cfq_group_data {
222         /* must be the first member */
223         struct blkcg_policy_data cpd;
224
225         unsigned int weight;
226         unsigned int leaf_weight;
227 };
228
229 /* This is per cgroup per device grouping structure */
230 struct cfq_group {
231         /* must be the first member */
232         struct blkg_policy_data pd;
233
234         /* group service_tree member */
235         struct rb_node rb_node;
236
237         /* group service_tree key */
238         u64 vdisktime;
239
240         /*
241          * The number of active cfqgs and sum of their weights under this
242          * cfqg.  This covers this cfqg's leaf_weight and all children's
243          * weights, but does not cover weights of further descendants.
244          *
245          * If a cfqg is on the service tree, it's active.  An active cfqg
246          * also activates its parent and contributes to the children_weight
247          * of the parent.
248          */
249         int nr_active;
250         unsigned int children_weight;
251
252         /*
253          * vfraction is the fraction of vdisktime that the tasks in this
254          * cfqg are entitled to.  This is determined by compounding the
255          * ratios walking up from this cfqg to the root.
256          *
257          * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
258          * vfractions on a service tree is approximately 1.  The sum may
259          * deviate a bit due to rounding errors and fluctuations caused by
260          * cfqgs entering and leaving the service tree.
261          */
262         unsigned int vfraction;
263
264         /*
265          * There are two weights - (internal) weight is the weight of this
266          * cfqg against the sibling cfqgs.  leaf_weight is the weight of
267          * this cfqg against the child cfqgs.  For the root cfqg, both
268          * weights are kept in sync for backward compatibility.
269          */
270         unsigned int weight;
271         unsigned int new_weight;
272         unsigned int dev_weight;
273
274         unsigned int leaf_weight;
275         unsigned int new_leaf_weight;
276         unsigned int dev_leaf_weight;
277
278         /* number of cfqq currently on this group */
279         int nr_cfqq;
280
281         /*
282          * Per group busy queues average. Useful for workload slice calc. We
283          * create the array for each prio class but at run time it is used
284          * only for the RT and BE classes; the slot for the IDLE class remains unused.
285          * This is primarily done to avoid confusion and a gcc warning.
286          */
287         unsigned int busy_queues_avg[CFQ_PRIO_NR];
288         /*
289          * rr lists of queues with requests. We maintain service trees for
290          * RT and BE classes. These trees are subdivided into subclasses
291          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
292          * class there is no subclassification and all the cfq queues go on
293          * a single tree service_tree_idle.
294          * Counts are embedded in the cfq_rb_root
295          */
296         struct cfq_rb_root service_trees[2][3];
297         struct cfq_rb_root service_tree_idle;
298
299         u64 saved_wl_slice;
300         enum wl_type_t saved_wl_type;
301         enum wl_class_t saved_wl_class;
302
303         /* number of requests that are on the dispatch list or inside driver */
304         int dispatched;
305         struct cfq_ttime ttime;
306         struct cfqg_stats stats;        /* stats for this cfqg */
307
308         /* async queue for each priority case */
309         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
310         struct cfq_queue *async_idle_cfqq;
311
312 };
313
314 struct cfq_io_cq {
315         struct io_cq            icq;            /* must be the first member */
316         struct cfq_queue        *cfqq[2];
317         struct cfq_ttime        ttime;
318         int                     ioprio;         /* the current ioprio */
319 #ifdef CONFIG_CFQ_GROUP_IOSCHED
320         uint64_t                blkcg_serial_nr; /* the current blkcg serial */
321 #endif
322 };
323
324 /*
325  * Per block device queue structure
326  */
327 struct cfq_data {
328         struct request_queue *queue;
329         /* Root service tree for cfq_groups */
330         struct cfq_rb_root grp_service_tree;
331         struct cfq_group *root_group;
332
333         /*
334          * The priority currently being served
335          */
336         enum wl_class_t serving_wl_class;
337         enum wl_type_t serving_wl_type;
338         u64 workload_expires;
339         struct cfq_group *serving_group;
340
341         /*
342          * Each priority tree is sorted by next_request position.  These
343          * trees are used when determining if two or more queues are
344          * interleaving requests (see cfq_close_cooperator).
345          */
346         struct rb_root prio_trees[CFQ_PRIO_LISTS];
347
348         unsigned int busy_queues;
349         unsigned int busy_sync_queues;
350
351         int rq_in_driver;
352         int rq_in_flight[2];
353
354         /*
355          * queue-depth detection
356          */
357         int rq_queued;
358         int hw_tag;
359         /*
360          * hw_tag can be
361          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
362          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
363          *  0 => no NCQ
364          */
365         int hw_tag_est_depth;
366         unsigned int hw_tag_samples;
367
368         /*
369          * idle window management
370          */
371         struct hrtimer idle_slice_timer;
372         struct work_struct unplug_work;
373
374         struct cfq_queue *active_queue;
375         struct cfq_io_cq *active_cic;
376
377         sector_t last_position;
378
379         /*
380          * tunables, see top of file
381          */
382         unsigned int cfq_quantum;
383         unsigned int cfq_back_penalty;
384         unsigned int cfq_back_max;
385         unsigned int cfq_slice_async_rq;
386         unsigned int cfq_latency;
387         u64 cfq_fifo_expire[2];
388         u64 cfq_slice[2];
389         u64 cfq_slice_idle;
390         u64 cfq_group_idle;
391         u64 cfq_target_latency;
392
393         /*
394          * Fallback dummy cfqq for extreme OOM conditions
395          */
396         struct cfq_queue oom_cfqq;
397
398         u64 last_delayed_sync;
399 };
400
401 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
402 static void cfq_put_queue(struct cfq_queue *cfqq);
403
404 static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
405                                             enum wl_class_t class,
406                                             enum wl_type_t type)
407 {
408         if (!cfqg)
409                 return NULL;
410
411         if (class == IDLE_WORKLOAD)
412                 return &cfqg->service_tree_idle;
413
414         return &cfqg->service_trees[class][type];
415 }
416
417 enum cfqq_state_flags {
418         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
419         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
420         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
421         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
422         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
423         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
424         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
425         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
426         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
427         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
428         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
429         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
430         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
431 };
432
433 #define CFQ_CFQQ_FNS(name)                                              \
434 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
435 {                                                                       \
436         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
437 }                                                                       \
438 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
439 {                                                                       \
440         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
441 }                                                                       \
442 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
443 {                                                                       \
444         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
445 }
446
447 CFQ_CFQQ_FNS(on_rr);
448 CFQ_CFQQ_FNS(wait_request);
449 CFQ_CFQQ_FNS(must_dispatch);
450 CFQ_CFQQ_FNS(must_alloc_slice);
451 CFQ_CFQQ_FNS(fifo_expire);
452 CFQ_CFQQ_FNS(idle_window);
453 CFQ_CFQQ_FNS(prio_changed);
454 CFQ_CFQQ_FNS(slice_new);
455 CFQ_CFQQ_FNS(sync);
456 CFQ_CFQQ_FNS(coop);
457 CFQ_CFQQ_FNS(split_coop);
458 CFQ_CFQQ_FNS(deep);
459 CFQ_CFQQ_FNS(wait_busy);
460 #undef CFQ_CFQQ_FNS
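/*
 * For illustration, CFQ_CFQQ_FNS(on_rr) above expands to roughly:
 *
 *   static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *   { cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr); }
 *   static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *   { cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr); }
 *   static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *   { return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0; }
 */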
461
462 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
463
464 /* cfqg stats flags */
465 enum cfqg_stats_flags {
466         CFQG_stats_waiting = 0,
467         CFQG_stats_idling,
468         CFQG_stats_empty,
469 };
470
471 #define CFQG_FLAG_FNS(name)                                             \
472 static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
473 {                                                                       \
474         stats->flags |= (1 << CFQG_stats_##name);                       \
475 }                                                                       \
476 static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
477 {                                                                       \
478         stats->flags &= ~(1 << CFQG_stats_##name);                      \
479 }                                                                       \
480 static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
481 {                                                                       \
482         return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
483 }                                                                       \
484
485 CFQG_FLAG_FNS(waiting)
486 CFQG_FLAG_FNS(idling)
487 CFQG_FLAG_FNS(empty)
488 #undef CFQG_FLAG_FNS
489
490 /* This should be called with the queue_lock held. */
491 static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
492 {
493         unsigned long long now;
494
495         if (!cfqg_stats_waiting(stats))
496                 return;
497
498         now = sched_clock();
499         if (time_after64(now, stats->start_group_wait_time))
500                 blkg_stat_add(&stats->group_wait_time,
501                               now - stats->start_group_wait_time);
502         cfqg_stats_clear_waiting(stats);
503 }
504
505 /* This should be called with the queue_lock held. */
506 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
507                                                  struct cfq_group *curr_cfqg)
508 {
509         struct cfqg_stats *stats = &cfqg->stats;
510
511         if (cfqg_stats_waiting(stats))
512                 return;
513         if (cfqg == curr_cfqg)
514                 return;
515         stats->start_group_wait_time = sched_clock();
516         cfqg_stats_mark_waiting(stats);
517 }
518
519 /* This should be called with the queue_lock held. */
520 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
521 {
522         unsigned long long now;
523
524         if (!cfqg_stats_empty(stats))
525                 return;
526
527         now = sched_clock();
528         if (time_after64(now, stats->start_empty_time))
529                 blkg_stat_add(&stats->empty_time,
530                               now - stats->start_empty_time);
531         cfqg_stats_clear_empty(stats);
532 }
533
534 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
535 {
536         blkg_stat_add(&cfqg->stats.dequeue, 1);
537 }
538
539 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
540 {
541         struct cfqg_stats *stats = &cfqg->stats;
542
543         if (blkg_rwstat_total(&stats->queued))
544                 return;
545
546         /*
547          * The group is already marked empty. This can happen if a cfqq got a
548          * new request in the parent group and moved to this group while being
549          * added to the service tree. Just ignore the event and move on.
550          */
551         if (cfqg_stats_empty(stats))
552                 return;
553
554         stats->start_empty_time = sched_clock();
555         cfqg_stats_mark_empty(stats);
556 }
557
558 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
559 {
560         struct cfqg_stats *stats = &cfqg->stats;
561
562         if (cfqg_stats_idling(stats)) {
563                 unsigned long long now = sched_clock();
564
565                 if (time_after64(now, stats->start_idle_time))
566                         blkg_stat_add(&stats->idle_time,
567                                       now - stats->start_idle_time);
568                 cfqg_stats_clear_idling(stats);
569         }
570 }
571
572 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
573 {
574         struct cfqg_stats *stats = &cfqg->stats;
575
576         BUG_ON(cfqg_stats_idling(stats));
577
578         stats->start_idle_time = sched_clock();
579         cfqg_stats_mark_idling(stats);
580 }
581
582 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
583 {
584         struct cfqg_stats *stats = &cfqg->stats;
585
586         blkg_stat_add(&stats->avg_queue_size_sum,
587                       blkg_rwstat_total(&stats->queued));
588         blkg_stat_add(&stats->avg_queue_size_samples, 1);
589         cfqg_stats_update_group_wait_time(stats);
590 }
591
592 #else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
593
594 static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
595 static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
596 static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
597 static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
598 static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
599 static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
600 static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
601
602 #endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
603
604 #ifdef CONFIG_CFQ_GROUP_IOSCHED
605
606 static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
607 {
608         return pd ? container_of(pd, struct cfq_group, pd) : NULL;
609 }
610
611 static struct cfq_group_data
612 *cpd_to_cfqgd(struct blkcg_policy_data *cpd)
613 {
614         return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
615 }
616
617 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
618 {
619         return pd_to_blkg(&cfqg->pd);
620 }
621
622 static struct blkcg_policy blkcg_policy_cfq;
623
624 static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
625 {
626         return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
627 }
628
629 static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
630 {
631         return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
632 }
633
634 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
635 {
636         struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
637
638         return pblkg ? blkg_to_cfqg(pblkg) : NULL;
639 }
640
641 static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
642                                       struct cfq_group *ancestor)
643 {
644         return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
645                                     cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
646 }
647
648 static inline void cfqg_get(struct cfq_group *cfqg)
649 {
650         return blkg_get(cfqg_to_blkg(cfqg));
651 }
652
653 static inline void cfqg_put(struct cfq_group *cfqg)
654 {
655         return blkg_put(cfqg_to_blkg(cfqg));
656 }
657
658 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  do {                    \
659         char __pbuf[128];                                               \
660                                                                         \
661         blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
662         blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
663                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
664                         cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
665                           __pbuf, ##args);                              \
666 } while (0)
667
668 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)  do {                    \
669         char __pbuf[128];                                               \
670                                                                         \
671         blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
672         blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
673 } while (0)
674
675 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
676                                             struct cfq_group *curr_cfqg,
677                                             unsigned int op)
678 {
679         blkg_rwstat_add(&cfqg->stats.queued, op, 1);
680         cfqg_stats_end_empty_time(&cfqg->stats);
681         cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
682 }
683
684 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
685                         uint64_t time, unsigned long unaccounted_time)
686 {
687         blkg_stat_add(&cfqg->stats.time, time);
688 #ifdef CONFIG_DEBUG_BLK_CGROUP
689         blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
690 #endif
691 }
692
693 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
694                                                unsigned int op)
695 {
696         blkg_rwstat_add(&cfqg->stats.queued, op, -1);
697 }
698
699 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
700                                                unsigned int op)
701 {
702         blkg_rwstat_add(&cfqg->stats.merged, op, 1);
703 }
704
705 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
706                         uint64_t start_time, uint64_t io_start_time,
707                         unsigned int op)
708 {
709         struct cfqg_stats *stats = &cfqg->stats;
710         unsigned long long now = sched_clock();
711
712         if (time_after64(now, io_start_time))
713                 blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
714         if (time_after64(io_start_time, start_time))
715                 blkg_rwstat_add(&stats->wait_time, op,
716                                 io_start_time - start_time);
717 }
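/*
 * In the accounting above, wait_time accumulates the gap between request
 * start and dispatch to the device, while service_time accumulates the
 * gap between dispatch and completion.
 */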
718
719 /* @stats = 0 */
720 static void cfqg_stats_reset(struct cfqg_stats *stats)
721 {
722         /* queued stats shouldn't be cleared */
723         blkg_rwstat_reset(&stats->merged);
724         blkg_rwstat_reset(&stats->service_time);
725         blkg_rwstat_reset(&stats->wait_time);
726         blkg_stat_reset(&stats->time);
727 #ifdef CONFIG_DEBUG_BLK_CGROUP
728         blkg_stat_reset(&stats->unaccounted_time);
729         blkg_stat_reset(&stats->avg_queue_size_sum);
730         blkg_stat_reset(&stats->avg_queue_size_samples);
731         blkg_stat_reset(&stats->dequeue);
732         blkg_stat_reset(&stats->group_wait_time);
733         blkg_stat_reset(&stats->idle_time);
734         blkg_stat_reset(&stats->empty_time);
735 #endif
736 }
737
738 /* @to += @from */
739 static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
740 {
741         /* queued stats shouldn't be cleared */
742         blkg_rwstat_add_aux(&to->merged, &from->merged);
743         blkg_rwstat_add_aux(&to->service_time, &from->service_time);
744         blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
745         blkg_stat_add_aux(&to->time, &from->time);
746 #ifdef CONFIG_DEBUG_BLK_CGROUP
747         blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
748         blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
749         blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
750         blkg_stat_add_aux(&to->dequeue, &from->dequeue);
751         blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
752         blkg_stat_add_aux(&to->idle_time, &from->idle_time);
753         blkg_stat_add_aux(&to->empty_time, &from->empty_time);
754 #endif
755 }
756
757 /*
758  * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
759  * recursive stats can still account for the amount used by this cfqg after
760  * it's gone.
761  */
762 static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
763 {
764         struct cfq_group *parent = cfqg_parent(cfqg);
765
766         lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
767
768         if (unlikely(!parent))
769                 return;
770
771         cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
772         cfqg_stats_reset(&cfqg->stats);
773 }
774
775 #else   /* CONFIG_CFQ_GROUP_IOSCHED */
776
777 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
778 static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
779                                       struct cfq_group *ancestor)
780 {
781         return true;
782 }
783 static inline void cfqg_get(struct cfq_group *cfqg) { }
784 static inline void cfqg_put(struct cfq_group *cfqg) { }
785
786 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
787         blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
788                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
789                         cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
790                                 ##args)
791 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
792
793 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
794                         struct cfq_group *curr_cfqg, unsigned int op) { }
795 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
796                         uint64_t time, unsigned long unaccounted_time) { }
797 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
798                         unsigned int op) { }
799 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
800                         unsigned int op) { }
801 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
802                         uint64_t start_time, uint64_t io_start_time,
803                         unsigned int op) { }
804
805 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
806
807 #define cfq_log(cfqd, fmt, args...)     \
808         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
809
810 /* Traverses through cfq group service trees */
811 #define for_each_cfqg_st(cfqg, i, j, st) \
812         for (i = 0; i <= IDLE_WORKLOAD; i++) \
813                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
814                         : &cfqg->service_tree_idle; \
815                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
816                         (i == IDLE_WORKLOAD && j == 0); \
817                         j++, st = i < IDLE_WORKLOAD ? \
818                         &cfqg->service_trees[i][j]: NULL) \
819
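/*
 * For illustration: the loop above visits all seven service trees of a
 * cfqg in order - service_trees[BE][ASYNC..SYNC], then
 * service_trees[RT][ASYNC..SYNC], and finally service_tree_idle.
 */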
820 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
821         struct cfq_ttime *ttime, bool group_idle)
822 {
823         u64 slice;
824         if (!sample_valid(ttime->ttime_samples))
825                 return false;
826         if (group_idle)
827                 slice = cfqd->cfq_group_idle;
828         else
829                 slice = cfqd->cfq_slice_idle;
830         return ttime->ttime_mean > slice;
831 }
832
833 static inline bool iops_mode(struct cfq_data *cfqd)
834 {
835         /*
836          * If we are not idling on queues and it is an NCQ drive, requests
837          * execute in parallel and measuring time is not possible in most
838          * cases, short of driving shallower queue depths, which would become
839          * a performance bottleneck. In such cases, switch to providing
840          * fairness in terms of number of IOs.
841          */
842         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
843                 return true;
844         else
845                 return false;
846 }
847
848 static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
849 {
850         if (cfq_class_idle(cfqq))
851                 return IDLE_WORKLOAD;
852         if (cfq_class_rt(cfqq))
853                 return RT_WORKLOAD;
854         return BE_WORKLOAD;
855 }
856
857
858 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
859 {
860         if (!cfq_cfqq_sync(cfqq))
861                 return ASYNC_WORKLOAD;
862         if (!cfq_cfqq_idle_window(cfqq))
863                 return SYNC_NOIDLE_WORKLOAD;
864         return SYNC_WORKLOAD;
865 }
866
867 static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
868                                         struct cfq_data *cfqd,
869                                         struct cfq_group *cfqg)
870 {
871         if (wl_class == IDLE_WORKLOAD)
872                 return cfqg->service_tree_idle.count;
873
874         return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
875                 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
876                 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
877 }
878
879 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
880                                         struct cfq_group *cfqg)
881 {
882         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
883                 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
884 }
885
886 static void cfq_dispatch_insert(struct request_queue *, struct request *);
887 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
888                                        struct cfq_io_cq *cic, struct bio *bio);
889
890 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
891 {
892         /* cic->icq is the first member, %NULL will convert to %NULL */
893         return container_of(icq, struct cfq_io_cq, icq);
894 }
895
896 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
897                                                struct io_context *ioc)
898 {
899         if (ioc)
900                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
901         return NULL;
902 }
903
904 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
905 {
906         return cic->cfqq[is_sync];
907 }
908
909 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
910                                 bool is_sync)
911 {
912         cic->cfqq[is_sync] = cfqq;
913 }
914
915 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
916 {
917         return cic->icq.q->elevator->elevator_data;
918 }
919
920 /*
921  * Schedule a run of the queue if there are requests pending and nothing in
922  * the driver will restart queueing.
923  */
924 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
925 {
926         if (cfqd->busy_queues) {
927                 cfq_log(cfqd, "schedule dispatch");
928                 kblockd_schedule_work(&cfqd->unplug_work);
929         }
930 }
931
932 /*
933  * Scale schedule slice based on io priority. Use the sync time slice only
934  * if a queue is marked sync and has sync io queued. A sync queue with async
935  * io only should not get the full sync slice length.
936  */
937 static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
938                                  unsigned short prio)
939 {
940         u64 base_slice = cfqd->cfq_slice[sync];
941         u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);
942
943         WARN_ON(prio >= IOPRIO_BE_NR);
944
945         return base_slice + (slice * (4 - prio));
946 }
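/*
 * For example, with the default 100ms sync base slice the scaling above
 * gives a prio 0 (highest) queue 100ms + 4 * 20ms = 180ms, the default
 * prio 4 queue 100ms, and a prio 7 queue effectively 100ms - 3 * 20ms =
 * 40ms.
 */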
947
948 static inline u64
949 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
950 {
951         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
952 }
953
954 /**
955  * cfqg_scale_charge - scale disk time charge according to cfqg weight
956  * @charge: disk time being charged
957  * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
958  *
959  * Scale @charge according to @vfraction, which is in range (0, 1].  The
960  * scaling is inversely proportional.
961  *
962  * scaled = charge / vfraction
963  *
964  * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
965  */
966 static inline u64 cfqg_scale_charge(u64 charge,
967                                     unsigned int vfraction)
968 {
969         u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
970
971         /* charge / vfraction */
972         c <<= CFQ_SERVICE_SHIFT;
973         return div_u64(c, vfraction);
974 }
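/*
 * For example, with CFQ_SERVICE_SHIFT == 12 a cfqg entitled to half of
 * the device has vfraction == 2048 (0.5 in fixed point).  Charging it
 * 1ms of disk time yields (1000000 << 24) / 2048, i.e. 2ms in the same
 * fixed-point representation: half the share means twice the vdisktime
 * charge.
 */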
975
976 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
977 {
978         s64 delta = (s64)(vdisktime - min_vdisktime);
979         if (delta > 0)
980                 min_vdisktime = vdisktime;
981
982         return min_vdisktime;
983 }
984
985 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
986 {
987         s64 delta = (s64)(vdisktime - min_vdisktime);
988         if (delta < 0)
989                 min_vdisktime = vdisktime;
990
991         return min_vdisktime;
992 }
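/*
 * As in the CFS vruntime helpers, the signed-delta comparisons above stay
 * correct even if the u64 vdisktime counters wrap, provided the two
 * values never drift more than 2^63 apart.
 */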
993
994 static void update_min_vdisktime(struct cfq_rb_root *st)
995 {
996         struct cfq_group *cfqg;
997
998         if (st->left) {
999                 cfqg = rb_entry_cfqg(st->left);
1000                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
1001                                                   cfqg->vdisktime);
1002         }
1003 }
1004
1005 /*
1006  * Get the averaged number of queues of RT/BE priority.
1007  * The average is updated with a formula that gives more weight to higher
1008  * numbers, so it follows sudden increases quickly and decays slowly.
1009  */
1010
1011 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
1012                                         struct cfq_group *cfqg, bool rt)
1013 {
1014         unsigned min_q, max_q;
1015         unsigned mult  = cfq_hist_divisor - 1;
1016         unsigned round = cfq_hist_divisor / 2;
1017         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1018
1019         min_q = min(cfqg->busy_queues_avg[rt], busy);
1020         max_q = max(cfqg->busy_queues_avg[rt], busy);
1021         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1022                 cfq_hist_divisor;
1023         return cfqg->busy_queues_avg[rt];
1024 }
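/*
 * For illustration, with cfq_hist_divisor == 4 the update above computes
 * avg = (3 * max(avg, busy) + min(avg, busy) + 2) / 4.  Going from an
 * average of 1 to 5 busy queues jumps the average to 4 in one step, while
 * going from an average of 5 down to 1 busy queue also leaves it at 4, so
 * the estimate rises quickly and falls slowly.
 */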
1025
1026 static inline u64
1027 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1028 {
1029         return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1030 }
1031
1032 static inline u64
1033 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1034 {
1035         u64 slice = cfq_prio_to_slice(cfqd, cfqq);
1036         if (cfqd->cfq_latency) {
1037                 /*
1038                  * interested queues (we consider only the ones with the same
1039                  * priority class in the cfq group)
1040                  */
1041                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1042                                                 cfq_class_rt(cfqq));
1043                 u64 sync_slice = cfqd->cfq_slice[1];
1044                 u64 expect_latency = sync_slice * iq;
1045                 u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1046
1047                 if (expect_latency > group_slice) {
1048                         u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
1049                         u64 low_slice;
1050
1051                         /* scale low_slice according to IO priority
1052                          * and sync vs async */
1053                         low_slice = div64_u64(base_low_slice*slice, sync_slice);
1054                         low_slice = min(slice, low_slice);
1055                         /* the adapted slice value is scaled to fit all iqs
1056                          * into the target latency */
1057                         slice = div64_u64(slice*group_slice, expect_latency);
1058                         slice = max(slice, low_slice);
1059                 }
1060         }
1061         return slice;
1062 }
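/*
 * For example, if a group owning the whole 300ms target latency has six
 * busy queues of the same class, the expected latency is 6 * 100ms =
 * 600ms, so each 100ms slice above is scaled down to roughly 50ms,
 * bounded from below by low_slice.
 */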
1063
1064 static inline void
1065 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1066 {
1067         u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1068         u64 now = ktime_get_ns();
1069
1070         cfqq->slice_start = now;
1071         cfqq->slice_end = now + slice;
1072         cfqq->allocated_slice = slice;
1073         cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
1074 }
1075
1076 /*
1077  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1078  * isn't valid until the first request from the dispatch is activated
1079  * and the slice time set.
1080  */
1081 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1082 {
1083         if (cfq_cfqq_slice_new(cfqq))
1084                 return false;
1085         if (ktime_get_ns() < cfqq->slice_end)
1086                 return false;
1087
1088         return true;
1089 }
1090
1091 /*
1092  * Lifted from AS - choose which of rq1 and rq2 is best served now.
1093  * We choose the request that is closest to the head right now. Distance
1094  * behind the head is penalized and only allowed to a certain extent.
1095  */
1096 static struct request *
1097 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1098 {
1099         sector_t s1, s2, d1 = 0, d2 = 0;
1100         unsigned long back_max;
1101 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
1102 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
1103         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1104
1105         if (rq1 == NULL || rq1 == rq2)
1106                 return rq2;
1107         if (rq2 == NULL)
1108                 return rq1;
1109
1110         if (rq_is_sync(rq1) != rq_is_sync(rq2))
1111                 return rq_is_sync(rq1) ? rq1 : rq2;
1112
1113         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1114                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1115
1116         s1 = blk_rq_pos(rq1);
1117         s2 = blk_rq_pos(rq2);
1118
1119         /*
1120          * by definition, 1KiB is 2 sectors
1121          */
1122         back_max = cfqd->cfq_back_max * 2;
1123
1124         /*
1125          * Strict one way elevator _except_ in the case where we allow
1126          * short backward seeks which are biased as twice the cost of a
1127          * similar forward seek.
1128          */
1129         if (s1 >= last)
1130                 d1 = s1 - last;
1131         else if (s1 + back_max >= last)
1132                 d1 = (last - s1) * cfqd->cfq_back_penalty;
1133         else
1134                 wrap |= CFQ_RQ1_WRAP;
1135
1136         if (s2 >= last)
1137                 d2 = s2 - last;
1138         else if (s2 + back_max >= last)
1139                 d2 = (last - s2) * cfqd->cfq_back_penalty;
1140         else
1141                 wrap |= CFQ_RQ2_WRAP;
1142
1143         /* Found required data */
1144
1145         /*
1146          * By doing switch() on the bit mask "wrap" we avoid having to
1147          * check two variables for all permutations: --> faster!
1148          */
1149         switch (wrap) {
1150         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1151                 if (d1 < d2)
1152                         return rq1;
1153                 else if (d2 < d1)
1154                         return rq2;
1155                 else {
1156                         if (s1 >= s2)
1157                                 return rq1;
1158                         else
1159                                 return rq2;
1160                 }
1161
1162         case CFQ_RQ2_WRAP:
1163                 return rq1;
1164         case CFQ_RQ1_WRAP:
1165                 return rq2;
1166         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1167         default:
1168                 /*
1169                  * Since both rqs are wrapped,
1170                  * start with the one that's further behind head
1171                  * (--> only *one* back seek required),
1172                  * since back seek takes more time than forward.
1173                  */
1174                 if (s1 <= s2)
1175                         return rq1;
1176                 else
1177                         return rq2;
1178         }
1179 }
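/*
 * For example, with the default back_max of 16MB and back_penalty of 2, a
 * request 1MB behind the head is treated like one 2MB ahead of it, while
 * a request more than 16MB behind is considered wrapped and loses to any
 * request that is not.
 */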
1180
1181 /*
1182  * The below is leftmost cache rbtree addon
1183  */
1184 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1185 {
1186         /* Service tree is empty */
1187         if (!root->count)
1188                 return NULL;
1189
1190         if (!root->left)
1191                 root->left = rb_first(&root->rb);
1192
1193         if (root->left)
1194                 return rb_entry(root->left, struct cfq_queue, rb_node);
1195
1196         return NULL;
1197 }
1198
1199 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1200 {
1201         if (!root->left)
1202                 root->left = rb_first(&root->rb);
1203
1204         if (root->left)
1205                 return rb_entry_cfqg(root->left);
1206
1207         return NULL;
1208 }
1209
1210 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1211 {
1212         rb_erase(n, root);
1213         RB_CLEAR_NODE(n);
1214 }
1215
1216 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1217 {
1218         if (root->left == n)
1219                 root->left = NULL;
1220         rb_erase_init(n, &root->rb);
1221         --root->count;
1222 }
1223
1224 /*
1225  * would be nice to take fifo expire time into account as well
1226  */
1227 static struct request *
1228 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1229                   struct request *last)
1230 {
1231         struct rb_node *rbnext = rb_next(&last->rb_node);
1232         struct rb_node *rbprev = rb_prev(&last->rb_node);
1233         struct request *next = NULL, *prev = NULL;
1234
1235         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1236
1237         if (rbprev)
1238                 prev = rb_entry_rq(rbprev);
1239
1240         if (rbnext)
1241                 next = rb_entry_rq(rbnext);
1242         else {
1243                 rbnext = rb_first(&cfqq->sort_list);
1244                 if (rbnext && rbnext != &last->rb_node)
1245                         next = rb_entry_rq(rbnext);
1246         }
1247
1248         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1249 }
1250
1251 static u64 cfq_slice_offset(struct cfq_data *cfqd,
1252                             struct cfq_queue *cfqq)
1253 {
1254         /*
1255          * just an approximation, should be ok.
1256          */
1257         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1258                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1259 }
1260
1261 static inline s64
1262 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1263 {
1264         return cfqg->vdisktime - st->min_vdisktime;
1265 }
1266
1267 static void
1268 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1269 {
1270         struct rb_node **node = &st->rb.rb_node;
1271         struct rb_node *parent = NULL;
1272         struct cfq_group *__cfqg;
1273         s64 key = cfqg_key(st, cfqg);
1274         int left = 1;
1275
1276         while (*node != NULL) {
1277                 parent = *node;
1278                 __cfqg = rb_entry_cfqg(parent);
1279
1280                 if (key < cfqg_key(st, __cfqg))
1281                         node = &parent->rb_left;
1282                 else {
1283                         node = &parent->rb_right;
1284                         left = 0;
1285                 }
1286         }
1287
1288         if (left)
1289                 st->left = &cfqg->rb_node;
1290
1291         rb_link_node(&cfqg->rb_node, parent, node);
1292         rb_insert_color(&cfqg->rb_node, &st->rb);
1293 }
1294
1295 /*
1296  * This has to be called only on activation of cfqg
1297  */
1298 static void
1299 cfq_update_group_weight(struct cfq_group *cfqg)
1300 {
1301         if (cfqg->new_weight) {
1302                 cfqg->weight = cfqg->new_weight;
1303                 cfqg->new_weight = 0;
1304         }
1305 }
1306
1307 static void
1308 cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1309 {
1310         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1311
1312         if (cfqg->new_leaf_weight) {
1313                 cfqg->leaf_weight = cfqg->new_leaf_weight;
1314                 cfqg->new_leaf_weight = 0;
1315         }
1316 }
1317
1318 static void
1319 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1320 {
1321         unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
1322         struct cfq_group *pos = cfqg;
1323         struct cfq_group *parent;
1324         bool propagate;
1325
1326         /* add to the service tree */
1327         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1328
1329         /*
1330          * Update leaf_weight.  We cannot update weight at this point
1331          * because cfqg might already have been activated and is
1332          * contributing its current weight to the parent's children_weight.
1333          */
1334         cfq_update_group_leaf_weight(cfqg);
1335         __cfq_group_service_tree_add(st, cfqg);
1336
1337         /*
1338          * Activate @cfqg and calculate the portion of vfraction @cfqg is
1339          * entitled to.  vfraction is calculated by walking the tree
1340          * towards the root calculating the fraction it has at each level.
1341          * The compounded ratio is how much vfraction @cfqg owns.
1342          *
1343          * Start with the proportion tasks in this cfqg has against active
1344          * children cfqgs - its leaf_weight against children_weight.
1345          */
1346         propagate = !pos->nr_active++;
1347         pos->children_weight += pos->leaf_weight;
1348         vfr = vfr * pos->leaf_weight / pos->children_weight;
1349
1350         /*
1351          * Compound ->weight walking up the tree.  Both activation and
1352          * vfraction calculation are done in the same loop.  Propagation
1353          * stops once an already activated node is met.  vfraction
1354          * calculation should always continue to the root.
1355          */
1356         while ((parent = cfqg_parent(pos))) {
1357                 if (propagate) {
1358                         cfq_update_group_weight(pos);
1359                         propagate = !parent->nr_active++;
1360                         parent->children_weight += pos->weight;
1361                 }
1362                 vfr = vfr * pos->weight / parent->children_weight;
1363                 pos = parent;
1364         }
1365
1366         cfqg->vfraction = max_t(unsigned, vfr, 1);
1367 }
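/*
 * For illustration: if this cfqg's leaf_weight of 500 is half of its own
 * children_weight (1000), and its weight of 500 is half of its parent's
 * children_weight (1000), the compounding above yields vfraction =
 * 1/2 * 1/2 = 1/4 of the device, i.e. (1 << CFQ_SERVICE_SHIFT) / 4.
 */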
1368
1369 static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
1370 {
1371         if (!iops_mode(cfqd))
1372                 return CFQ_SLICE_MODE_GROUP_DELAY;
1373         else
1374                 return CFQ_IOPS_MODE_GROUP_DELAY;
1375 }
1376
1377 static void
1378 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1379 {
1380         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1381         struct cfq_group *__cfqg;
1382         struct rb_node *n;
1383
1384         cfqg->nr_cfqq++;
1385         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1386                 return;
1387
1388         /*
1389          * Currently put the group at the end. Later implement something
1390          * so that groups get a lesser vtime based on their weights, so that
1391          * a group does not lose everything if it was not continuously backlogged.
1392          */
1393         n = rb_last(&st->rb);
1394         if (n) {
1395                 __cfqg = rb_entry_cfqg(n);
1396                 cfqg->vdisktime = __cfqg->vdisktime +
1397                         cfq_get_cfqg_vdisktime_delay(cfqd);
1398         } else
1399                 cfqg->vdisktime = st->min_vdisktime;
1400         cfq_group_service_tree_add(st, cfqg);
1401 }
1402
1403 static void
1404 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1405 {
1406         struct cfq_group *pos = cfqg;
1407         bool propagate;
1408
1409         /*
1410          * Undo activation from cfq_group_service_tree_add().  Deactivate
1411          * @cfqg and propagate deactivation upwards.
1412          */
1413         propagate = !--pos->nr_active;
1414         pos->children_weight -= pos->leaf_weight;
1415
1416         while (propagate) {
1417                 struct cfq_group *parent = cfqg_parent(pos);
1418
1419                 /* @pos has 0 nr_active at this point */
1420                 WARN_ON_ONCE(pos->children_weight);
1421                 pos->vfraction = 0;
1422
1423                 if (!parent)
1424                         break;
1425
1426                 propagate = !--parent->nr_active;
1427                 parent->children_weight -= pos->weight;
1428                 pos = parent;
1429         }
1430
1431         /* remove from the service tree */
1432         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1433                 cfq_rb_erase(&cfqg->rb_node, st);
1434 }
1435
1436 static void
1437 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1438 {
1439         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1440
1441         BUG_ON(cfqg->nr_cfqq < 1);
1442         cfqg->nr_cfqq--;
1443
1444         /* If there are other cfq queues under this group, don't delete it */
1445         if (cfqg->nr_cfqq)
1446                 return;
1447
1448         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1449         cfq_group_service_tree_del(st, cfqg);
1450         cfqg->saved_wl_slice = 0;
1451         cfqg_stats_update_dequeue(cfqg);
1452 }
1453
1454 static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1455                                        u64 *unaccounted_time)
1456 {
1457         u64 slice_used;
1458         u64 now = ktime_get_ns();
1459
1460         /*
1461          * Queue got expired before even a single request completed or
1462          * got expired immediately after first request completion.
1463          */
1464         if (!cfqq->slice_start || cfqq->slice_start == now) {
1465                 /*
1466                  * Also charge the seek time incurred to the group; otherwise,
1467                  * if there are multiple queues in the group, each can dispatch
1468                  * a single request on seeky media and cause lots of seek time,
1469                  * and the group will never know it.
1470                  */
1471                 slice_used = max_t(u64, (now - cfqq->dispatch_start),
1472                                         jiffies_to_nsecs(1));
1473         } else {
1474                 slice_used = now - cfqq->slice_start;
1475                 if (slice_used > cfqq->allocated_slice) {
1476                         *unaccounted_time = slice_used - cfqq->allocated_slice;
1477                         slice_used = cfqq->allocated_slice;
1478                 }
1479                 if (cfqq->slice_start > cfqq->dispatch_start)
1480                         *unaccounted_time += cfqq->slice_start -
1481                                         cfqq->dispatch_start;
1482         }
1483
1484         return slice_used;
1485 }
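/*
 * Illustrative example of the accounting above (hypothetical numbers, not
 * taken from any trace): suppose dispatch_start = 0, slice_start = 2ms,
 * allocated_slice = 100ms and now = 130ms.  Then slice_used = 130ms - 2ms =
 * 128ms, which exceeds the allocated 100ms, so slice_used is clamped to
 * 100ms and 28ms goes to *unaccounted_time; the 2ms spent between
 * dispatch_start and slice_start is added as well, giving 30ms unaccounted.
 * If the queue expired before its slice ever started, the whole
 * (now - dispatch_start) span is charged instead.
 */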
1486
1487 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1488                                 struct cfq_queue *cfqq)
1489 {
1490         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1491         u64 used_sl, charge, unaccounted_sl = 0;
1492         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1493                         - cfqg->service_tree_idle.count;
1494         unsigned int vfr;
1495         u64 now = ktime_get_ns();
1496
1497         BUG_ON(nr_sync < 0);
1498         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1499
1500         if (iops_mode(cfqd))
1501                 charge = cfqq->slice_dispatch;
1502         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1503                 charge = cfqq->allocated_slice;
1504
1505         /*
1506          * Can't update vdisktime while on service tree and cfqg->vfraction
1507          * is valid only while on it.  Cache vfr, leave the service tree,
1508          * update vdisktime and go back on.  The re-addition to the tree
1509          * will also update the weights as necessary.
1510          */
1511         vfr = cfqg->vfraction;
1512         cfq_group_service_tree_del(st, cfqg);
1513         cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1514         cfq_group_service_tree_add(st, cfqg);
1515
1516         /* This group is being expired. Save the context */
1517         if (cfqd->workload_expires > now) {
1518                 cfqg->saved_wl_slice = cfqd->workload_expires - now;
1519                 cfqg->saved_wl_type = cfqd->serving_wl_type;
1520                 cfqg->saved_wl_class = cfqd->serving_wl_class;
1521         } else
1522                 cfqg->saved_wl_slice = 0;
1523
1524         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1525                                         st->min_vdisktime);
1526         cfq_log_cfqq(cfqq->cfqd, cfqq,
1527                      "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
1528                      used_sl, cfqq->slice_dispatch, charge,
1529                      iops_mode(cfqd), cfqq->nr_sectors);
1530         cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1531         cfqg_stats_set_start_empty_time(cfqg);
1532 }
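/*
 * Rough illustration of the charge scaling above (simplified numbers,
 * ignoring the fixed-point shift used by cfqg_scale_charge()): with a
 * time-based charge of 10ms, a group holding 1/2 of the total weight has
 * its vdisktime advanced by about 20ms-worth, while a group holding 1/4
 * advances by about 40ms-worth.  The lighter group therefore moves right
 * in the service tree faster and is selected less often, which is how the
 * weight proportions are enforced.  In iops_mode() the charge is the
 * number of requests dispatched rather than elapsed time.
 */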
1533
1534 /**
1535  * cfq_init_cfqg_base - initialize base part of a cfq_group
1536  * @cfqg: cfq_group to initialize
1537  *
1538  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1539  * is enabled or not.
1540  */
1541 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1542 {
1543         struct cfq_rb_root *st;
1544         int i, j;
1545
1546         for_each_cfqg_st(cfqg, i, j, st)
1547                 *st = CFQ_RB_ROOT;
1548         RB_CLEAR_NODE(&cfqg->rb_node);
1549
1550         cfqg->ttime.last_end_request = ktime_get_ns();
1551 }
1552
1553 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1554 static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1555                             bool on_dfl, bool reset_dev, bool is_leaf_weight);
1556
1557 static void cfqg_stats_exit(struct cfqg_stats *stats)
1558 {
1559         blkg_rwstat_exit(&stats->merged);
1560         blkg_rwstat_exit(&stats->service_time);
1561         blkg_rwstat_exit(&stats->wait_time);
1562         blkg_rwstat_exit(&stats->queued);
1563         blkg_stat_exit(&stats->time);
1564 #ifdef CONFIG_DEBUG_BLK_CGROUP
1565         blkg_stat_exit(&stats->unaccounted_time);
1566         blkg_stat_exit(&stats->avg_queue_size_sum);
1567         blkg_stat_exit(&stats->avg_queue_size_samples);
1568         blkg_stat_exit(&stats->dequeue);
1569         blkg_stat_exit(&stats->group_wait_time);
1570         blkg_stat_exit(&stats->idle_time);
1571         blkg_stat_exit(&stats->empty_time);
1572 #endif
1573 }
1574
1575 static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1576 {
1577         if (blkg_rwstat_init(&stats->merged, gfp) ||
1578             blkg_rwstat_init(&stats->service_time, gfp) ||
1579             blkg_rwstat_init(&stats->wait_time, gfp) ||
1580             blkg_rwstat_init(&stats->queued, gfp) ||
1581             blkg_stat_init(&stats->time, gfp))
1582                 goto err;
1583
1584 #ifdef CONFIG_DEBUG_BLK_CGROUP
1585         if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1586             blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1587             blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1588             blkg_stat_init(&stats->dequeue, gfp) ||
1589             blkg_stat_init(&stats->group_wait_time, gfp) ||
1590             blkg_stat_init(&stats->idle_time, gfp) ||
1591             blkg_stat_init(&stats->empty_time, gfp))
1592                 goto err;
1593 #endif
1594         return 0;
1595 err:
1596         cfqg_stats_exit(stats);
1597         return -ENOMEM;
1598 }
1599
1600 static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1601 {
1602         struct cfq_group_data *cgd;
1603
1604         cgd = kzalloc(sizeof(*cgd), gfp);
1605         if (!cgd)
1606                 return NULL;
1607         return &cgd->cpd;
1608 }
1609
1610 static void cfq_cpd_init(struct blkcg_policy_data *cpd)
1611 {
1612         struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
1613         unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
1614                               CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1615
1616         if (cpd_to_blkcg(cpd) == &blkcg_root)
1617                 weight *= 2;
1618
1619         cgd->weight = weight;
1620         cgd->leaf_weight = weight;
1621 }
1622
1623 static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1624 {
1625         kfree(cpd_to_cfqgd(cpd));
1626 }
1627
1628 static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
1629 {
1630         struct blkcg *blkcg = cpd_to_blkcg(cpd);
1631         bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
1632         unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1633
1634         if (blkcg == &blkcg_root)
1635                 weight *= 2;
1636
1637         WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
1638         WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
1639 }
1640
1641 static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1642 {
1643         struct cfq_group *cfqg;
1644
1645         cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1646         if (!cfqg)
1647                 return NULL;
1648
1649         cfq_init_cfqg_base(cfqg);
1650         if (cfqg_stats_init(&cfqg->stats, gfp)) {
1651                 kfree(cfqg);
1652                 return NULL;
1653         }
1654
1655         return &cfqg->pd;
1656 }
1657
1658 static void cfq_pd_init(struct blkg_policy_data *pd)
1659 {
1660         struct cfq_group *cfqg = pd_to_cfqg(pd);
1661         struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
1662
1663         cfqg->weight = cgd->weight;
1664         cfqg->leaf_weight = cgd->leaf_weight;
1665 }
1666
1667 static void cfq_pd_offline(struct blkg_policy_data *pd)
1668 {
1669         struct cfq_group *cfqg = pd_to_cfqg(pd);
1670         int i;
1671
1672         for (i = 0; i < IOPRIO_BE_NR; i++) {
1673                 if (cfqg->async_cfqq[0][i])
1674                         cfq_put_queue(cfqg->async_cfqq[0][i]);
1675                 if (cfqg->async_cfqq[1][i])
1676                         cfq_put_queue(cfqg->async_cfqq[1][i]);
1677         }
1678
1679         if (cfqg->async_idle_cfqq)
1680                 cfq_put_queue(cfqg->async_idle_cfqq);
1681
1682         /*
1683          * @blkg is going offline and will be ignored by
1684          * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1685          * that they don't get lost.  If IOs complete after this point, the
1686          * stats for them will be lost.  Oh well...
1687          */
1688         cfqg_stats_xfer_dead(cfqg);
1689 }
1690
1691 static void cfq_pd_free(struct blkg_policy_data *pd)
1692 {
1693         struct cfq_group *cfqg = pd_to_cfqg(pd);
1694
1695         cfqg_stats_exit(&cfqg->stats);
1696         kfree(cfqg);
1697 }
1698
1699 static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
1700 {
1701         struct cfq_group *cfqg = pd_to_cfqg(pd);
1702
1703         cfqg_stats_reset(&cfqg->stats);
1704 }
1705
1706 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1707                                          struct blkcg *blkcg)
1708 {
1709         struct blkcg_gq *blkg;
1710
1711         blkg = blkg_lookup(blkcg, cfqd->queue);
1712         if (likely(blkg))
1713                 return blkg_to_cfqg(blkg);
1714         return NULL;
1715 }
1716
1717 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1718 {
1719         cfqq->cfqg = cfqg;
1720         /* cfqq reference on cfqg */
1721         cfqg_get(cfqg);
1722 }
1723
1724 static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1725                                      struct blkg_policy_data *pd, int off)
1726 {
1727         struct cfq_group *cfqg = pd_to_cfqg(pd);
1728
1729         if (!cfqg->dev_weight)
1730                 return 0;
1731         return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1732 }
1733
1734 static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1735 {
1736         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1737                           cfqg_prfill_weight_device, &blkcg_policy_cfq,
1738                           0, false);
1739         return 0;
1740 }
1741
1742 static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1743                                           struct blkg_policy_data *pd, int off)
1744 {
1745         struct cfq_group *cfqg = pd_to_cfqg(pd);
1746
1747         if (!cfqg->dev_leaf_weight)
1748                 return 0;
1749         return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1750 }
1751
1752 static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1753 {
1754         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1755                           cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1756                           0, false);
1757         return 0;
1758 }
1759
1760 static int cfq_print_weight(struct seq_file *sf, void *v)
1761 {
1762         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1763         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1764         unsigned int val = 0;
1765
1766         if (cgd)
1767                 val = cgd->weight;
1768
1769         seq_printf(sf, "%u\n", val);
1770         return 0;
1771 }
1772
1773 static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1774 {
1775         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1776         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1777         unsigned int val = 0;
1778
1779         if (cgd)
1780                 val = cgd->leaf_weight;
1781
1782         seq_printf(sf, "%u\n", val);
1783         return 0;
1784 }
1785
1786 static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1787                                         char *buf, size_t nbytes, loff_t off,
1788                                         bool on_dfl, bool is_leaf_weight)
1789 {
1790         unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1791         unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1792         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1793         struct blkg_conf_ctx ctx;
1794         struct cfq_group *cfqg;
1795         struct cfq_group_data *cfqgd;
1796         int ret;
1797         u64 v;
1798
1799         ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1800         if (ret)
1801                 return ret;
1802
1803         if (sscanf(ctx.body, "%llu", &v) == 1) {
1804                 /* on dfl, 0 is rejected; "default" must be used to clear */
1805                 ret = -ERANGE;
1806                 if (!v && on_dfl)
1807                         goto out_finish;
1808         } else if (!strcmp(strim(ctx.body), "default")) {
1809                 v = 0;
1810         } else {
1811                 ret = -EINVAL;
1812                 goto out_finish;
1813         }
1814
1815         cfqg = blkg_to_cfqg(ctx.blkg);
1816         cfqgd = blkcg_to_cfqgd(blkcg);
1817
1818         ret = -ERANGE;
1819         if (!v || (v >= min && v <= max)) {
1820                 if (!is_leaf_weight) {
1821                         cfqg->dev_weight = v;
1822                         cfqg->new_weight = v ?: cfqgd->weight;
1823                 } else {
1824                         cfqg->dev_leaf_weight = v;
1825                         cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
1826                 }
1827                 ret = 0;
1828         }
1829 out_finish:
1830         blkg_conf_finish(&ctx);
1831         return ret ?: nbytes;
1832 }
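/*
 * Illustrative usage (device numbers are made up): on the legacy hierarchy
 * the cftypes below expose this parser as blkio.weight_device and
 * blkio.leaf_weight_device, so
 *
 *   echo "8:16 300" > blkio.weight_device
 *
 * gives device 8:16 a per-device weight of 300 (within the
 * CFQ_WEIGHT_LEGACY_MIN..CFQ_WEIGHT_LEGACY_MAX range), while "8:16 0"
 * clears the override and falls back to the group-wide weight.  On the
 * default hierarchy the keyword "default" must be used instead of 0.
 */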
1833
1834 static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1835                                       char *buf, size_t nbytes, loff_t off)
1836 {
1837         return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
1838 }
1839
1840 static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1841                                            char *buf, size_t nbytes, loff_t off)
1842 {
1843         return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
1844 }
1845
1846 static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1847                             bool on_dfl, bool reset_dev, bool is_leaf_weight)
1848 {
1849         unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1850         unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1851         struct blkcg *blkcg = css_to_blkcg(css);
1852         struct blkcg_gq *blkg;
1853         struct cfq_group_data *cfqgd;
1854         int ret = 0;
1855
1856         if (val < min || val > max)
1857                 return -ERANGE;
1858
1859         spin_lock_irq(&blkcg->lock);
1860         cfqgd = blkcg_to_cfqgd(blkcg);
1861         if (!cfqgd) {
1862                 ret = -EINVAL;
1863                 goto out;
1864         }
1865
1866         if (!is_leaf_weight)
1867                 cfqgd->weight = val;
1868         else
1869                 cfqgd->leaf_weight = val;
1870
1871         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1872                 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1873
1874                 if (!cfqg)
1875                         continue;
1876
1877                 if (!is_leaf_weight) {
1878                         if (reset_dev)
1879                                 cfqg->dev_weight = 0;
1880                         if (!cfqg->dev_weight)
1881                                 cfqg->new_weight = cfqgd->weight;
1882                 } else {
1883                         if (reset_dev)
1884                                 cfqg->dev_leaf_weight = 0;
1885                         if (!cfqg->dev_leaf_weight)
1886                                 cfqg->new_leaf_weight = cfqgd->leaf_weight;
1887                 }
1888         }
1889
1890 out:
1891         spin_unlock_irq(&blkcg->lock);
1892         return ret;
1893 }
1894
1895 static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1896                           u64 val)
1897 {
1898         return __cfq_set_weight(css, val, false, false, false);
1899 }
1900
1901 static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1902                                struct cftype *cft, u64 val)
1903 {
1904         return __cfq_set_weight(css, val, false, false, true);
1905 }
1906
1907 static int cfqg_print_stat(struct seq_file *sf, void *v)
1908 {
1909         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1910                           &blkcg_policy_cfq, seq_cft(sf)->private, false);
1911         return 0;
1912 }
1913
1914 static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1915 {
1916         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1917                           &blkcg_policy_cfq, seq_cft(sf)->private, true);
1918         return 0;
1919 }
1920
1921 static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1922                                       struct blkg_policy_data *pd, int off)
1923 {
1924         u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1925                                           &blkcg_policy_cfq, off);
1926         return __blkg_prfill_u64(sf, pd, sum);
1927 }
1928
1929 static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1930                                         struct blkg_policy_data *pd, int off)
1931 {
1932         struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1933                                                         &blkcg_policy_cfq, off);
1934         return __blkg_prfill_rwstat(sf, pd, &sum);
1935 }
1936
1937 static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1938 {
1939         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1940                           cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1941                           seq_cft(sf)->private, false);
1942         return 0;
1943 }
1944
1945 static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1946 {
1947         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1948                           cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1949                           seq_cft(sf)->private, true);
1950         return 0;
1951 }
1952
1953 static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1954                                int off)
1955 {
1956         u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1957
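        /* stat_bytes is in bytes; shift by 9 to report 512-byte sectors */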
1958         return __blkg_prfill_u64(sf, pd, sum >> 9);
1959 }
1960
1961 static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1962 {
1963         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1964                           cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1965         return 0;
1966 }
1967
1968 static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1969                                          struct blkg_policy_data *pd, int off)
1970 {
1971         struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1972                                         offsetof(struct blkcg_gq, stat_bytes));
1973         u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1974                 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1975
1976         return __blkg_prfill_u64(sf, pd, sum >> 9);
1977 }
1978
1979 static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1980 {
1981         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1982                           cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1983                           false);
1984         return 0;
1985 }
1986
1987 #ifdef CONFIG_DEBUG_BLK_CGROUP
1988 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1989                                       struct blkg_policy_data *pd, int off)
1990 {
1991         struct cfq_group *cfqg = pd_to_cfqg(pd);
1992         u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1993         u64 v = 0;
1994
1995         if (samples) {
1996                 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1997                 v = div64_u64(v, samples);
1998         }
1999         __blkg_prfill_u64(sf, pd, v);
2000         return 0;
2001 }
2002
2003 /* print avg_queue_size */
2004 static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
2005 {
2006         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
2007                           cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
2008                           0, false);
2009         return 0;
2010 }
2011 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
2012
2013 static struct cftype cfq_blkcg_legacy_files[] = {
2014         /* on root, weight is mapped to leaf_weight */
2015         {
2016                 .name = "weight_device",
2017                 .flags = CFTYPE_ONLY_ON_ROOT,
2018                 .seq_show = cfqg_print_leaf_weight_device,
2019                 .write = cfqg_set_leaf_weight_device,
2020         },
2021         {
2022                 .name = "weight",
2023                 .flags = CFTYPE_ONLY_ON_ROOT,
2024                 .seq_show = cfq_print_leaf_weight,
2025                 .write_u64 = cfq_set_leaf_weight,
2026         },
2027
2028         /* no such mapping necessary for !roots */
2029         {
2030                 .name = "weight_device",
2031                 .flags = CFTYPE_NOT_ON_ROOT,
2032                 .seq_show = cfqg_print_weight_device,
2033                 .write = cfqg_set_weight_device,
2034         },
2035         {
2036                 .name = "weight",
2037                 .flags = CFTYPE_NOT_ON_ROOT,
2038                 .seq_show = cfq_print_weight,
2039                 .write_u64 = cfq_set_weight,
2040         },
2041
2042         {
2043                 .name = "leaf_weight_device",
2044                 .seq_show = cfqg_print_leaf_weight_device,
2045                 .write = cfqg_set_leaf_weight_device,
2046         },
2047         {
2048                 .name = "leaf_weight",
2049                 .seq_show = cfq_print_leaf_weight,
2050                 .write_u64 = cfq_set_leaf_weight,
2051         },
2052
2053         /* statistics, covers only the tasks in the cfqg */
2054         {
2055                 .name = "time",
2056                 .private = offsetof(struct cfq_group, stats.time),
2057                 .seq_show = cfqg_print_stat,
2058         },
2059         {
2060                 .name = "sectors",
2061                 .seq_show = cfqg_print_stat_sectors,
2062         },
2063         {
2064                 .name = "io_service_bytes",
2065                 .private = (unsigned long)&blkcg_policy_cfq,
2066                 .seq_show = blkg_print_stat_bytes,
2067         },
2068         {
2069                 .name = "io_serviced",
2070                 .private = (unsigned long)&blkcg_policy_cfq,
2071                 .seq_show = blkg_print_stat_ios,
2072         },
2073         {
2074                 .name = "io_service_time",
2075                 .private = offsetof(struct cfq_group, stats.service_time),
2076                 .seq_show = cfqg_print_rwstat,
2077         },
2078         {
2079                 .name = "io_wait_time",
2080                 .private = offsetof(struct cfq_group, stats.wait_time),
2081                 .seq_show = cfqg_print_rwstat,
2082         },
2083         {
2084                 .name = "io_merged",
2085                 .private = offsetof(struct cfq_group, stats.merged),
2086                 .seq_show = cfqg_print_rwstat,
2087         },
2088         {
2089                 .name = "io_queued",
2090                 .private = offsetof(struct cfq_group, stats.queued),
2091                 .seq_show = cfqg_print_rwstat,
2092         },
2093
2094         /* the same statistics, which cover the cfqg and its descendants */
2095         {
2096                 .name = "time_recursive",
2097                 .private = offsetof(struct cfq_group, stats.time),
2098                 .seq_show = cfqg_print_stat_recursive,
2099         },
2100         {
2101                 .name = "sectors_recursive",
2102                 .seq_show = cfqg_print_stat_sectors_recursive,
2103         },
2104         {
2105                 .name = "io_service_bytes_recursive",
2106                 .private = (unsigned long)&blkcg_policy_cfq,
2107                 .seq_show = blkg_print_stat_bytes_recursive,
2108         },
2109         {
2110                 .name = "io_serviced_recursive",
2111                 .private = (unsigned long)&blkcg_policy_cfq,
2112                 .seq_show = blkg_print_stat_ios_recursive,
2113         },
2114         {
2115                 .name = "io_service_time_recursive",
2116                 .private = offsetof(struct cfq_group, stats.service_time),
2117                 .seq_show = cfqg_print_rwstat_recursive,
2118         },
2119         {
2120                 .name = "io_wait_time_recursive",
2121                 .private = offsetof(struct cfq_group, stats.wait_time),
2122                 .seq_show = cfqg_print_rwstat_recursive,
2123         },
2124         {
2125                 .name = "io_merged_recursive",
2126                 .private = offsetof(struct cfq_group, stats.merged),
2127                 .seq_show = cfqg_print_rwstat_recursive,
2128         },
2129         {
2130                 .name = "io_queued_recursive",
2131                 .private = offsetof(struct cfq_group, stats.queued),
2132                 .seq_show = cfqg_print_rwstat_recursive,
2133         },
2134 #ifdef CONFIG_DEBUG_BLK_CGROUP
2135         {
2136                 .name = "avg_queue_size",
2137                 .seq_show = cfqg_print_avg_queue_size,
2138         },
2139         {
2140                 .name = "group_wait_time",
2141                 .private = offsetof(struct cfq_group, stats.group_wait_time),
2142                 .seq_show = cfqg_print_stat,
2143         },
2144         {
2145                 .name = "idle_time",
2146                 .private = offsetof(struct cfq_group, stats.idle_time),
2147                 .seq_show = cfqg_print_stat,
2148         },
2149         {
2150                 .name = "empty_time",
2151                 .private = offsetof(struct cfq_group, stats.empty_time),
2152                 .seq_show = cfqg_print_stat,
2153         },
2154         {
2155                 .name = "dequeue",
2156                 .private = offsetof(struct cfq_group, stats.dequeue),
2157                 .seq_show = cfqg_print_stat,
2158         },
2159         {
2160                 .name = "unaccounted_time",
2161                 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2162                 .seq_show = cfqg_print_stat,
2163         },
2164 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
2165         { }     /* terminate */
2166 };
2167
2168 static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2169 {
2170         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2171         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2172
2173         seq_printf(sf, "default %u\n", cgd->weight);
2174         blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2175                           &blkcg_policy_cfq, 0, false);
2176         return 0;
2177 }
2178
2179 static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2180                                      char *buf, size_t nbytes, loff_t off)
2181 {
2182         char *endp;
2183         int ret;
2184         u64 v;
2185
2186         buf = strim(buf);
2187
2188         /* "WEIGHT" or "default WEIGHT" sets the default weight */
2189         v = simple_strtoull(buf, &endp, 0);
2190         if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
2191                 ret = __cfq_set_weight(of_css(of), v, true, false, false);
2192                 return ret ?: nbytes;
2193         }
2194
2195         /* "MAJ:MIN WEIGHT" */
2196         return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2197 }
2198
2199 static struct cftype cfq_blkcg_files[] = {
2200         {
2201                 .name = "weight",
2202                 .flags = CFTYPE_NOT_ON_ROOT,
2203                 .seq_show = cfq_print_weight_on_dfl,
2204                 .write = cfq_set_weight_on_dfl,
2205         },
2206         { }     /* terminate */
2207 };
2208
2209 #else /* GROUP_IOSCHED */
2210 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2211                                          struct blkcg *blkcg)
2212 {
2213         return cfqd->root_group;
2214 }
2215
2216 static inline void
2217 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2218         cfqq->cfqg = cfqg;
2219 }
2220
2221 #endif /* GROUP_IOSCHED */
2222
2223 /*
2224  * The cfqd->service_trees hold all pending cfq_queues that have
2225  * requests waiting to be processed. They are sorted in the order in
2226  * which we will service the queues.
2227  */
2228 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2229                                  bool add_front)
2230 {
2231         struct rb_node **p, *parent;
2232         struct cfq_queue *__cfqq;
2233         u64 rb_key;
2234         struct cfq_rb_root *st;
2235         int left;
2236         int new_cfqq = 1;
2237         u64 now = ktime_get_ns();
2238
2239         st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2240         if (cfq_class_idle(cfqq)) {
2241                 rb_key = CFQ_IDLE_DELAY;
2242                 parent = rb_last(&st->rb);
2243                 if (parent && parent != &cfqq->rb_node) {
2244                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2245                         rb_key += __cfqq->rb_key;
2246                 } else
2247                         rb_key += now;
2248         } else if (!add_front) {
2249                 /*
2250                  * Get our rb key offset. Subtract any residual slice
2251                  * value carried from last service. A negative resid
2252                  * count indicates slice overrun, and this should position
2253                  * the next service time further away in the tree.
2254                  */
2255                 rb_key = cfq_slice_offset(cfqd, cfqq) + now;
2256                 rb_key -= cfqq->slice_resid;
2257                 cfqq->slice_resid = 0;
2258         } else {
2259                 rb_key = -NSEC_PER_SEC;
2260                 __cfqq = cfq_rb_first(st);
2261                 rb_key += __cfqq ? __cfqq->rb_key : now;
2262         }
2263
2264         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2265                 new_cfqq = 0;
2266                 /*
2267                  * same position, nothing more to do
2268                  */
2269                 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2270                         return;
2271
2272                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2273                 cfqq->service_tree = NULL;
2274         }
2275
2276         left = 1;
2277         parent = NULL;
2278         cfqq->service_tree = st;
2279         p = &st->rb.rb_node;
2280         while (*p) {
2281                 parent = *p;
2282                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2283
2284                 /*
2285                  * sort by key, which represents service time.
2286                  */
2287                 if (rb_key < __cfqq->rb_key)
2288                         p = &parent->rb_left;
2289                 else {
2290                         p = &parent->rb_right;
2291                         left = 0;
2292                 }
2293         }
2294
2295         if (left)
2296                 st->left = &cfqq->rb_node;
2297
2298         cfqq->rb_key = rb_key;
2299         rb_link_node(&cfqq->rb_node, parent, p);
2300         rb_insert_color(&cfqq->rb_node, &st->rb);
2301         st->count++;
2302         if (add_front || !new_cfqq)
2303                 return;
2304         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2305 }
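/*
 * Worked example for the rb_key above (hypothetical values): a sync queue
 * that timed out with 20ms of its slice unused carries slice_resid = 20ms,
 * so its key becomes now + cfq_slice_offset() - 20ms and it sorts 20ms
 * earlier than an otherwise identical queue, i.e. it is serviced sooner to
 * make up for the lost slice; a queue that overran its slice has a negative
 * resid and is pushed later by the overrun.  Idle-class queues always go
 * CFQ_IDLE_DELAY past the rightmost entry, and add_front places the queue
 * one second ahead of the current leftmost entry.
 */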
2306
2307 static struct cfq_queue *
2308 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2309                      sector_t sector, struct rb_node **ret_parent,
2310                      struct rb_node ***rb_link)
2311 {
2312         struct rb_node **p, *parent;
2313         struct cfq_queue *cfqq = NULL;
2314
2315         parent = NULL;
2316         p = &root->rb_node;
2317         while (*p) {
2318                 struct rb_node **n;
2319
2320                 parent = *p;
2321                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2322
2323                 /*
2324                  * Sort strictly based on sector.  Smallest to the left,
2325                  * largest to the right.
2326                  */
2327                 if (sector > blk_rq_pos(cfqq->next_rq))
2328                         n = &(*p)->rb_right;
2329                 else if (sector < blk_rq_pos(cfqq->next_rq))
2330                         n = &(*p)->rb_left;
2331                 else
2332                         break;
2333                 p = n;
2334                 cfqq = NULL;
2335         }
2336
2337         *ret_parent = parent;
2338         if (rb_link)
2339                 *rb_link = p;
2340         return cfqq;
2341 }
2342
2343 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2344 {
2345         struct rb_node **p, *parent;
2346         struct cfq_queue *__cfqq;
2347
2348         if (cfqq->p_root) {
2349                 rb_erase(&cfqq->p_node, cfqq->p_root);
2350                 cfqq->p_root = NULL;
2351         }
2352
2353         if (cfq_class_idle(cfqq))
2354                 return;
2355         if (!cfqq->next_rq)
2356                 return;
2357
2358         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2359         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2360                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
2361         if (!__cfqq) {
2362                 rb_link_node(&cfqq->p_node, parent, p);
2363                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2364         } else
2365                 cfqq->p_root = NULL;
2366 }
2367
2368 /*
2369  * Update cfqq's position in the service tree.
2370  */
2371 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2372 {
2373         /*
2374          * Resorting requires the cfqq to be on the RR list already.
2375          */
2376         if (cfq_cfqq_on_rr(cfqq)) {
2377                 cfq_service_tree_add(cfqd, cfqq, 0);
2378                 cfq_prio_tree_add(cfqd, cfqq);
2379         }
2380 }
2381
2382 /*
2383  * add to busy list of queues for service, trying to be fair in ordering
2384  * the pending list according to last request service
2385  */
2386 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2387 {
2388         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2389         BUG_ON(cfq_cfqq_on_rr(cfqq));
2390         cfq_mark_cfqq_on_rr(cfqq);
2391         cfqd->busy_queues++;
2392         if (cfq_cfqq_sync(cfqq))
2393                 cfqd->busy_sync_queues++;
2394
2395         cfq_resort_rr_list(cfqd, cfqq);
2396 }
2397
2398 /*
2399  * Called when the cfqq no longer has requests pending, remove it from
2400  * the service tree.
2401  */
2402 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2403 {
2404         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2405         BUG_ON(!cfq_cfqq_on_rr(cfqq));
2406         cfq_clear_cfqq_on_rr(cfqq);
2407
2408         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2409                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2410                 cfqq->service_tree = NULL;
2411         }
2412         if (cfqq->p_root) {
2413                 rb_erase(&cfqq->p_node, cfqq->p_root);
2414                 cfqq->p_root = NULL;
2415         }
2416
2417         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2418         BUG_ON(!cfqd->busy_queues);
2419         cfqd->busy_queues--;
2420         if (cfq_cfqq_sync(cfqq))
2421                 cfqd->busy_sync_queues--;
2422 }
2423
2424 /*
2425  * rb tree support functions
2426  */
2427 static void cfq_del_rq_rb(struct request *rq)
2428 {
2429         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2430         const int sync = rq_is_sync(rq);
2431
2432         BUG_ON(!cfqq->queued[sync]);
2433         cfqq->queued[sync]--;
2434
2435         elv_rb_del(&cfqq->sort_list, rq);
2436
2437         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2438                 /*
2439                  * Queue will be deleted from the service tree when we actually
2440                  * expire it later. Right now just remove it from the prio tree
2441                  * as it is empty.
2442                  */
2443                 if (cfqq->p_root) {
2444                         rb_erase(&cfqq->p_node, cfqq->p_root);
2445                         cfqq->p_root = NULL;
2446                 }
2447         }
2448 }
2449
2450 static void cfq_add_rq_rb(struct request *rq)
2451 {
2452         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2453         struct cfq_data *cfqd = cfqq->cfqd;
2454         struct request *prev;
2455
2456         cfqq->queued[rq_is_sync(rq)]++;
2457
2458         elv_rb_add(&cfqq->sort_list, rq);
2459
2460         if (!cfq_cfqq_on_rr(cfqq))
2461                 cfq_add_cfqq_rr(cfqd, cfqq);
2462
2463         /*
2464          * check if this request is a better next-serve candidate
2465          */
2466         prev = cfqq->next_rq;
2467         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2468
2469         /*
2470          * adjust priority tree position, if ->next_rq changes
2471          */
2472         if (prev != cfqq->next_rq)
2473                 cfq_prio_tree_add(cfqd, cfqq);
2474
2475         BUG_ON(!cfqq->next_rq);
2476 }
2477
2478 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2479 {
2480         elv_rb_del(&cfqq->sort_list, rq);
2481         cfqq->queued[rq_is_sync(rq)]--;
2482         cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2483         cfq_add_rq_rb(rq);
2484         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2485                                  rq->cmd_flags);
2486 }
2487
2488 static struct request *
2489 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2490 {
2491         struct task_struct *tsk = current;
2492         struct cfq_io_cq *cic;
2493         struct cfq_queue *cfqq;
2494
2495         cic = cfq_cic_lookup(cfqd, tsk->io_context);
2496         if (!cic)
2497                 return NULL;
2498
2499         cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
2500         if (cfqq)
2501                 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2502
2503         return NULL;
2504 }
2505
2506 static void cfq_activate_request(struct request_queue *q, struct request *rq)
2507 {
2508         struct cfq_data *cfqd = q->elevator->elevator_data;
2509
2510         cfqd->rq_in_driver++;
2511         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2512                                                 cfqd->rq_in_driver);
2513
2514         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2515 }
2516
2517 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2518 {
2519         struct cfq_data *cfqd = q->elevator->elevator_data;
2520
2521         WARN_ON(!cfqd->rq_in_driver);
2522         cfqd->rq_in_driver--;
2523         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2524                                                 cfqd->rq_in_driver);
2525 }
2526
2527 static void cfq_remove_request(struct request *rq)
2528 {
2529         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2530
2531         if (cfqq->next_rq == rq)
2532                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2533
2534         list_del_init(&rq->queuelist);
2535         cfq_del_rq_rb(rq);
2536
2537         cfqq->cfqd->rq_queued--;
2538         cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2539         if (rq->cmd_flags & REQ_PRIO) {
2540                 WARN_ON(!cfqq->prio_pending);
2541                 cfqq->prio_pending--;
2542         }
2543 }
2544
2545 static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
2546                      struct bio *bio)
2547 {
2548         struct cfq_data *cfqd = q->elevator->elevator_data;
2549         struct request *__rq;
2550
2551         __rq = cfq_find_rq_fmerge(cfqd, bio);
2552         if (__rq && elv_bio_merge_ok(__rq, bio)) {
2553                 *req = __rq;
2554                 return ELEVATOR_FRONT_MERGE;
2555         }
2556
2557         return ELEVATOR_NO_MERGE;
2558 }
2559
2560 static void cfq_merged_request(struct request_queue *q, struct request *req,
2561                                enum elv_merge type)
2562 {
2563         if (type == ELEVATOR_FRONT_MERGE) {
2564                 struct cfq_queue *cfqq = RQ_CFQQ(req);
2565
2566                 cfq_reposition_rq_rb(cfqq, req);
2567         }
2568 }
2569
2570 static void cfq_bio_merged(struct request_queue *q, struct request *req,
2571                                 struct bio *bio)
2572 {
2573         cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
2574 }
2575
2576 static void
2577 cfq_merged_requests(struct request_queue *q, struct request *rq,
2578                     struct request *next)
2579 {
2580         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2581         struct cfq_data *cfqd = q->elevator->elevator_data;
2582
2583         /*
2584          * reposition in fifo if next is older than rq
2585          */
2586         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2587             next->fifo_time < rq->fifo_time &&
2588             cfqq == RQ_CFQQ(next)) {
2589                 list_move(&rq->queuelist, &next->queuelist);
2590                 rq->fifo_time = next->fifo_time;
2591         }
2592
2593         if (cfqq->next_rq == next)
2594                 cfqq->next_rq = rq;
2595         cfq_remove_request(next);
2596         cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2597
2598         cfqq = RQ_CFQQ(next);
2599         /*
2600          * all requests of this queue have been merged into other queues, so
2601          * delete it from the service tree. If it's the active_queue,
2602          * cfq_dispatch_requests() will choose to expire it or idle
2603          */
2604         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2605             cfqq != cfqd->active_queue)
2606                 cfq_del_cfqq_rr(cfqd, cfqq);
2607 }
2608
2609 static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2610                                struct bio *bio)
2611 {
2612         struct cfq_data *cfqd = q->elevator->elevator_data;
2613         bool is_sync = op_is_sync(bio->bi_opf);
2614         struct cfq_io_cq *cic;
2615         struct cfq_queue *cfqq;
2616
2617         /*
2618          * Disallow merge of a sync bio into an async request.
2619          */
2620         if (is_sync && !rq_is_sync(rq))
2621                 return false;
2622
2623         /*
2624          * Lookup the cfqq that this bio will be queued with and allow
2625          * merge only if rq is queued there.
2626          */
2627         cic = cfq_cic_lookup(cfqd, current->io_context);
2628         if (!cic)
2629                 return false;
2630
2631         cfqq = cic_to_cfqq(cic, is_sync);
2632         return cfqq == RQ_CFQQ(rq);
2633 }
2634
2635 static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
2636                               struct request *next)
2637 {
2638         return RQ_CFQQ(rq) == RQ_CFQQ(next);
2639 }
2640
2641 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2642 {
2643         hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
2644         cfqg_stats_update_idle_time(cfqq->cfqg);
2645 }
2646
2647 static void __cfq_set_active_queue(struct cfq_data *cfqd,
2648                                    struct cfq_queue *cfqq)
2649 {
2650         if (cfqq) {
2651                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2652                                 cfqd->serving_wl_class, cfqd->serving_wl_type);
2653                 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2654                 cfqq->slice_start = 0;
2655                 cfqq->dispatch_start = ktime_get_ns();
2656                 cfqq->allocated_slice = 0;
2657                 cfqq->slice_end = 0;
2658                 cfqq->slice_dispatch = 0;
2659                 cfqq->nr_sectors = 0;
2660
2661                 cfq_clear_cfqq_wait_request(cfqq);
2662                 cfq_clear_cfqq_must_dispatch(cfqq);
2663                 cfq_clear_cfqq_must_alloc_slice(cfqq);
2664                 cfq_clear_cfqq_fifo_expire(cfqq);
2665                 cfq_mark_cfqq_slice_new(cfqq);
2666
2667                 cfq_del_timer(cfqd, cfqq);
2668         }
2669
2670         cfqd->active_queue = cfqq;
2671 }
2672
2673 /*
2674  * current cfqq expired its slice (or was too idle), select new one
2675  */
2676 static void
2677 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2678                     bool timed_out)
2679 {
2680         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2681
2682         if (cfq_cfqq_wait_request(cfqq))
2683                 cfq_del_timer(cfqd, cfqq);
2684
2685         cfq_clear_cfqq_wait_request(cfqq);
2686         cfq_clear_cfqq_wait_busy(cfqq);
2687
2688         /*
2689          * If this cfqq is shared between multiple processes, check to
2690          * make sure that those processes are still issuing I/Os within
2691          * the mean seek distance.  If not, it may be time to break the
2692          * queues apart again.
2693          */
2694         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2695                 cfq_mark_cfqq_split_coop(cfqq);
2696
2697         /*
2698          * store what was left of this slice, if the queue idled/timed out
2699          */
2700         if (timed_out) {
2701                 if (cfq_cfqq_slice_new(cfqq))
2702                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2703                 else
2704                         cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
2705                 cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
2706         }
2707
2708         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2709
2710         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2711                 cfq_del_cfqq_rr(cfqd, cfqq);
2712
2713         cfq_resort_rr_list(cfqd, cfqq);
2714
2715         if (cfqq == cfqd->active_queue)
2716                 cfqd->active_queue = NULL;
2717
2718         if (cfqd->active_cic) {
2719                 put_io_context(cfqd->active_cic->icq.ioc);
2720                 cfqd->active_cic = NULL;
2721         }
2722 }
2723
2724 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2725 {
2726         struct cfq_queue *cfqq = cfqd->active_queue;
2727
2728         if (cfqq)
2729                 __cfq_slice_expired(cfqd, cfqq, timed_out);
2730 }
2731
2732 /*
2733  * Get next queue for service. Unless we have a queue preemption,
2734  * we'll simply select the first cfqq in the service tree.
2735  */
2736 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2737 {
2738         struct cfq_rb_root *st = st_for(cfqd->serving_group,
2739                         cfqd->serving_wl_class, cfqd->serving_wl_type);
2740
2741         if (!cfqd->rq_queued)
2742                 return NULL;
2743
2744         /* There is nothing to dispatch */
2745         if (!st)
2746                 return NULL;
2747         if (RB_EMPTY_ROOT(&st->rb))
2748                 return NULL;
2749         return cfq_rb_first(st);
2750 }
2751
2752 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2753 {
2754         struct cfq_group *cfqg;
2755         struct cfq_queue *cfqq;
2756         int i, j;
2757         struct cfq_rb_root *st;
2758
2759         if (!cfqd->rq_queued)
2760                 return NULL;
2761
2762         cfqg = cfq_get_next_cfqg(cfqd);
2763         if (!cfqg)
2764                 return NULL;
2765
2766         for_each_cfqg_st(cfqg, i, j, st) {
2767                 cfqq = cfq_rb_first(st);
2768                 if (cfqq)
2769                         return cfqq;
2770         }
2771         return NULL;
2772 }
2773
2774 /*
2775  * Get and set a new active queue for service.
2776  */
2777 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2778                                               struct cfq_queue *cfqq)
2779 {
2780         if (!cfqq)
2781                 cfqq = cfq_get_next_queue(cfqd);
2782
2783         __cfq_set_active_queue(cfqd, cfqq);
2784         return cfqq;
2785 }
2786
2787 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2788                                           struct request *rq)
2789 {
2790         if (blk_rq_pos(rq) >= cfqd->last_position)
2791                 return blk_rq_pos(rq) - cfqd->last_position;
2792         else
2793                 return cfqd->last_position - blk_rq_pos(rq);
2794 }
2795
2796 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2797                                struct request *rq)
2798 {
2799         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2800 }
2801
2802 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2803                                     struct cfq_queue *cur_cfqq)
2804 {
2805         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2806         struct rb_node *parent, *node;
2807         struct cfq_queue *__cfqq;
2808         sector_t sector = cfqd->last_position;
2809
2810         if (RB_EMPTY_ROOT(root))
2811                 return NULL;
2812
2813         /*
2814          * First, if we find a request starting at the end of the last
2815          * request, choose it.
2816          */
2817         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2818         if (__cfqq)
2819                 return __cfqq;
2820
2821         /*
2822          * If the exact sector wasn't found, the parent of the NULL leaf
2823          * will contain the closest sector.
2824          */
2825         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2826         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2827                 return __cfqq;
2828
2829         if (blk_rq_pos(__cfqq->next_rq) < sector)
2830                 node = rb_next(&__cfqq->p_node);
2831         else
2832                 node = rb_prev(&__cfqq->p_node);
2833         if (!node)
2834                 return NULL;
2835
2836         __cfqq = rb_entry(node, struct cfq_queue, p_node);
2837         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2838                 return __cfqq;
2839
2840         return NULL;
2841 }
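/*
 * Example of the lookup above (sector numbers are invented): with
 * last_position = 100000 and queues whose next requests start at sectors
 * 99000 and 110000 in the prio tree, the exact lookup fails.  The parent of
 * the failed lookup is tried first; if that is the 110000 queue it is
 * rejected as 10000 sectors away (beyond CFQQ_CLOSE_THR), and rb_prev()
 * then finds the 99000 queue, only 1000 sectors away, which is returned as
 * a close cooperator candidate.
 */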
2842
2843 /*
2844  * cfqd - obvious
2845  * cur_cfqq - passed in so that we don't decide that the current queue is
2846  *            closely cooperating with itself.
2847  *
2848  * So, basically we're assuming that cur_cfqq has dispatched at least
2849  * one request, and that cfqd->last_position reflects a position on the disk
2850  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2851  * assumption.
2852  */
2853 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2854                                               struct cfq_queue *cur_cfqq)
2855 {
2856         struct cfq_queue *cfqq;
2857
2858         if (cfq_class_idle(cur_cfqq))
2859                 return NULL;
2860         if (!cfq_cfqq_sync(cur_cfqq))
2861                 return NULL;
2862         if (CFQQ_SEEKY(cur_cfqq))
2863                 return NULL;
2864
2865         /*
2866          * Don't search priority tree if it's the only queue in the group.
2867          */
2868         if (cur_cfqq->cfqg->nr_cfqq == 1)
2869                 return NULL;
2870
2871         /*
2872          * We should notice if some of the queues are cooperating, e.g.
2873          * working closely on the same area of the disk. In that case,
2874          * we can group them together and not waste time idling.
2875          */
2876         cfqq = cfqq_close(cfqd, cur_cfqq);
2877         if (!cfqq)
2878                 return NULL;
2879
2880         /* If new queue belongs to different cfq_group, don't choose it */
2881         if (cur_cfqq->cfqg != cfqq->cfqg)
2882                 return NULL;
2883
2884         /*
2885          * It only makes sense to merge sync queues.
2886          */
2887         if (!cfq_cfqq_sync(cfqq))
2888                 return NULL;
2889         if (CFQQ_SEEKY(cfqq))
2890                 return NULL;
2891
2892         /*
2893          * Do not merge queues of different priority classes
2894          */
2895         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2896                 return NULL;
2897
2898         return cfqq;
2899 }
2900
2901 /*
2902  * Determine whether we should enforce idle window for this queue.
2903  */
2904
2905 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2906 {
2907         enum wl_class_t wl_class = cfqq_class(cfqq);
2908         struct cfq_rb_root *st = cfqq->service_tree;
2909
2910         BUG_ON(!st);
2911         BUG_ON(!st->count);
2912
2913         if (!cfqd->cfq_slice_idle)
2914                 return false;
2915
2916         /* We never idle for idle class queues. */
2917         if (wl_class == IDLE_WORKLOAD)
2918                 return false;
2919
2920         /* We do idle for queues that were marked with the idle window flag. */
2921         if (cfq_cfqq_idle_window(cfqq) &&
2922            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2923                 return true;
2924
2925         /*
2926          * Otherwise, we idle only if the queue is the last one
2927          * in its service tree.
2928          */
2929         if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2930            !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2931                 return true;
2932         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2933         return false;
2934 }
2935
2936 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2937 {
2938         struct cfq_queue *cfqq = cfqd->active_queue;
2939         struct cfq_rb_root *st = cfqq->service_tree;
2940         struct cfq_io_cq *cic;
2941         u64 sl, group_idle = 0;
2942         u64 now = ktime_get_ns();
2943
2944         /*
2945          * For SSD devices without a seek penalty, disable idling. But only
2946          * do so for devices that support queuing, otherwise we still have a
2947          * problem with sync vs async workloads.
2948          */
2949         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2950                 return;
2951
2952         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2953         WARN_ON(cfq_cfqq_slice_new(cfqq));
2954
2955         /*
2956          * idle is disabled, either manually or by past process history
2957          */
2958         if (!cfq_should_idle(cfqd, cfqq)) {
2959                 /* no queue idling. Check for group idling */
2960                 if (cfqd->cfq_group_idle)
2961                         group_idle = cfqd->cfq_group_idle;
2962                 else
2963                         return;
2964         }
2965
2966         /*
2967          * still active requests from this queue, don't idle
2968          */
2969         if (cfqq->dispatched)
2970                 return;
2971
2972         /*
2973          * task has exited, don't wait
2974          */
2975         cic = cfqd->active_cic;
2976         if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2977                 return;
2978
2979         /*
2980          * If our average think time is larger than the remaining time
2981          * slice, then don't idle. This avoids overrunning the allotted
2982          * time slice.
2983          */
2984         if (sample_valid(cic->ttime.ttime_samples) &&
2985             (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
2986                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
2987                              cic->ttime.ttime_mean);
2988                 return;
2989         }
2990
2991         /*
2992          * If there are other queues in the group, or this is the only group
2993          * and its thinktime is too big, don't do group idle.
2994          */
2995         if (group_idle &&
2996             (cfqq->cfqg->nr_cfqq > 1 ||
2997              cfq_io_thinktime_big(cfqd, &st->ttime, true)))
2998                 return;
2999
3000         cfq_mark_cfqq_wait_request(cfqq);
3001
3002         if (group_idle)
3003                 sl = cfqd->cfq_group_idle;
3004         else
3005                 sl = cfqd->cfq_slice_idle;
3006
3007         hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
3008                       HRTIMER_MODE_REL);
3009         cfqg_stats_set_start_idle_time(cfqq->cfqg);
3010         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
3011                         group_idle ? 1 : 0);
3012 }
3013
3014 /*
3015  * Move request from internal lists to the request queue dispatch list.
3016  */
3017 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
3018 {
3019         struct cfq_data *cfqd = q->elevator->elevator_data;
3020         struct cfq_queue *cfqq = RQ_CFQQ(rq);
3021
3022         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
3023
3024         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
3025         cfq_remove_request(rq);
3026         cfqq->dispatched++;
3027         (RQ_CFQG(rq))->dispatched++;
3028         elv_dispatch_sort(q, rq);
3029
3030         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
3031         cfqq->nr_sectors += blk_rq_sectors(rq);
3032 }
3033
3034 /*
3035  * return expired entry, or NULL to just start from scratch in rbtree
3036  */
3037 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
3038 {
3039         struct request *rq = NULL;
3040
3041         if (cfq_cfqq_fifo_expire(cfqq))
3042                 return NULL;
3043
3044         cfq_mark_cfqq_fifo_expire(cfqq);
3045
3046         if (list_empty(&cfqq->fifo))
3047                 return NULL;
3048
3049         rq = rq_entry_fifo(cfqq->fifo.next);
3050         if (ktime_get_ns() < rq->fifo_time)
3051                 rq = NULL;
3052
3053         return rq;
3054 }
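     /*
      * Note on the fifo_expire flag marked above: once set, later calls in
      * the same slice return NULL straight away, so the FIFO is consulted at
      * most once per slice (the flag is presumably cleared again when the
      * queue is handed a fresh slice) and subsequent dispatches fall back to
      * the sector-sorted next_rq ordering.
      */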
3055
3056 static inline int
3057 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3058 {
3059         const int base_rq = cfqd->cfq_slice_async_rq;
3060
3061         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
3062
3063         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
3064 }
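     /*
      * Illustrative arithmetic only, assuming the default cfq_slice_async_rq
      * of 2 and IOPRIO_BE_NR of 8: the highest best-effort priority
      * (ioprio 0) gets 2 * 2 * (8 - 0) = 32 requests per slice, while the
      * lowest (ioprio 7) gets 2 * 2 * (8 - 7) = 4, so better priorities may
      * push proportionally more async requests within one slice.
      */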
3065
3066 /*
3067  * Must be called with the queue_lock held.
3068  */
3069 static int cfqq_process_refs(struct cfq_queue *cfqq)
3070 {
3071         int process_refs, io_refs;
3072
3073         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
3074         process_refs = cfqq->ref - io_refs;
3075         BUG_ON(process_refs < 0);
3076         return process_refs;
3077 }
3078
3079 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3080 {
3081         int process_refs, new_process_refs;
3082         struct cfq_queue *__cfqq;
3083
3084         /*
3085          * If there are no process references on the new_cfqq, then it is
3086          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
3087          * chain may have dropped their last reference (not just their
3088          * last process reference).
3089          */
3090         if (!cfqq_process_refs(new_cfqq))
3091                 return;
3092
3093         /* Avoid a circular list and skip interim queue merges */
3094         while ((__cfqq = new_cfqq->new_cfqq)) {
3095                 if (__cfqq == cfqq)
3096                         return;
3097                 new_cfqq = __cfqq;
3098         }
3099
3100         process_refs = cfqq_process_refs(cfqq);
3101         new_process_refs = cfqq_process_refs(new_cfqq);
3102         /*
3103          * If the process for the cfqq has gone away, there is no
3104          * sense in merging the queues.
3105          */
3106         if (process_refs == 0 || new_process_refs == 0)
3107                 return;
3108
3109         /*
3110          * Merge in the direction of the lesser amount of work.
3111          */
3112         if (new_process_refs >= process_refs) {
3113                 cfqq->new_cfqq = new_cfqq;
3114                 new_cfqq->ref += process_refs;
3115         } else {
3116                 new_cfqq->new_cfqq = cfqq;
3117                 cfqq->ref += new_process_refs;
3118         }
3119 }
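     /*
      * Example of the direction chosen above: if cfqq has 1 process
      * reference and new_cfqq has 3, then cfqq->new_cfqq is pointed at
      * new_cfqq and new_cfqq picks up cfqq's single reference, i.e. the
      * queue with fewer process references is folded into the busier one.
      */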
3120
3121 static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3122                         struct cfq_group *cfqg, enum wl_class_t wl_class)
3123 {
3124         struct cfq_queue *queue;
3125         int i;
3126         bool key_valid = false;
3127         u64 lowest_key = 0;
3128         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3129
3130         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3131                 /* select the one with lowest rb_key */
3132                 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
3133                 if (queue &&
3134                     (!key_valid || queue->rb_key < lowest_key)) {
3135                         lowest_key = queue->rb_key;
3136                         cur_best = i;
3137                         key_valid = true;
3138                 }
3139         }
3140
3141         return cur_best;
3142 }
3143
3144 static void
3145 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
3146 {
3147         u64 slice;
3148         unsigned count;
3149         struct cfq_rb_root *st;
3150         u64 group_slice;
3151         enum wl_class_t original_class = cfqd->serving_wl_class;
3152         u64 now = ktime_get_ns();
3153
3154         /* Choose next priority. RT > BE > IDLE */
3155         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
3156                 cfqd->serving_wl_class = RT_WORKLOAD;
3157         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
3158                 cfqd->serving_wl_class = BE_WORKLOAD;
3159         else {
3160                 cfqd->serving_wl_class = IDLE_WORKLOAD;
3161                 cfqd->workload_expires = now + jiffies_to_nsecs(1);
3162                 return;
3163         }
3164
3165         if (original_class != cfqd->serving_wl_class)
3166                 goto new_workload;
3167
3168         /*
3169          * For RT and BE, we also have to choose the type
3170          * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
3171          * expiration time
3172          */
3173         st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3174         count = st->count;
3175
3176         /*
3177          * check workload expiration, and that we still have other queues ready
3178          */
3179         if (count && !(now > cfqd->workload_expires))
3180                 return;
3181
3182 new_workload:
3183         /* otherwise select new workload type */
3184         cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
3185                                         cfqd->serving_wl_class);
3186         st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3187         count = st->count;
3188
3189         /*
3190          * the workload slice is computed as a fraction of target latency
3191          * proportional to the number of queues in that workload, over
3192          * all the queues in the same priority class
3193          */
3194         group_slice = cfq_group_slice(cfqd, cfqg);
3195
3196         slice = div_u64(group_slice * count,
3197                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3198                       cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3199                                         cfqg)));
3200
3201         if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
3202                 u64 tmp;
3203
3204                 /*
3205                  * Async queues are currently system wide. Just taking the
3206                  * proportion of queues within the same group will lead to a
3207                  * higher async ratio system wide, as the root group generally
3208                  * has the higher weight. A more accurate approach would be to
3209                  * calculate the system wide async/sync ratio.
3210                  */
3211                 tmp = cfqd->cfq_target_latency *
3212                         cfqg_busy_async_queues(cfqd, cfqg);
3213                 tmp = div_u64(tmp, cfqd->busy_queues);
3214                 slice = min_t(u64, slice, tmp);
3215
3216                 /* async workload slice is scaled down according to
3217                  * the sync/async slice ratio. */
3218                 slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
3219         } else
3220                 /* sync workload slice is at least 2 * cfq_slice_idle */
3221                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3222
3223         slice = max_t(u64, slice, CFQ_MIN_TT);
3224         cfq_log(cfqd, "workload slice:%llu", slice);
3225         cfqd->workload_expires = now + slice;
3226 }
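     /*
      * Worked example with assumed numbers: if group_slice comes out to the
      * 300ms default target latency and the chosen workload type has 2 busy
      * queues out of 6 in the serving class, the computation above yields
      * 300ms * 2 / 6 = 100ms, before the async scaling (or the
      * 2 * cfq_slice_idle sync minimum) and the CFQ_MIN_TT clamp apply.
      */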
3227
3228 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3229 {
3230         struct cfq_rb_root *st = &cfqd->grp_service_tree;
3231         struct cfq_group *cfqg;
3232
3233         if (RB_EMPTY_ROOT(&st->rb))
3234                 return NULL;
3235         cfqg = cfq_rb_first_group(st);
3236         update_min_vdisktime(st);
3237         return cfqg;
3238 }
3239
3240 static void cfq_choose_cfqg(struct cfq_data *cfqd)
3241 {
3242         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3243         u64 now = ktime_get_ns();
3244
3245         cfqd->serving_group = cfqg;
3246
3247         /* Restore the workload type data */
3248         if (cfqg->saved_wl_slice) {
3249                 cfqd->workload_expires = now + cfqg->saved_wl_slice;
3250                 cfqd->serving_wl_type = cfqg->saved_wl_type;
3251                 cfqd->serving_wl_class = cfqg->saved_wl_class;
3252         } else
3253                 cfqd->workload_expires = now - 1;
3254
3255         choose_wl_class_and_type(cfqd, cfqg);
3256 }
3257
3258 /*
3259  * Select a queue for service. If we have a current active queue,
3260  * check whether to continue servicing it, or retrieve and set a new one.
3261  */
3262 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3263 {
3264         struct cfq_queue *cfqq, *new_cfqq = NULL;
3265         u64 now = ktime_get_ns();
3266
3267         cfqq = cfqd->active_queue;
3268         if (!cfqq)
3269                 goto new_queue;
3270
3271         if (!cfqd->rq_queued)
3272                 return NULL;
3273
3274         /*
3275          * We were waiting for group to get backlogged. Expire the queue
3276          */
3277         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3278                 goto expire;
3279
3280         /*
3281          * The active queue has run out of time, expire it and select new.
3282          */
3283         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3284                 /*
3285                  * If the slice had not expired at the completion of the last
3286                  * request, we might not have turned on the wait_busy flag. Don't
3287                  * expire the queue yet. Allow the group to get backlogged.
3288                  *
3289                  * The very fact that we have used up the slice means we have
3290                  * been idling all along on this queue, so it should be OK to
3291                  * wait for this request to complete.
3292                  */
3293                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3294                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3295                         cfqq = NULL;
3296                         goto keep_queue;
3297                 } else
3298                         goto check_group_idle;
3299         }
3300
3301         /*
3302          * The active queue has requests and isn't expired, allow it to
3303          * dispatch.
3304          */
3305         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3306                 goto keep_queue;
3307
3308         /*
3309          * If another queue has a request waiting within our mean seek
3310          * distance, let it run.  The expire code will check for close
3311          * cooperators and put the close queue at the front of the service
3312          * tree.  If possible, merge the expiring queue with the new cfqq.
3313          */
3314         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3315         if (new_cfqq) {
3316                 if (!cfqq->new_cfqq)
3317                         cfq_setup_merge(cfqq, new_cfqq);
3318                 goto expire;
3319         }
3320
3321         /*
3322          * No requests pending. If the active queue still has requests in
3323          * flight or is idling for a new request, allow either of these
3324          * conditions to happen (or time out) before selecting a new queue.
3325          */
3326         if (hrtimer_active(&cfqd->idle_slice_timer)) {
3327                 cfqq = NULL;
3328                 goto keep_queue;
3329         }
3330
3331         /*
3332          * This is a deep seek queue, but the device is much faster than
3333          * the queue can deliver; don't idle
3334          */
3335         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3336             (cfq_cfqq_slice_new(cfqq) ||
3337             (cfqq->slice_end - now > now - cfqq->slice_start))) {
3338                 cfq_clear_cfqq_deep(cfqq);
3339                 cfq_clear_cfqq_idle_window(cfqq);
3340         }
3341
3342         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3343                 cfqq = NULL;
3344                 goto keep_queue;
3345         }
3346
3347         /*
3348          * If group idle is enabled and there are requests dispatched from
3349          * this group, wait for requests to complete.
3350          */
3351 check_group_idle:
3352         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3353             cfqq->cfqg->dispatched &&
3354             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3355                 cfqq = NULL;
3356                 goto keep_queue;
3357         }
3358
3359 expire:
3360         cfq_slice_expired(cfqd, 0);
3361 new_queue:
3362         /*
3363          * Current queue expired. Check if we have to switch to a new
3364          * service tree
3365          */
3366         if (!new_cfqq)
3367                 cfq_choose_cfqg(cfqd);
3368
3369         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3370 keep_queue:
3371         return cfqq;
3372 }
3373
3374 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3375 {
3376         int dispatched = 0;
3377
3378         while (cfqq->next_rq) {
3379                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3380                 dispatched++;
3381         }
3382
3383         BUG_ON(!list_empty(&cfqq->fifo));
3384
3385         /* By default cfqq is not expired if it is empty. Do it explicitly */
3386         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3387         return dispatched;
3388 }
3389
3390 /*
3391  * Drain our current requests. Used for barriers and when switching
3392  * io schedulers on-the-fly.
3393  */
3394 static int cfq_forced_dispatch(struct cfq_data *cfqd)
3395 {
3396         struct cfq_queue *cfqq;
3397         int dispatched = 0;
3398
3399         /* Expire the timeslice of the current active queue first */
3400         cfq_slice_expired(cfqd, 0);
3401         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3402                 __cfq_set_active_queue(cfqd, cfqq);
3403                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3404         }
3405
3406         BUG_ON(cfqd->busy_queues);
3407
3408         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3409         return dispatched;
3410 }
3411
3412 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3413         struct cfq_queue *cfqq)
3414 {
3415         u64 now = ktime_get_ns();
3416
3417         /* the queue hasn't finished any request, can't estimate */
3418         if (cfq_cfqq_slice_new(cfqq))
3419                 return true;
3420         if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
3421                 return true;
3422
3423         return false;
3424 }
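     /*
      * The estimate above treats each already-dispatched request as needing
      * roughly one cfq_slice_idle worth of service: if "now" plus that
      * allowance would overrun slice_end, the slice is assumed to be used
      * up soon.
      */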
3425
3426 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3427 {
3428         unsigned int max_dispatch;
3429
3430         if (cfq_cfqq_must_dispatch(cfqq))
3431                 return true;
3432
3433         /*
3434          * Drain async requests before we start sync IO
3435          */
3436         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3437                 return false;
3438
3439         /*
3440          * If this is an async queue and we have sync IO in flight, let it wait
3441          */
3442         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3443                 return false;
3444
3445         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3446         if (cfq_class_idle(cfqq))
3447                 max_dispatch = 1;
3448
3449         /*
3450          * Does this cfqq already have too much IO in flight?
3451          */
3452         if (cfqq->dispatched >= max_dispatch) {
3453                 bool promote_sync = false;
3454                 /*
3455                  * idle queue must always only have a single IO in flight
3456                  */
3457                 if (cfq_class_idle(cfqq))
3458                         return false;
3459
3460                 /*
3461                  * If there is only one sync queue, we can ignore the async
3462                  * queues here and give the sync queue no dispatch limit. Since a
3463                  * sync queue can preempt async queues, limiting the sync queue
3464                  * doesn't make sense. This is useful for the aiostress test.
3466                  */
3467                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3468                         promote_sync = true;
3469
3470                 /*
3471                  * We have other queues, don't allow more IO from this one
3472                  */
3473                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3474                                 !promote_sync)
3475                         return false;
3476
3477                 /*
3478                  * Sole queue user, no limit
3479                  */
3480                 if (cfqd->busy_queues == 1 || promote_sync)
3481                         max_dispatch = -1;
3482                 else
3483                         /*
3484                          * Normally we start throttling cfqq when cfq_quantum/2
3485                          * requests have been dispatched. But we can drive
3486                          * deeper queue depths at the beginning of the slice,
3487                          * subject to the upper limit of cfq_quantum.
3488                          */
3489                         max_dispatch = cfqd->cfq_quantum;
3490         }
3491
3492         /*
3493          * Async queues must wait a bit before being allowed dispatch.
3494          * We also ramp up the dispatch depth gradually for async IO,
3495          * based on the last sync IO we serviced
3496          */
3497         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3498                 u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
3499                 unsigned int depth;
3500
3501                 depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
3502                 if (!depth && !cfqq->dispatched)
3503                         depth = 1;
3504                 if (depth < max_dispatch)
3505                         max_dispatch = depth;
3506         }
3507
3508         /*
3509          * If we're below the current max, allow a dispatch
3510          */
3511         return cfqq->dispatched < max_dispatch;
3512 }
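     /*
      * Illustrative numbers for the async ramp-up above, assuming the
      * default cfq_quantum of 8 and 100ms sync slice with cfq_latency set:
      * the base limit is max(8 / 2, 1) = 4, and if the last delayed sync
      * I/O completed 250ms ago the ramp allows depth = 250ms / 100ms = 2,
      * so the async queue may keep at most 2 requests in flight until more
      * time has passed since that sync I/O.
      */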
3513
3514 /*
3515  * Dispatch a request from cfqq, moving it to the request queue
3516  * dispatch list.
3517  */
3518 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3519 {
3520         struct request *rq;
3521
3522         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3523
3524         rq = cfq_check_fifo(cfqq);
3525         if (rq)
3526                 cfq_mark_cfqq_must_dispatch(cfqq);
3527
3528         if (!cfq_may_dispatch(cfqd, cfqq))
3529                 return false;
3530
3531         /*
3532          * follow the expired path, else get the next available request
3533          */
3534         if (!rq)
3535                 rq = cfqq->next_rq;
3536         else
3537                 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
3538
3539         /*
3540          * insert request into driver dispatch list
3541          */
3542         cfq_dispatch_insert(cfqd->queue, rq);
3543
3544         if (!cfqd->active_cic) {
3545                 struct cfq_io_cq *cic = RQ_CIC(rq);
3546
3547                 atomic_long_inc(&cic->icq.ioc->refcount);
3548                 cfqd->active_cic = cic;
3549         }
3550
3551         return true;
3552 }
3553
3554 /*
3555  * Find the cfqq that we need to service and move a request from that to the
3556  * dispatch list
3557  */
3558 static int cfq_dispatch_requests(struct request_queue *q, int force)
3559 {
3560         struct cfq_data *cfqd = q->elevator->elevator_data;
3561         struct cfq_queue *cfqq;
3562
3563         if (!cfqd->busy_queues)
3564                 return 0;
3565
3566         if (unlikely(force))
3567                 return cfq_forced_dispatch(cfqd);
3568
3569         cfqq = cfq_select_queue(cfqd);
3570         if (!cfqq)
3571                 return 0;
3572
3573         /*
3574          * Dispatch a request from this cfqq, if it is allowed
3575          */
3576         if (!cfq_dispatch_request(cfqd, cfqq))
3577                 return 0;
3578
3579         cfqq->slice_dispatch++;
3580         cfq_clear_cfqq_must_dispatch(cfqq);
3581
3582         /*
3583          * expire an async queue immediately if it has used up its slice. An
3584          * idle queue always expires after 1 dispatch round.
3585          */
3586         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3587             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3588             cfq_class_idle(cfqq))) {
3589                 cfqq->slice_end = ktime_get_ns() + 1;
3590                 cfq_slice_expired(cfqd, 0);
3591         }
3592
3593         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3594         return 1;
3595 }
3596
3597 /*
3598  * task holds one reference to the queue, dropped when task exits. each rq
3599  * in-flight on this queue also holds a reference, dropped when rq is freed.
3600  *
3601  * Each cfq queue took a reference on the parent group. Drop it now.
3602  * queue lock must be held here.
3603  */
3604 static void cfq_put_queue(struct cfq_queue *cfqq)
3605 {
3606         struct cfq_data *cfqd = cfqq->cfqd;
3607         struct cfq_group *cfqg;
3608
3609         BUG_ON(cfqq->ref <= 0);
3610
3611         cfqq->ref--;
3612         if (cfqq->ref)
3613                 return;
3614
3615         cfq_log_cfqq(cfqd, cfqq, "put_queue");
3616         BUG_ON(rb_first(&cfqq->sort_list));
3617         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3618         cfqg = cfqq->cfqg;
3619
3620         if (unlikely(cfqd->active_queue == cfqq)) {
3621                 __cfq_slice_expired(cfqd, cfqq, 0);
3622                 cfq_schedule_dispatch(cfqd);
3623         }
3624
3625         BUG_ON(cfq_cfqq_on_rr(cfqq));
3626         kmem_cache_free(cfq_pool, cfqq);
3627         cfqg_put(cfqg);
3628 }
3629
3630 static void cfq_put_cooperator(struct cfq_queue *cfqq)
3631 {
3632         struct cfq_queue *__cfqq, *next;
3633
3634         /*
3635          * If this queue was scheduled to merge with another queue, be
3636          * sure to drop the reference taken on that queue (and others in
3637          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3638          */
3639         __cfqq = cfqq->new_cfqq;
3640         while (__cfqq) {
3641                 if (__cfqq == cfqq) {
3642                         WARN(1, "cfqq->new_cfqq loop detected\n");
3643                         break;
3644                 }
3645                 next = __cfqq->new_cfqq;
3646                 cfq_put_queue(__cfqq);
3647                 __cfqq = next;
3648         }
3649 }
3650
3651 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3652 {
3653         if (unlikely(cfqq == cfqd->active_queue)) {
3654                 __cfq_slice_expired(cfqd, cfqq, 0);
3655                 cfq_schedule_dispatch(cfqd);
3656         }
3657
3658         cfq_put_cooperator(cfqq);
3659
3660         cfq_put_queue(cfqq);
3661 }
3662
3663 static void cfq_init_icq(struct io_cq *icq)
3664 {
3665         struct cfq_io_cq *cic = icq_to_cic(icq);
3666
3667         cic->ttime.last_end_request = ktime_get_ns();
3668 }
3669
3670 static void cfq_exit_icq(struct io_cq *icq)
3671 {
3672         struct cfq_io_cq *cic = icq_to_cic(icq);
3673         struct cfq_data *cfqd = cic_to_cfqd(cic);
3674
3675         if (cic_to_cfqq(cic, false)) {
3676                 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3677                 cic_set_cfqq(cic, NULL, false);
3678         }
3679
3680         if (cic_to_cfqq(cic, true)) {
3681                 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3682                 cic_set_cfqq(cic, NULL, true);
3683         }
3684 }
3685
3686 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3687 {
3688         struct task_struct *tsk = current;
3689         int ioprio_class;
3690
3691         if (!cfq_cfqq_prio_changed(cfqq))
3692                 return;
3693
3694         ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3695         switch (ioprio_class) {
3696         default:
3697                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3698         case IOPRIO_CLASS_NONE:
3699                 /*
3700                  * no prio set, inherit CPU scheduling settings
3701                  */
3702                 cfqq->ioprio = task_nice_ioprio(tsk);
3703                 cfqq->ioprio_class = task_nice_ioclass(tsk);
3704                 break;
3705         case IOPRIO_CLASS_RT:
3706                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3707                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3708                 break;
3709         case IOPRIO_CLASS_BE:
3710                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3711                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3712                 break;
3713         case IOPRIO_CLASS_IDLE:
3714                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3715                 cfqq->ioprio = 7;
3716                 cfq_clear_cfqq_idle_window(cfqq);
3717                 break;
3718         }
3719
3720         /*
3721          * keep track of original prio settings in case we have to temporarily
3722          * elevate the priority of this queue
3723          */
3724         cfqq->org_ioprio = cfqq->ioprio;
3725         cfqq->org_ioprio_class = cfqq->ioprio_class;
3726         cfq_clear_cfqq_prio_changed(cfqq);
3727 }
3728
3729 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3730 {
3731         int ioprio = cic->icq.ioc->ioprio;
3732         struct cfq_data *cfqd = cic_to_cfqd(cic);
3733         struct cfq_queue *cfqq;
3734
3735         /*
3736          * Check whether ioprio has changed.  The condition may trigger
3737          * spuriously on a newly created cic but there's no harm.
3738          */
3739         if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3740                 return;
3741
3742         cfqq = cic_to_cfqq(cic, false);
3743         if (cfqq) {
3744                 cfq_put_queue(cfqq);
3745                 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
3746                 cic_set_cfqq(cic, cfqq, false);
3747         }
3748
3749         cfqq = cic_to_cfqq(cic, true);
3750         if (cfqq)
3751                 cfq_mark_cfqq_prio_changed(cfqq);
3752
3753         cic->ioprio = ioprio;
3754 }
3755
3756 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3757                           pid_t pid, bool is_sync)
3758 {
3759         RB_CLEAR_NODE(&cfqq->rb_node);
3760         RB_CLEAR_NODE(&cfqq->p_node);
3761         INIT_LIST_HEAD(&cfqq->fifo);
3762
3763         cfqq->ref = 0;
3764         cfqq->cfqd = cfqd;
3765
3766         cfq_mark_cfqq_prio_changed(cfqq);
3767
3768         if (is_sync) {
3769                 if (!cfq_class_idle(cfqq))
3770                         cfq_mark_cfqq_idle_window(cfqq);
3771                 cfq_mark_cfqq_sync(cfqq);
3772         }
3773         cfqq->pid = pid;
3774 }
3775
3776 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3777 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3778 {
3779         struct cfq_data *cfqd = cic_to_cfqd(cic);
3780         struct cfq_queue *cfqq;
3781         uint64_t serial_nr;
3782
3783         rcu_read_lock();
3784         serial_nr = bio_blkcg(bio)->css.serial_nr;
3785         rcu_read_unlock();
3786
3787         /*
3788          * Check whether blkcg has changed.  The condition may trigger
3789          * spuriously on a newly created cic but there's no harm.
3790          */
3791         if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
3792                 return;
3793
3794         /*
3795          * Drop reference to queues.  New queues will be assigned in new
3796          * group upon arrival of fresh requests.
3797          */
3798         cfqq = cic_to_cfqq(cic, false);
3799         if (cfqq) {
3800                 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3801                 cic_set_cfqq(cic, NULL, false);
3802                 cfq_put_queue(cfqq);
3803         }
3804
3805         cfqq = cic_to_cfqq(cic, true);
3806         if (cfqq) {
3807                 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3808                 cic_set_cfqq(cic, NULL, true);
3809                 cfq_put_queue(cfqq);
3810         }
3811
3812         cic->blkcg_serial_nr = serial_nr;
3813 }
3814 #else
3815 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3816 {
3817 }
3818 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3819
3820 static struct cfq_queue **
3821 cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
3822 {
3823         switch (ioprio_class) {
3824         case IOPRIO_CLASS_RT:
3825                 return &cfqg->async_cfqq[0][ioprio];
3826         case IOPRIO_CLASS_NONE:
3827                 ioprio = IOPRIO_NORM;
3828                 /* fall through */
3829         case IOPRIO_CLASS_BE:
3830                 return &cfqg->async_cfqq[1][ioprio];
3831         case IOPRIO_CLASS_IDLE:
3832                 return &cfqg->async_idle_cfqq;
3833         default:
3834                 BUG();
3835         }
3836 }
3837
3838 static struct cfq_queue *
3839 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3840               struct bio *bio)
3841 {
3842         int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3843         int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3844         struct cfq_queue **async_cfqq = NULL;
3845         struct cfq_queue *cfqq;
3846         struct cfq_group *cfqg;
3847
3848         rcu_read_lock();
3849         cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
3850         if (!cfqg) {
3851                 cfqq = &cfqd->oom_cfqq;
3852                 goto out;
3853         }
3854
3855         if (!is_sync) {
3856                 if (!ioprio_valid(cic->ioprio)) {
3857                         struct task_struct *tsk = current;
3858                         ioprio = task_nice_ioprio(tsk);
3859                         ioprio_class = task_nice_ioclass(tsk);
3860                 }
3861                 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
3862                 cfqq = *async_cfqq;
3863                 if (cfqq)
3864                         goto out;
3865         }
3866
3867         cfqq = kmem_cache_alloc_node(cfq_pool,
3868                                      GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
3869                                      cfqd->queue->node);
3870         if (!cfqq) {
3871                 cfqq = &cfqd->oom_cfqq;
3872                 goto out;
3873         }
3874
3875         /* cfq_init_cfqq() assumes cfqq->ioprio_class is initialized. */
3876         cfqq->ioprio_class = IOPRIO_CLASS_NONE;
3877         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3878         cfq_init_prio_data(cfqq, cic);
3879         cfq_link_cfqq_cfqg(cfqq, cfqg);
3880         cfq_log_cfqq(cfqd, cfqq, "alloced");
3881
3882         if (async_cfqq) {
3883                 /* a new async queue is created, pin and remember */
3884                 cfqq->ref++;
3885                 *async_cfqq = cfqq;
3886         }
3887 out:
3888         cfqq->ref++;
3889         rcu_read_unlock();
3890         return cfqq;
3891 }
3892
3893 static void
3894 __cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
3895 {
3896         u64 elapsed = ktime_get_ns() - ttime->last_end_request;
3897         elapsed = min(elapsed, 2UL * slice_idle);
3898
3899         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3900         ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8);
3901         ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3902                                      ttime->ttime_samples);
3903 }
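     /*
      * The update above is a fixed-point, exponentially decaying average:
      * both counters keep 7/8 of their old value on every sample, so
      * ttime_samples converges towards 256 (the fixed point of
      * s = (7s + 256) / 8) and ttime_mean tracks a weighted mean of recent
      * elapsed times; the +128 merely rounds the final division.
      */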
3904
3905 static void
3906 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3907                         struct cfq_io_cq *cic)
3908 {
3909         if (cfq_cfqq_sync(cfqq)) {
3910                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3911                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3912                         cfqd->cfq_slice_idle);
3913         }
3914 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3915         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3916 #endif
3917 }
3918
3919 static void
3920 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3921                        struct request *rq)
3922 {
3923         sector_t sdist = 0;
3924         sector_t n_sec = blk_rq_sectors(rq);
3925         if (cfqq->last_request_pos) {
3926                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3927                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3928                 else
3929                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3930         }
3931
3932         cfqq->seek_history <<= 1;
3933         if (blk_queue_nonrot(cfqd->queue))
3934                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3935         else
3936                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3937 }
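     /*
      * seek_history acts as a one-bit-per-request shift register: each new
      * request shifts in a 1 when it looks seeky (seek distance above
      * CFQQ_SEEK_THR on rotational devices, or a small transfer below
      * CFQQ_SECT_THR_NONROT on non-rotational ones). CFQQ_SEEKY() then
      * summarizes this recent history when deciding whether idling on the
      * queue is still worthwhile.
      */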
3938
3939 static inline bool req_noidle(struct request *req)
3940 {
3941         return req_op(req) == REQ_OP_WRITE &&
3942                 (req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
3943 }
3944
3945 /*
3946  * Disable idle window if the process thinks too long or seeks so much that
3947  * it doesn't matter
3948  */
3949 static void
3950 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3951                        struct cfq_io_cq *cic)
3952 {
3953         int old_idle, enable_idle;
3954
3955         /*
3956          * Don't idle for async or idle io prio class
3957          */
3958         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3959                 return;
3960
3961         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3962
3963         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3964                 cfq_mark_cfqq_deep(cfqq);
3965
3966         if (cfqq->next_rq && req_noidle(cfqq->next_rq))
3967                 enable_idle = 0;
3968         else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3969                  !cfqd->cfq_slice_idle ||
3970                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3971                 enable_idle = 0;
3972         else if (sample_valid(cic->ttime.ttime_samples)) {
3973                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3974                         enable_idle = 0;
3975                 else
3976                         enable_idle = 1;
3977         }
3978
3979         if (old_idle != enable_idle) {
3980                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3981                 if (enable_idle)
3982                         cfq_mark_cfqq_idle_window(cfqq);
3983                 else
3984                         cfq_clear_cfqq_idle_window(cfqq);
3985         }
3986 }
3987
3988 /*
3989  * Check if new_cfqq should preempt the currently active queue. Return false
3990  * if not (or if we aren't sure); returning true will cause a preempt.
3991  */
3992 static bool
3993 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3994                    struct request *rq)
3995 {
3996         struct cfq_queue *cfqq;
3997
3998         cfqq = cfqd->active_queue;
3999         if (!cfqq)
4000                 return false;
4001
4002         if (cfq_class_idle(new_cfqq))
4003                 return false;
4004
4005         if (cfq_class_idle(cfqq))
4006                 return true;
4007
4008         /*
4009          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
4010          */
4011         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
4012                 return false;
4013
4014         /*
4015          * if the new request is sync, but the currently running queue is
4016          * not, let the sync request have priority.
4017          */
4018         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
4019                 return true;
4020
4021         /*
4022          * Treat ancestors of current cgroup the same way as current cgroup.
4023          * For anybody else we disallow preemption to guarantee service
4024          * fairness among cgroups.
4025          */
4026         if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
4027                 return false;
4028
4029         if (cfq_slice_used(cfqq))
4030                 return true;
4031
4032         /*
4033          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
4034          */
4035         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
4036                 return true;
4037
4038         WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
4039         /* Allow preemption only if we are idling on sync-noidle tree */
4040         if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
4041             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
4042             RB_EMPTY_ROOT(&cfqq->sort_list))
4043                 return true;
4044
4045         /*
4046          * So both queues are sync. Let the new request get disk time if
4047          * it's a metadata request and the current queue is doing regular IO.
4048          */
4049         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
4050                 return true;
4051
4052         /* An idle queue should not be idle now for some reason */
4053         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
4054                 return true;
4055
4056         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
4057                 return false;
4058
4059         /*
4060          * if this request is as-good as one we would expect from the
4061          * current cfqq, let it preempt
4062          */
4063         if (cfq_rq_close(cfqd, cfqq, rq))
4064                 return true;
4065
4066         return false;
4067 }
4068
4069 /*
4070  * cfqq preempts the active queue. if we allowed preempt with no slice left,
4071  * let it have half of its nominal slice.
4072  */
4073 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4074 {
4075         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
4076
4077         cfq_log_cfqq(cfqd, cfqq, "preempt");
4078         cfq_slice_expired(cfqd, 1);
4079
4080         /*
4081          * workload type is changed, don't save slice, otherwise preempt
4082          * doesn't happen
4083          */
4084         if (old_type != cfqq_type(cfqq))
4085                 cfqq->cfqg->saved_wl_slice = 0;
4086
4087         /*
4088          * Put the new queue at the front of the current list,
4089          * so we know that it will be selected next.
4090          */
4091         BUG_ON(!cfq_cfqq_on_rr(cfqq));
4092
4093         cfq_service_tree_add(cfqd, cfqq, 1);
4094
4095         cfqq->slice_end = 0;
4096         cfq_mark_cfqq_slice_new(cfqq);
4097 }
4098
4099 /*
4100  * Called when a new fs request (rq) is added (to cfqq). Check if there's
4101  * something we should do about it
4102  */
4103 static void
4104 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4105                 struct request *rq)
4106 {
4107         struct cfq_io_cq *cic = RQ_CIC(rq);
4108
4109         cfqd->rq_queued++;
4110         if (rq->cmd_flags & REQ_PRIO)
4111                 cfqq->prio_pending++;
4112
4113         cfq_update_io_thinktime(cfqd, cfqq, cic);
4114         cfq_update_io_seektime(cfqd, cfqq, rq);
4115         cfq_update_idle_window(cfqd, cfqq, cic);
4116
4117         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4118
4119         if (cfqq == cfqd->active_queue) {
4120                 /*
4121                  * Remember that we saw a request from this process, but
4122                  * don't start queuing just yet. Otherwise we risk seeing lots
4123                  * of tiny requests, because we disrupt the normal plugging
4124                  * and merging. If the request is already larger than a single
4125                  * page, let it rip immediately. For that case we assume that
4126                  * merging is already done. Ditto for a busy system that
4127                  * has other work pending, don't risk delaying until the
4128                  * idle timer unplug to continue working.
4129                  */
4130                 if (cfq_cfqq_wait_request(cfqq)) {
4131                         if (blk_rq_bytes(rq) > PAGE_SIZE ||
4132                             cfqd->busy_queues > 1) {
4133                                 cfq_del_timer(cfqd, cfqq);
4134                                 cfq_clear_cfqq_wait_request(cfqq);
4135                                 __blk_run_queue(cfqd->queue);
4136                         } else {
4137                                 cfqg_stats_update_idle_time(cfqq->cfqg);
4138                                 cfq_mark_cfqq_must_dispatch(cfqq);
4139                         }
4140                 }
4141         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
4142                 /*
4143                  * not the active queue - expire the current slice if it is
4144                  * idle and has expired its mean thinktime, or this new queue
4145                  * has some old slice time left and is of higher priority, or
4146                  * this new queue is RT and the current one is BE
4147                  */
4148                 cfq_preempt_queue(cfqd, cfqq);
4149                 __blk_run_queue(cfqd->queue);
4150         }
4151 }
4152
4153 static void cfq_insert_request(struct request_queue *q, struct request *rq)
4154 {
4155         struct cfq_data *cfqd = q->elevator->elevator_data;
4156         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4157
4158         cfq_log_cfqq(cfqd, cfqq, "insert_request");
4159         cfq_init_prio_data(cfqq, RQ_CIC(rq));
4160
4161         rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
4162         list_add_tail(&rq->queuelist, &cfqq->fifo);
4163         cfq_add_rq_rb(rq);
4164         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
4165                                  rq->cmd_flags);
4166         cfq_rq_enqueued(cfqd, cfqq, rq);
4167 }
4168
4169 /*
4170  * Update hw_tag based on peak queue depth over 50 samples under
4171  * sufficient load.
4172  */
4173 static void cfq_update_hw_tag(struct cfq_data *cfqd)
4174 {
4175         struct cfq_queue *cfqq = cfqd->active_queue;
4176
4177         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4178                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
4179
4180         if (cfqd->hw_tag == 1)
4181                 return;
4182
4183         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
4184             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
4185                 return;
4186
4187         /*
4188          * If the active queue doesn't have enough requests and can idle, cfq
4189          * might not dispatch sufficient requests to hardware. Don't zero
4190          * hw_tag in this case
4191          */
4192         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4193             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
4194             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
4195                 return;
4196
4197         if (cfqd->hw_tag_samples++ < 50)
4198                 return;
4199
4200         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
4201                 cfqd->hw_tag = 1;
4202         else
4203                 cfqd->hw_tag = 0;
4204 }
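     /*
      * In short: after 50 qualifying samples, hw_tag is set to 1 if the
      * peak observed driver depth ever reached CFQ_HW_QUEUE_MIN (5) and to
      * 0 otherwise; once it reads 1 it is never re-evaluated here, and
      * samples taken while both the queued and in-driver counts are small
      * are skipped so an idle device doesn't drag the estimate down.
      */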
4205
4206 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4207 {
4208         struct cfq_io_cq *cic = cfqd->active_cic;
4209         u64 now = ktime_get_ns();
4210
4211         /* If the queue already has requests, don't wait */
4212         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4213                 return false;
4214
4215         /* If there are other queues in the group, don't wait */
4216         if (cfqq->cfqg->nr_cfqq > 1)
4217                 return false;
4218
4219         /* the only queue in the group, but think time is big */
4220         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4221                 return false;
4222
4223         if (cfq_slice_used(cfqq))
4224                 return true;
4225
4226         /* if slice left is less than think time, wait busy */
4227         if (cic && sample_valid(cic->ttime.ttime_samples)
4228             && (cfqq->slice_end - now < cic->ttime.ttime_mean))
4229                 return true;
4230
4231         /*
4232          * If the think time is less than a jiffy then ttime_mean=0 and the
4233          * above will not be true. It might happen that the slice has not
4234          * expired yet but will expire soon (4-5 ns) during select_queue(). To
4235          * cover the case where the think time is less than a jiffy, mark the
4236          * queue wait busy if only 1 jiffy is left in the slice.
4237          */
4238         if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
4239                 return true;
4240
4241         return false;
4242 }
4243
4244 static void cfq_completed_request(struct request_queue *q, struct request *rq)
4245 {
4246         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4247         struct cfq_data *cfqd = cfqq->cfqd;
4248         const int sync = rq_is_sync(rq);
4249         u64 now = ktime_get_ns();
4250
4251         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
4252
4253         cfq_update_hw_tag(cfqd);
4254
4255         WARN_ON(!cfqd->rq_in_driver);
4256         WARN_ON(!cfqq->dispatched);
4257         cfqd->rq_in_driver--;
4258         cfqq->dispatched--;
4259         (RQ_CFQG(rq))->dispatched--;
4260         cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4261                                      rq_io_start_time_ns(rq), rq->cmd_flags);
4262
4263         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4264
4265         if (sync) {
4266                 struct cfq_rb_root *st;
4267
4268                 RQ_CIC(rq)->ttime.last_end_request = now;
4269
4270                 if (cfq_cfqq_on_rr(cfqq))
4271                         st = cfqq->service_tree;
4272                 else
4273                         st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4274                                         cfqq_type(cfqq));
4275
4276                 st->ttime.last_end_request = now;
4277                 /*
4278                  * We have to do this check in jiffies since start_time is in
4279                  * jiffies and it is not trivial to convert to ns. If
4280                  * cfq_fifo_expire[1] ever comes close to 1 jiffie, this test
4281                  * will become problematic but so far we are fine (the default
4282                  * is 128 ms).
4283                  */
4284                 if (!time_after(rq->start_time +
4285                                   nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]),
4286                                 jiffies))
4287                         cfqd->last_delayed_sync = now;
4288         }
4289
4290 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4291         cfqq->cfqg->ttime.last_end_request = now;
4292 #endif
4293
4294         /*
4295          * If this is the active queue, check if it needs to be expired,
4296          * or if we want to idle in case it has no pending requests.
4297          */
4298         if (cfqd->active_queue == cfqq) {
4299                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4300
4301                 if (cfq_cfqq_slice_new(cfqq)) {
4302                         cfq_set_prio_slice(cfqd, cfqq);
4303                         cfq_clear_cfqq_slice_new(cfqq);
4304                 }
4305
4306                 /*
4307                  * Should we wait for the next request to come in before we
4308                  * expire the queue?
4309                  */
4310                 if (cfq_should_wait_busy(cfqd, cfqq)) {
4311                         u64 extend_sl = cfqd->cfq_slice_idle;
4312                         if (!cfqd->cfq_slice_idle)
4313                                 extend_sl = cfqd->cfq_group_idle;
4314                         cfqq->slice_end = now + extend_sl;
4315                         cfq_mark_cfqq_wait_busy(cfqq);
4316                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4317                 }
4318
4319                 /*
4320                  * Idling is not enabled on:
4321                  * - expired queues
4322                  * - idle-priority queues
4323                  * - async queues
4324                  * - queues with still some requests queued
4325                  * - when there is a close cooperator
4326                  */
4327                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4328                         cfq_slice_expired(cfqd, 1);
4329                 else if (sync && cfqq_empty &&
4330                          !cfq_close_cooperator(cfqd, cfqq)) {
4331                         cfq_arm_slice_timer(cfqd);
4332                 }
4333         }
4334
4335         if (!cfqd->rq_in_driver)
4336                 cfq_schedule_dispatch(cfqd);
4337 }
4338
4339 static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
4340 {
4341         /*
4342          * If REQ_PRIO is set, boost the class and prio level if it's below
4343          * BE/NORM. If prio is not set, restore the potentially boosted
4344          * class/prio level.
4345          */
4346         if (!(op & REQ_PRIO)) {
4347                 cfqq->ioprio_class = cfqq->org_ioprio_class;
4348                 cfqq->ioprio = cfqq->org_ioprio;
4349         } else {
4350                 if (cfq_class_idle(cfqq))
4351                         cfqq->ioprio_class = IOPRIO_CLASS_BE;
4352                 if (cfqq->ioprio > IOPRIO_NORM)
4353                         cfqq->ioprio = IOPRIO_NORM;
4354         }
4355 }
4356
4357 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4358 {
4359         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4360                 cfq_mark_cfqq_must_alloc_slice(cfqq);
4361                 return ELV_MQUEUE_MUST;
4362         }
4363
4364         return ELV_MQUEUE_MAY;
4365 }
4366
4367 static int cfq_may_queue(struct request_queue *q, unsigned int op)
4368 {
4369         struct cfq_data *cfqd = q->elevator->elevator_data;
4370         struct task_struct *tsk = current;
4371         struct cfq_io_cq *cic;
4372         struct cfq_queue *cfqq;
4373
4374         /*
4375          * don't force setup of a queue from here, as a call to may_queue
4376          * does not necessarily imply that a request actually will be queued.
4377          * so just lookup a possibly existing queue, or return 'may queue'
4378          * if that fails
4379          */
4380         cic = cfq_cic_lookup(cfqd, tsk->io_context);
4381         if (!cic)
4382                 return ELV_MQUEUE_MAY;
4383
4384         cfqq = cic_to_cfqq(cic, op_is_sync(op));
4385         if (cfqq) {
4386                 cfq_init_prio_data(cfqq, cic);
4387                 cfqq_boost_on_prio(cfqq, op);
4388
4389                 return __cfq_may_queue(cfqq);
4390         }
4391
4392         return ELV_MQUEUE_MAY;
4393 }
4394
4395 /*
4396  * queue lock held here
4397  */
4398 static void cfq_put_request(struct request *rq)
4399 {
4400         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4401
4402         if (cfqq) {
4403                 const int rw = rq_data_dir(rq);
4404
4405                 BUG_ON(!cfqq->allocated[rw]);
4406                 cfqq->allocated[rw]--;
4407
4408                 /* Put down rq reference on cfqg */
4409                 cfqg_put(RQ_CFQG(rq));
4410                 rq->elv.priv[0] = NULL;
4411                 rq->elv.priv[1] = NULL;
4412
4413                 cfq_put_queue(cfqq);
4414         }
4415 }
4416
4417 static struct cfq_queue *
4418 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4419                 struct cfq_queue *cfqq)
4420 {
4421         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4422         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4423         cfq_mark_cfqq_coop(cfqq->new_cfqq);
4424         cfq_put_queue(cfqq);
4425         return cic_to_cfqq(cic, 1);
4426 }
4427
4428 /*
4429  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4430  * was the last process referring to said cfqq.
4431  */
4432 static struct cfq_queue *
4433 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4434 {
4435         if (cfqq_process_refs(cfqq) == 1) {
4436                 cfqq->pid = current->pid;
4437                 cfq_clear_cfqq_coop(cfqq);
4438                 cfq_clear_cfqq_split_coop(cfqq);
4439                 return cfqq;
4440         }
4441
4442         cic_set_cfqq(cic, NULL, 1);
4443
4444         cfq_put_cooperator(cfqq);
4445
4446         cfq_put_queue(cfqq);
4447         return NULL;
4448 }
4449 /*
4450  * Allocate cfq data structures associated with this request.
4451  */
4452 static int
4453 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4454                 gfp_t gfp_mask)
4455 {
4456         struct cfq_data *cfqd = q->elevator->elevator_data;
4457         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4458         const int rw = rq_data_dir(rq);
4459         const bool is_sync = rq_is_sync(rq);
4460         struct cfq_queue *cfqq;
4461
4462         spin_lock_irq(q->queue_lock);
4463
4464         check_ioprio_changed(cic, bio);
4465         check_blkcg_changed(cic, bio);
4466 new_queue:
4467         cfqq = cic_to_cfqq(cic, is_sync);
4468         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4469                 if (cfqq)
4470                         cfq_put_queue(cfqq);
4471                 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
4472                 cic_set_cfqq(cic, cfqq, is_sync);
4473         } else {
4474                 /*
4475                  * If the queue was seeky for too long, break it apart.
4476                  */
4477                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4478                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4479                         cfqq = split_cfqq(cic, cfqq);
4480                         if (!cfqq)
4481                                 goto new_queue;
4482                 }
4483
4484                 /*
4485                  * Check to see if this queue is scheduled to merge with
4486                  * another, closely cooperating queue.  The merging of
4487                  * queues happens here as it must be done in process context.
4488                  * The reference on new_cfqq was taken in merge_cfqqs.
4489                  */
4490                 if (cfqq->new_cfqq)
4491                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4492         }
4493
4494         cfqq->allocated[rw]++;
4495
4496         cfqq->ref++;
4497         cfqg_get(cfqq->cfqg);
4498         rq->elv.priv[0] = cfqq;
4499         rq->elv.priv[1] = cfqq->cfqg;
4500         spin_unlock_irq(q->queue_lock);
4501
4502         return 0;
4503 }
4504
4505 static void cfq_kick_queue(struct work_struct *work)
4506 {
4507         struct cfq_data *cfqd =
4508                 container_of(work, struct cfq_data, unplug_work);
4509         struct request_queue *q = cfqd->queue;
4510
4511         spin_lock_irq(q->queue_lock);
4512         __blk_run_queue(cfqd->queue);
4513         spin_unlock_irq(q->queue_lock);
4514 }
4515
4516 /*
4517  * Timer running if the active_queue is currently idling inside its time slice
4518  */
4519 static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
4520 {
4521         struct cfq_data *cfqd = container_of(timer, struct cfq_data,
4522                                              idle_slice_timer);
4523         struct cfq_queue *cfqq;
4524         unsigned long flags;
4525         int timed_out = 1;
4526
4527         cfq_log(cfqd, "idle timer fired");
4528
4529         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4530
4531         cfqq = cfqd->active_queue;
4532         if (cfqq) {
4533                 timed_out = 0;
4534
4535                 /*
4536                  * We saw a request before the queue expired; let it through
4537                  */
4538                 if (cfq_cfqq_must_dispatch(cfqq))
4539                         goto out_kick;
4540
4541                 /*
4542                  * expired
4543                  */
4544                 if (cfq_slice_used(cfqq))
4545                         goto expire;
4546
4547                 /*
4548                  * only expire and reinvoke request handler, if there are
4549                  * only expire and reinvoke the request handler if there are
4550                  */
4551                 if (!cfqd->busy_queues)
4552                         goto out_cont;
4553
4554                 /*
4555                  * not expired and it has a request pending, let it dispatch
4556                  * not expired and it has a request pending; let it dispatch
4557                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4558                         goto out_kick;
4559
4560                 /*
4561                  * The queue depth flag is cleared only when idling did not succeed
4562                  */
4563                 cfq_clear_cfqq_deep(cfqq);
4564         }
4565 expire:
4566         cfq_slice_expired(cfqd, timed_out);
4567 out_kick:
4568         cfq_schedule_dispatch(cfqd);
4569 out_cont:
4570         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4571         return HRTIMER_NORESTART;
4572 }
4573
4574 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4575 {
4576         hrtimer_cancel(&cfqd->idle_slice_timer);
4577         cancel_work_sync(&cfqd->unplug_work);
4578 }
4579
4580 static void cfq_exit_queue(struct elevator_queue *e)
4581 {
4582         struct cfq_data *cfqd = e->elevator_data;
4583         struct request_queue *q = cfqd->queue;
4584
4585         cfq_shutdown_timer_wq(cfqd);
4586
4587         spin_lock_irq(q->queue_lock);
4588
4589         if (cfqd->active_queue)
4590                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4591
4592         spin_unlock_irq(q->queue_lock);
4593
4594         cfq_shutdown_timer_wq(cfqd);
4595
4596 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4597         blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4598 #else
4599         kfree(cfqd->root_group);
4600 #endif
4601         kfree(cfqd);
4602 }
4603
4604 static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4605 {
4606         struct cfq_data *cfqd;
4607         struct blkcg_gq *blkg __maybe_unused;
4608         int i, ret;
4609         struct elevator_queue *eq;
4610
4611         eq = elevator_alloc(q, e);
4612         if (!eq)
4613                 return -ENOMEM;
4614
4615         cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4616         if (!cfqd) {
4617                 kobject_put(&eq->kobj);
4618                 return -ENOMEM;
4619         }
4620         eq->elevator_data = cfqd;
4621
4622         cfqd->queue = q;
4623         spin_lock_irq(q->queue_lock);
4624         q->elevator = eq;
4625         spin_unlock_irq(q->queue_lock);
4626
4627         /* Init root service tree */
4628         cfqd->grp_service_tree = CFQ_RB_ROOT;
4629
4630         /* Init root group and prefer root group over other groups by default */
4631 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4632         ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4633         if (ret)
4634                 goto out_free;
4635
4636         cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4637 #else
4638         ret = -ENOMEM;
4639         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4640                                         GFP_KERNEL, cfqd->queue->node);
4641         if (!cfqd->root_group)
4642                 goto out_free;
4643
4644         cfq_init_cfqg_base(cfqd->root_group);
4645         cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4646         cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4647 #endif
4648
4649         /*
4650          * Not strictly needed (since RB_ROOT just clears the node and we
4651          * zeroed cfqd on alloc), but better to be safe in case someone decides
4652          * to add magic to the rb code
4653          */
4654         for (i = 0; i < CFQ_PRIO_LISTS; i++)
4655                 cfqd->prio_trees[i] = RB_ROOT;
4656
4657         /*
4658          * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
4659          * Grab a permanent reference to it, so that the normal code flow
4660          * will not attempt to free it.  oom_cfqq is linked to root_group
4661          * but shouldn't hold a reference as it'll never be unlinked.  Lose
4662          * the reference from linking right away.
4663          */
4664         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4665         cfqd->oom_cfqq.ref++;
4666
4667         spin_lock_irq(q->queue_lock);
4668         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4669         cfqg_put(cfqd->root_group);
4670         spin_unlock_irq(q->queue_lock);
4671
4672         hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
4673                      HRTIMER_MODE_REL);
4674         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4675
4676         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4677
4678         cfqd->cfq_quantum = cfq_quantum;
4679         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4680         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4681         cfqd->cfq_back_max = cfq_back_max;
4682         cfqd->cfq_back_penalty = cfq_back_penalty;
4683         cfqd->cfq_slice[0] = cfq_slice_async;
4684         cfqd->cfq_slice[1] = cfq_slice_sync;
4685         cfqd->cfq_target_latency = cfq_target_latency;
4686         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4687         cfqd->cfq_slice_idle = cfq_slice_idle;
4688         cfqd->cfq_group_idle = cfq_group_idle;
4689         cfqd->cfq_latency = 1;
4690         cfqd->hw_tag = -1;
4691         /*
4692          * we optimistically start by assuming sync ops weren't delayed in the
4693          * last second, in order to allow a larger depth for async operations.
4694          */
4695         cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
4696         return 0;
4697
4698 out_free:
4699         kfree(cfqd);
4700         kobject_put(&eq->kobj);
4701         return ret;
4702 }
4703
4704 static void cfq_registered_queue(struct request_queue *q)
4705 {
4706         struct elevator_queue *e = q->elevator;
4707         struct cfq_data *cfqd = e->elevator_data;
4708
4709         /*
4710          * Default to IOPS mode with no idling for SSDs
4711          */
4712         if (blk_queue_nonrot(q))
4713                 cfqd->cfq_slice_idle = 0;
4714         wbt_disable_default(q);
4715 }
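/*
 * Editorial note: this only changes the default.  slice_idle (and the
 * other tunables below) can still be adjusted at runtime through the
 * sysfs attributes defined later in this file, so idling can be
 * re-enabled on non-rotational devices if desired.
 */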
4716
4717 /*
4718  * sysfs parts below -->
4719  */
4720 static ssize_t
4721 cfq_var_show(unsigned int var, char *page)
4722 {
4723         return sprintf(page, "%u\n", var);
4724 }
4725
4726 static ssize_t
4727 cfq_var_store(unsigned int *var, const char *page, size_t count)
4728 {
4729         char *p = (char *) page;
4730
4731         *var = simple_strtoul(p, &p, 10);
4732         return count;
4733 }
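/*
 * Editorial note: simple_strtoul() does not report parse errors here, so
 * non-numeric input is stored as 0 and then clamped to the MIN/MAX bounds
 * by the store macros below; count is returned unconditionally.
 */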
4734
4735 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4736 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4737 {                                                                       \
4738         struct cfq_data *cfqd = e->elevator_data;                       \
4739         u64 __data = __VAR;                                             \
4740         if (__CONV)                                                     \
4741                 __data = div_u64(__data, NSEC_PER_MSEC);                        \
4742         return cfq_var_show(__data, (page));                            \
4743 }
4744 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4745 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4746 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4747 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4748 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4749 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4750 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4751 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4752 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4753 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4754 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4755 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4756 #undef SHOW_FUNCTION
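/*
 * Editorial note: for illustration only, the first instantiation above
 * expands (modulo whitespace) to the function below.  The block is
 * guarded out because the real definition is already generated by the
 * macro.
 */
#if 0
static ssize_t cfq_quantum_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	u64 __data = cfqd->cfq_quantum;
	if (0)
		__data = div_u64(__data, NSEC_PER_MSEC);
	return cfq_var_show(__data, (page));
}
#endif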
4757
4758 #define USEC_SHOW_FUNCTION(__FUNC, __VAR)                               \
4759 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4760 {                                                                       \
4761         struct cfq_data *cfqd = e->elevator_data;                       \
4762         u64 __data = __VAR;                                             \
4763         __data = div_u64(__data, NSEC_PER_USEC);                        \
4764         return cfq_var_show(__data, (page));                            \
4765 }
4766 USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
4767 USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
4768 USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
4769 USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
4770 USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
4771 #undef USEC_SHOW_FUNCTION
4772
4773 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4774 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4775 {                                                                       \
4776         struct cfq_data *cfqd = e->elevator_data;                       \
4777         unsigned int __data;                                            \
4778         int ret = cfq_var_store(&__data, (page), count);                \
4779         if (__data < (MIN))                                             \
4780                 __data = (MIN);                                         \
4781         else if (__data > (MAX))                                        \
4782                 __data = (MAX);                                         \
4783         if (__CONV)                                                     \
4784                 *(__PTR) = (u64)__data * NSEC_PER_MSEC;                 \
4785         else                                                            \
4786                 *(__PTR) = __data;                                      \
4787         return ret;                                                     \
4788 }
4789 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4790 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4791                 UINT_MAX, 1);
4792 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4793                 UINT_MAX, 1);
4794 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4795 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4796                 UINT_MAX, 0);
4797 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4798 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4799 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4800 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4801 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4802                 UINT_MAX, 0);
4803 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4804 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4805 #undef STORE_FUNCTION
4806
4807 #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                    \
4808 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4809 {                                                                       \
4810         struct cfq_data *cfqd = e->elevator_data;                       \
4811         unsigned int __data;                                            \
4812         int ret = cfq_var_store(&__data, (page), count);                \
4813         if (__data < (MIN))                                             \
4814                 __data = (MIN);                                         \
4815         else if (__data > (MAX))                                        \
4816                 __data = (MAX);                                         \
4817         *(__PTR) = (u64)__data * NSEC_PER_USEC;                         \
4818         return ret;                                                     \
4819 }
4820 USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
4821 USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
4822 USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
4823 USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
4824 USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
4825 #undef USEC_STORE_FUNCTION
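/*
 * Editorial note: every time-based tunable is therefore exposed twice:
 * the plain name reads and writes milliseconds (SHOW_FUNCTION and
 * STORE_FUNCTION with __CONV set), while the _us variant reads and writes
 * microseconds.  Both operate on the same nanosecond-resolution field in
 * struct cfq_data.
 */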
4826
4827 #define CFQ_ATTR(name) \
4828         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4829
4830 static struct elv_fs_entry cfq_attrs[] = {
4831         CFQ_ATTR(quantum),
4832         CFQ_ATTR(fifo_expire_sync),
4833         CFQ_ATTR(fifo_expire_async),
4834         CFQ_ATTR(back_seek_max),
4835         CFQ_ATTR(back_seek_penalty),
4836         CFQ_ATTR(slice_sync),
4837         CFQ_ATTR(slice_sync_us),
4838         CFQ_ATTR(slice_async),
4839         CFQ_ATTR(slice_async_us),
4840         CFQ_ATTR(slice_async_rq),
4841         CFQ_ATTR(slice_idle),
4842         CFQ_ATTR(slice_idle_us),
4843         CFQ_ATTR(group_idle),
4844         CFQ_ATTR(group_idle_us),
4845         CFQ_ATTR(low_latency),
4846         CFQ_ATTR(target_latency),
4847         CFQ_ATTR(target_latency_us),
4848         __ATTR_NULL
4849 };
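/*
 * Editorial note: with cfq active on a legacy (single-queue) request
 * queue, the attributes above typically appear under
 * /sys/block/<dev>/queue/iosched/, e.g. "quantum", "slice_idle" and
 * "slice_idle_us"; <dev> is a placeholder for the block device name.
 */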
4850
4851 static struct elevator_type iosched_cfq = {
4852         .ops.sq = {
4853                 .elevator_merge_fn =            cfq_merge,
4854                 .elevator_merged_fn =           cfq_merged_request,
4855                 .elevator_merge_req_fn =        cfq_merged_requests,
4856                 .elevator_allow_bio_merge_fn =  cfq_allow_bio_merge,
4857                 .elevator_allow_rq_merge_fn =   cfq_allow_rq_merge,
4858                 .elevator_bio_merged_fn =       cfq_bio_merged,
4859                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4860                 .elevator_add_req_fn =          cfq_insert_request,
4861                 .elevator_activate_req_fn =     cfq_activate_request,
4862                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4863                 .elevator_completed_req_fn =    cfq_completed_request,
4864                 .elevator_former_req_fn =       elv_rb_former_request,
4865                 .elevator_latter_req_fn =       elv_rb_latter_request,
4866                 .elevator_init_icq_fn =         cfq_init_icq,
4867                 .elevator_exit_icq_fn =         cfq_exit_icq,
4868                 .elevator_set_req_fn =          cfq_set_request,
4869                 .elevator_put_req_fn =          cfq_put_request,
4870                 .elevator_may_queue_fn =        cfq_may_queue,
4871                 .elevator_init_fn =             cfq_init_queue,
4872                 .elevator_exit_fn =             cfq_exit_queue,
4873                 .elevator_registered_fn =       cfq_registered_queue,
4874         },
4875         .icq_size       =       sizeof(struct cfq_io_cq),
4876         .icq_align      =       __alignof__(struct cfq_io_cq),
4877         .elevator_attrs =       cfq_attrs,
4878         .elevator_name  =       "cfq",
4879         .elevator_owner =       THIS_MODULE,
4880 };
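/*
 * Editorial note: elv_register() in cfq_init() below makes this scheduler
 * selectable by the name "cfq", commonly via the elevator= boot parameter
 * or by writing "cfq" to /sys/block/<dev>/queue/scheduler on kernels that
 * still use the legacy request path (<dev> again a placeholder).
 */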
4881
4882 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4883 static struct blkcg_policy blkcg_policy_cfq = {
4884         .dfl_cftypes            = cfq_blkcg_files,
4885         .legacy_cftypes         = cfq_blkcg_legacy_files,
4886
4887         .cpd_alloc_fn           = cfq_cpd_alloc,
4888         .cpd_init_fn            = cfq_cpd_init,
4889         .cpd_free_fn            = cfq_cpd_free,
4890         .cpd_bind_fn            = cfq_cpd_bind,
4891
4892         .pd_alloc_fn            = cfq_pd_alloc,
4893         .pd_init_fn             = cfq_pd_init,
4894         .pd_offline_fn          = cfq_pd_offline,
4895         .pd_free_fn             = cfq_pd_free,
4896         .pd_reset_stats_fn      = cfq_pd_reset_stats,
4897 };
4898 #endif
4899
4900 static int __init cfq_init(void)
4901 {
4902         int ret;
4903
4904 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4905         ret = blkcg_policy_register(&blkcg_policy_cfq);
4906         if (ret)
4907                 return ret;
4908 #else
4909         cfq_group_idle = 0;
4910 #endif
4911
4912         ret = -ENOMEM;
4913         cfq_pool = KMEM_CACHE(cfq_queue, 0);
4914         if (!cfq_pool)
4915                 goto err_pol_unreg;
4916
4917         ret = elv_register(&iosched_cfq);
4918         if (ret)
4919                 goto err_free_pool;
4920
4921         return 0;
4922
4923 err_free_pool:
4924         kmem_cache_destroy(cfq_pool);
4925 err_pol_unreg:
4926 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4927         blkcg_policy_unregister(&blkcg_policy_cfq);
4928 #endif
4929         return ret;
4930 }
4931
4932 static void __exit cfq_exit(void)
4933 {
4934 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4935         blkcg_policy_unregister(&blkcg_policy_cfq);
4936 #endif
4937         elv_unregister(&iosched_cfq);
4938         kmem_cache_destroy(cfq_pool);
4939 }
4940
4941 module_init(cfq_init);
4942 module_exit(cfq_exit);
4943
4944 MODULE_AUTHOR("Jens Axboe");
4945 MODULE_LICENSE("GPL");
4946 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");