Merge remote-tracking branch 'regulator/fix/core' into regulator-linus
[sfrench/cifs-2.6.git] / block / blk-throttle.c
1 /*
2  * Interface for controlling IO bandwidth on a request queue
3  *
4  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
5  */
6
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/bio.h>
11 #include <linux/blktrace_api.h>
12 #include <linux/blk-cgroup.h>
13 #include "blk.h"
14
15 /* Max dispatch from a group in 1 round */
16 static int throtl_grp_quantum = 8;
17
18 /* Total max dispatch from all groups in one round */
19 static int throtl_quantum = 32;
20
21 /* Throttling is performed over a slice and after that slice is renewed */
22 #define DFL_THROTL_SLICE_HD (HZ / 10)
23 #define DFL_THROTL_SLICE_SSD (HZ / 50)
24 #define MAX_THROTL_SLICE (HZ)
25 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
26 #define MIN_THROTL_BPS (320 * 1024)
27 #define MIN_THROTL_IOPS (10)
28 #define DFL_LATENCY_TARGET (-1L)
29 #define DFL_IDLE_THRESHOLD (0)
30 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
31 #define LATENCY_FILTERED_SSD (0)
32 /*
33  * For HD, very small latency comes from sequential IO. Such IO is helpless to
34  * help determine if its IO is impacted by others, hence we ignore the IO
35  */
36 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
37
38 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
39
40 static struct blkcg_policy blkcg_policy_throtl;
41
42 /* A workqueue to queue throttle related work */
43 static struct workqueue_struct *kthrotld_workqueue;
44
45 /*
46  * To implement hierarchical throttling, throtl_grps form a tree and bios
47  * are dispatched upwards level by level until they reach the top and get
48  * issued.  When dispatching bios from the children and local group at each
49  * level, if the bios are dispatched into a single bio_list, there's a risk
50  * of a local or child group which can queue many bios at once filling up
51  * the list starving others.
52  *
53  * To avoid such starvation, dispatched bios are queued separately
54  * according to where they came from.  When they are again dispatched to
55  * the parent, they're popped in round-robin order so that no single source
56  * hogs the dispatch window.
57  *
58  * throtl_qnode is used to keep the queued bios separated by their sources.
59  * Bios are queued to throtl_qnode which in turn is queued to
60  * throtl_service_queue and then dispatched in round-robin order.
61  *
62  * It's also used to track the reference counts on blkg's.  A qnode always
63  * belongs to a throtl_grp and gets queued on itself or the parent, so
64  * incrementing the reference of the associated throtl_grp when a qnode is
65  * queued and decrementing when dequeued is enough to keep the whole blkg
66  * tree pinned while bios are in flight.
67  */
68 struct throtl_qnode {
69         struct list_head        node;           /* service_queue->queued[] */
70         struct bio_list         bios;           /* queued bios */
71         struct throtl_grp       *tg;            /* tg this qnode belongs to */
72 };
73
74 struct throtl_service_queue {
75         struct throtl_service_queue *parent_sq; /* the parent service_queue */
76
77         /*
78          * Bios queued directly to this service_queue or dispatched from
79          * children throtl_grp's.
80          */
81         struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
82         unsigned int            nr_queued[2];   /* number of queued bios */
83
84         /*
85          * RB tree of active children throtl_grp's, which are sorted by
86          * their ->disptime.
87          */
88         struct rb_root          pending_tree;   /* RB tree of active tgs */
89         struct rb_node          *first_pending; /* first node in the tree */
90         unsigned int            nr_pending;     /* # queued in the tree */
91         unsigned long           first_pending_disptime; /* disptime of the first tg */
92         struct timer_list       pending_timer;  /* fires on first_pending_disptime */
93 };
94
95 enum tg_state_flags {
96         THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
97         THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
98 };
99
100 #define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
101
102 enum {
103         LIMIT_LOW,
104         LIMIT_MAX,
105         LIMIT_CNT,
106 };
107
108 struct throtl_grp {
109         /* must be the first member */
110         struct blkg_policy_data pd;
111
112         /* active throtl group service_queue member */
113         struct rb_node rb_node;
114
115         /* throtl_data this group belongs to */
116         struct throtl_data *td;
117
118         /* this group's service queue */
119         struct throtl_service_queue service_queue;
120
121         /*
122          * qnode_on_self is used when bios are directly queued to this
123          * throtl_grp so that local bios compete fairly with bios
124          * dispatched from children.  qnode_on_parent is used when bios are
125          * dispatched from this throtl_grp into its parent and will compete
126          * with the sibling qnode_on_parents and the parent's
127          * qnode_on_self.
128          */
129         struct throtl_qnode qnode_on_self[2];
130         struct throtl_qnode qnode_on_parent[2];
131
132         /*
133          * Dispatch time in jiffies. This is the estimated time when group
134          * will unthrottle and is ready to dispatch more bio. It is used as
135          * key to sort active groups in service tree.
136          */
137         unsigned long disptime;
138
139         unsigned int flags;
140
141         /* are there any throtl rules between this group and td? */
142         bool has_rules[2];
143
144         /* internally used bytes per second rate limits */
145         uint64_t bps[2][LIMIT_CNT];
146         /* user configured bps limits */
147         uint64_t bps_conf[2][LIMIT_CNT];
148
149         /* internally used IOPS limits */
150         unsigned int iops[2][LIMIT_CNT];
151         /* user configured IOPS limits */
152         unsigned int iops_conf[2][LIMIT_CNT];
153
154         /* Number of bytes disptached in current slice */
155         uint64_t bytes_disp[2];
156         /* Number of bio's dispatched in current slice */
157         unsigned int io_disp[2];
158
159         unsigned long last_low_overflow_time[2];
160
161         uint64_t last_bytes_disp[2];
162         unsigned int last_io_disp[2];
163
164         unsigned long last_check_time;
165
166         unsigned long latency_target; /* us */
167         unsigned long latency_target_conf; /* us */
168         /* When did we start a new slice */
169         unsigned long slice_start[2];
170         unsigned long slice_end[2];
171
172         unsigned long last_finish_time; /* ns / 1024 */
173         unsigned long checked_last_finish_time; /* ns / 1024 */
174         unsigned long avg_idletime; /* ns / 1024 */
175         unsigned long idletime_threshold; /* us */
176         unsigned long idletime_threshold_conf; /* us */
177
178         unsigned int bio_cnt; /* total bios */
179         unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
180         unsigned long bio_cnt_reset_time;
181 };
182
183 /* We measure latency for request size from <= 4k to >= 1M */
184 #define LATENCY_BUCKET_SIZE 9
185
186 struct latency_bucket {
187         unsigned long total_latency; /* ns / 1024 */
188         int samples;
189 };
190
191 struct avg_latency_bucket {
192         unsigned long latency; /* ns / 1024 */
193         bool valid;
194 };
195
196 struct throtl_data
197 {
198         /* service tree for active throtl groups */
199         struct throtl_service_queue service_queue;
200
201         struct request_queue *queue;
202
203         /* Total Number of queued bios on READ and WRITE lists */
204         unsigned int nr_queued[2];
205
206         unsigned int throtl_slice;
207
208         /* Work for dispatching throttled bios */
209         struct work_struct dispatch_work;
210         unsigned int limit_index;
211         bool limit_valid[LIMIT_CNT];
212
213         unsigned long low_upgrade_time;
214         unsigned long low_downgrade_time;
215
216         unsigned int scale;
217
218         struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
219         struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
220         struct latency_bucket __percpu *latency_buckets;
221         unsigned long last_calculate_time;
222         unsigned long filtered_latency;
223
224         bool track_bio_latency;
225 };
226
227 static void throtl_pending_timer_fn(unsigned long arg);
228
229 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
230 {
231         return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
232 }
233
234 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
235 {
236         return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
237 }
238
239 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
240 {
241         return pd_to_blkg(&tg->pd);
242 }
243
244 /**
245  * sq_to_tg - return the throl_grp the specified service queue belongs to
246  * @sq: the throtl_service_queue of interest
247  *
248  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
249  * embedded in throtl_data, %NULL is returned.
250  */
251 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
252 {
253         if (sq && sq->parent_sq)
254                 return container_of(sq, struct throtl_grp, service_queue);
255         else
256                 return NULL;
257 }
258
259 /**
260  * sq_to_td - return throtl_data the specified service queue belongs to
261  * @sq: the throtl_service_queue of interest
262  *
263  * A service_queue can be embedded in either a throtl_grp or throtl_data.
264  * Determine the associated throtl_data accordingly and return it.
265  */
266 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
267 {
268         struct throtl_grp *tg = sq_to_tg(sq);
269
270         if (tg)
271                 return tg->td;
272         else
273                 return container_of(sq, struct throtl_data, service_queue);
274 }
275
276 /*
277  * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
278  * make the IO dispatch more smooth.
279  * Scale up: linearly scale up according to lapsed time since upgrade. For
280  *           every throtl_slice, the limit scales up 1/2 .low limit till the
281  *           limit hits .max limit
282  * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
283  */
284 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
285 {
286         /* arbitrary value to avoid too big scale */
287         if (td->scale < 4096 && time_after_eq(jiffies,
288             td->low_upgrade_time + td->scale * td->throtl_slice))
289                 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
290
291         return low + (low >> 1) * td->scale;
292 }
293
294 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
295 {
296         struct blkcg_gq *blkg = tg_to_blkg(tg);
297         struct throtl_data *td;
298         uint64_t ret;
299
300         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
301                 return U64_MAX;
302
303         td = tg->td;
304         ret = tg->bps[rw][td->limit_index];
305         if (ret == 0 && td->limit_index == LIMIT_LOW) {
306                 /* intermediate node or iops isn't 0 */
307                 if (!list_empty(&blkg->blkcg->css.children) ||
308                     tg->iops[rw][td->limit_index])
309                         return U64_MAX;
310                 else
311                         return MIN_THROTL_BPS;
312         }
313
314         if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
315             tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
316                 uint64_t adjusted;
317
318                 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
319                 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
320         }
321         return ret;
322 }
323
324 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
325 {
326         struct blkcg_gq *blkg = tg_to_blkg(tg);
327         struct throtl_data *td;
328         unsigned int ret;
329
330         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
331                 return UINT_MAX;
332
333         td = tg->td;
334         ret = tg->iops[rw][td->limit_index];
335         if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
336                 /* intermediate node or bps isn't 0 */
337                 if (!list_empty(&blkg->blkcg->css.children) ||
338                     tg->bps[rw][td->limit_index])
339                         return UINT_MAX;
340                 else
341                         return MIN_THROTL_IOPS;
342         }
343
344         if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
345             tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
346                 uint64_t adjusted;
347
348                 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
349                 if (adjusted > UINT_MAX)
350                         adjusted = UINT_MAX;
351                 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
352         }
353         return ret;
354 }
355
356 #define request_bucket_index(sectors) \
357         clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
358
359 /**
360  * throtl_log - log debug message via blktrace
361  * @sq: the service_queue being reported
362  * @fmt: printf format string
363  * @args: printf args
364  *
365  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
366  * throtl_grp; otherwise, just "throtl".
367  */
368 #define throtl_log(sq, fmt, args...)    do {                            \
369         struct throtl_grp *__tg = sq_to_tg((sq));                       \
370         struct throtl_data *__td = sq_to_td((sq));                      \
371                                                                         \
372         (void)__td;                                                     \
373         if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
374                 break;                                                  \
375         if ((__tg)) {                                                   \
376                 char __pbuf[128];                                       \
377                                                                         \
378                 blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));    \
379                 blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
380         } else {                                                        \
381                 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
382         }                                                               \
383 } while (0)
384
385 static inline unsigned int throtl_bio_data_size(struct bio *bio)
386 {
387         /* assume it's one sector */
388         if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
389                 return 512;
390         return bio->bi_iter.bi_size;
391 }
392
393 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
394 {
395         INIT_LIST_HEAD(&qn->node);
396         bio_list_init(&qn->bios);
397         qn->tg = tg;
398 }
399
400 /**
401  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
402  * @bio: bio being added
403  * @qn: qnode to add bio to
404  * @queued: the service_queue->queued[] list @qn belongs to
405  *
406  * Add @bio to @qn and put @qn on @queued if it's not already on.
407  * @qn->tg's reference count is bumped when @qn is activated.  See the
408  * comment on top of throtl_qnode definition for details.
409  */
410 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
411                                  struct list_head *queued)
412 {
413         bio_list_add(&qn->bios, bio);
414         if (list_empty(&qn->node)) {
415                 list_add_tail(&qn->node, queued);
416                 blkg_get(tg_to_blkg(qn->tg));
417         }
418 }
419
420 /**
421  * throtl_peek_queued - peek the first bio on a qnode list
422  * @queued: the qnode list to peek
423  */
424 static struct bio *throtl_peek_queued(struct list_head *queued)
425 {
426         struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
427         struct bio *bio;
428
429         if (list_empty(queued))
430                 return NULL;
431
432         bio = bio_list_peek(&qn->bios);
433         WARN_ON_ONCE(!bio);
434         return bio;
435 }
436
437 /**
438  * throtl_pop_queued - pop the first bio form a qnode list
439  * @queued: the qnode list to pop a bio from
440  * @tg_to_put: optional out argument for throtl_grp to put
441  *
442  * Pop the first bio from the qnode list @queued.  After popping, the first
443  * qnode is removed from @queued if empty or moved to the end of @queued so
444  * that the popping order is round-robin.
445  *
446  * When the first qnode is removed, its associated throtl_grp should be put
447  * too.  If @tg_to_put is NULL, this function automatically puts it;
448  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
449  * responsible for putting it.
450  */
451 static struct bio *throtl_pop_queued(struct list_head *queued,
452                                      struct throtl_grp **tg_to_put)
453 {
454         struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
455         struct bio *bio;
456
457         if (list_empty(queued))
458                 return NULL;
459
460         bio = bio_list_pop(&qn->bios);
461         WARN_ON_ONCE(!bio);
462
463         if (bio_list_empty(&qn->bios)) {
464                 list_del_init(&qn->node);
465                 if (tg_to_put)
466                         *tg_to_put = qn->tg;
467                 else
468                         blkg_put(tg_to_blkg(qn->tg));
469         } else {
470                 list_move_tail(&qn->node, queued);
471         }
472
473         return bio;
474 }
475
476 /* init a service_queue, assumes the caller zeroed it */
477 static void throtl_service_queue_init(struct throtl_service_queue *sq)
478 {
479         INIT_LIST_HEAD(&sq->queued[0]);
480         INIT_LIST_HEAD(&sq->queued[1]);
481         sq->pending_tree = RB_ROOT;
482         setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
483                     (unsigned long)sq);
484 }
485
486 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
487 {
488         struct throtl_grp *tg;
489         int rw;
490
491         tg = kzalloc_node(sizeof(*tg), gfp, node);
492         if (!tg)
493                 return NULL;
494
495         throtl_service_queue_init(&tg->service_queue);
496
497         for (rw = READ; rw <= WRITE; rw++) {
498                 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
499                 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
500         }
501
502         RB_CLEAR_NODE(&tg->rb_node);
503         tg->bps[READ][LIMIT_MAX] = U64_MAX;
504         tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
505         tg->iops[READ][LIMIT_MAX] = UINT_MAX;
506         tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
507         tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
508         tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
509         tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
510         tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
511         /* LIMIT_LOW will have default value 0 */
512
513         tg->latency_target = DFL_LATENCY_TARGET;
514         tg->latency_target_conf = DFL_LATENCY_TARGET;
515         tg->idletime_threshold = DFL_IDLE_THRESHOLD;
516         tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
517
518         return &tg->pd;
519 }
520
521 static void throtl_pd_init(struct blkg_policy_data *pd)
522 {
523         struct throtl_grp *tg = pd_to_tg(pd);
524         struct blkcg_gq *blkg = tg_to_blkg(tg);
525         struct throtl_data *td = blkg->q->td;
526         struct throtl_service_queue *sq = &tg->service_queue;
527
528         /*
529          * If on the default hierarchy, we switch to properly hierarchical
530          * behavior where limits on a given throtl_grp are applied to the
531          * whole subtree rather than just the group itself.  e.g. If 16M
532          * read_bps limit is set on the root group, the whole system can't
533          * exceed 16M for the device.
534          *
535          * If not on the default hierarchy, the broken flat hierarchy
536          * behavior is retained where all throtl_grps are treated as if
537          * they're all separate root groups right below throtl_data.
538          * Limits of a group don't interact with limits of other groups
539          * regardless of the position of the group in the hierarchy.
540          */
541         sq->parent_sq = &td->service_queue;
542         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
543                 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
544         tg->td = td;
545 }
546
547 /*
548  * Set has_rules[] if @tg or any of its parents have limits configured.
549  * This doesn't require walking up to the top of the hierarchy as the
550  * parent's has_rules[] is guaranteed to be correct.
551  */
552 static void tg_update_has_rules(struct throtl_grp *tg)
553 {
554         struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
555         struct throtl_data *td = tg->td;
556         int rw;
557
558         for (rw = READ; rw <= WRITE; rw++)
559                 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
560                         (td->limit_valid[td->limit_index] &&
561                          (tg_bps_limit(tg, rw) != U64_MAX ||
562                           tg_iops_limit(tg, rw) != UINT_MAX));
563 }
564
565 static void throtl_pd_online(struct blkg_policy_data *pd)
566 {
567         struct throtl_grp *tg = pd_to_tg(pd);
568         /*
569          * We don't want new groups to escape the limits of its ancestors.
570          * Update has_rules[] after a new group is brought online.
571          */
572         tg_update_has_rules(tg);
573 }
574
575 static void blk_throtl_update_limit_valid(struct throtl_data *td)
576 {
577         struct cgroup_subsys_state *pos_css;
578         struct blkcg_gq *blkg;
579         bool low_valid = false;
580
581         rcu_read_lock();
582         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
583                 struct throtl_grp *tg = blkg_to_tg(blkg);
584
585                 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
586                     tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
587                         low_valid = true;
588         }
589         rcu_read_unlock();
590
591         td->limit_valid[LIMIT_LOW] = low_valid;
592 }
593
594 static void throtl_upgrade_state(struct throtl_data *td);
595 static void throtl_pd_offline(struct blkg_policy_data *pd)
596 {
597         struct throtl_grp *tg = pd_to_tg(pd);
598
599         tg->bps[READ][LIMIT_LOW] = 0;
600         tg->bps[WRITE][LIMIT_LOW] = 0;
601         tg->iops[READ][LIMIT_LOW] = 0;
602         tg->iops[WRITE][LIMIT_LOW] = 0;
603
604         blk_throtl_update_limit_valid(tg->td);
605
606         if (!tg->td->limit_valid[tg->td->limit_index])
607                 throtl_upgrade_state(tg->td);
608 }
609
610 static void throtl_pd_free(struct blkg_policy_data *pd)
611 {
612         struct throtl_grp *tg = pd_to_tg(pd);
613
614         del_timer_sync(&tg->service_queue.pending_timer);
615         kfree(tg);
616 }
617
618 static struct throtl_grp *
619 throtl_rb_first(struct throtl_service_queue *parent_sq)
620 {
621         /* Service tree is empty */
622         if (!parent_sq->nr_pending)
623                 return NULL;
624
625         if (!parent_sq->first_pending)
626                 parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
627
628         if (parent_sq->first_pending)
629                 return rb_entry_tg(parent_sq->first_pending);
630
631         return NULL;
632 }
633
634 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
635 {
636         rb_erase(n, root);
637         RB_CLEAR_NODE(n);
638 }
639
640 static void throtl_rb_erase(struct rb_node *n,
641                             struct throtl_service_queue *parent_sq)
642 {
643         if (parent_sq->first_pending == n)
644                 parent_sq->first_pending = NULL;
645         rb_erase_init(n, &parent_sq->pending_tree);
646         --parent_sq->nr_pending;
647 }
648
649 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
650 {
651         struct throtl_grp *tg;
652
653         tg = throtl_rb_first(parent_sq);
654         if (!tg)
655                 return;
656
657         parent_sq->first_pending_disptime = tg->disptime;
658 }
659
660 static void tg_service_queue_add(struct throtl_grp *tg)
661 {
662         struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
663         struct rb_node **node = &parent_sq->pending_tree.rb_node;
664         struct rb_node *parent = NULL;
665         struct throtl_grp *__tg;
666         unsigned long key = tg->disptime;
667         int left = 1;
668
669         while (*node != NULL) {
670                 parent = *node;
671                 __tg = rb_entry_tg(parent);
672
673                 if (time_before(key, __tg->disptime))
674                         node = &parent->rb_left;
675                 else {
676                         node = &parent->rb_right;
677                         left = 0;
678                 }
679         }
680
681         if (left)
682                 parent_sq->first_pending = &tg->rb_node;
683
684         rb_link_node(&tg->rb_node, parent, node);
685         rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
686 }
687
688 static void __throtl_enqueue_tg(struct throtl_grp *tg)
689 {
690         tg_service_queue_add(tg);
691         tg->flags |= THROTL_TG_PENDING;
692         tg->service_queue.parent_sq->nr_pending++;
693 }
694
695 static void throtl_enqueue_tg(struct throtl_grp *tg)
696 {
697         if (!(tg->flags & THROTL_TG_PENDING))
698                 __throtl_enqueue_tg(tg);
699 }
700
701 static void __throtl_dequeue_tg(struct throtl_grp *tg)
702 {
703         throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
704         tg->flags &= ~THROTL_TG_PENDING;
705 }
706
707 static void throtl_dequeue_tg(struct throtl_grp *tg)
708 {
709         if (tg->flags & THROTL_TG_PENDING)
710                 __throtl_dequeue_tg(tg);
711 }
712
713 /* Call with queue lock held */
714 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
715                                           unsigned long expires)
716 {
717         unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
718
719         /*
720          * Since we are adjusting the throttle limit dynamically, the sleep
721          * time calculated according to previous limit might be invalid. It's
722          * possible the cgroup sleep time is very long and no other cgroups
723          * have IO running so notify the limit changes. Make sure the cgroup
724          * doesn't sleep too long to avoid the missed notification.
725          */
726         if (time_after(expires, max_expire))
727                 expires = max_expire;
728         mod_timer(&sq->pending_timer, expires);
729         throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
730                    expires - jiffies, jiffies);
731 }
732
733 /**
734  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
735  * @sq: the service_queue to schedule dispatch for
736  * @force: force scheduling
737  *
738  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
739  * dispatch time of the first pending child.  Returns %true if either timer
740  * is armed or there's no pending child left.  %false if the current
741  * dispatch window is still open and the caller should continue
742  * dispatching.
743  *
744  * If @force is %true, the dispatch timer is always scheduled and this
745  * function is guaranteed to return %true.  This is to be used when the
746  * caller can't dispatch itself and needs to invoke pending_timer
747  * unconditionally.  Note that forced scheduling is likely to induce short
748  * delay before dispatch starts even if @sq->first_pending_disptime is not
749  * in the future and thus shouldn't be used in hot paths.
750  */
751 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
752                                           bool force)
753 {
754         /* any pending children left? */
755         if (!sq->nr_pending)
756                 return true;
757
758         update_min_dispatch_time(sq);
759
760         /* is the next dispatch time in the future? */
761         if (force || time_after(sq->first_pending_disptime, jiffies)) {
762                 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
763                 return true;
764         }
765
766         /* tell the caller to continue dispatching */
767         return false;
768 }
769
770 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
771                 bool rw, unsigned long start)
772 {
773         tg->bytes_disp[rw] = 0;
774         tg->io_disp[rw] = 0;
775
776         /*
777          * Previous slice has expired. We must have trimmed it after last
778          * bio dispatch. That means since start of last slice, we never used
779          * that bandwidth. Do try to make use of that bandwidth while giving
780          * credit.
781          */
782         if (time_after_eq(start, tg->slice_start[rw]))
783                 tg->slice_start[rw] = start;
784
785         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
786         throtl_log(&tg->service_queue,
787                    "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
788                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
789                    tg->slice_end[rw], jiffies);
790 }
791
792 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
793 {
794         tg->bytes_disp[rw] = 0;
795         tg->io_disp[rw] = 0;
796         tg->slice_start[rw] = jiffies;
797         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
798         throtl_log(&tg->service_queue,
799                    "[%c] new slice start=%lu end=%lu jiffies=%lu",
800                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
801                    tg->slice_end[rw], jiffies);
802 }
803
804 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
805                                         unsigned long jiffy_end)
806 {
807         tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
808 }
809
810 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
811                                        unsigned long jiffy_end)
812 {
813         tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
814         throtl_log(&tg->service_queue,
815                    "[%c] extend slice start=%lu end=%lu jiffies=%lu",
816                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
817                    tg->slice_end[rw], jiffies);
818 }
819
820 /* Determine if previously allocated or extended slice is complete or not */
821 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
822 {
823         if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
824                 return false;
825
826         return 1;
827 }
828
829 /* Trim the used slices and adjust slice start accordingly */
830 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
831 {
832         unsigned long nr_slices, time_elapsed, io_trim;
833         u64 bytes_trim, tmp;
834
835         BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
836
837         /*
838          * If bps are unlimited (-1), then time slice don't get
839          * renewed. Don't try to trim the slice if slice is used. A new
840          * slice will start when appropriate.
841          */
842         if (throtl_slice_used(tg, rw))
843                 return;
844
845         /*
846          * A bio has been dispatched. Also adjust slice_end. It might happen
847          * that initially cgroup limit was very low resulting in high
848          * slice_end, but later limit was bumped up and bio was dispached
849          * sooner, then we need to reduce slice_end. A high bogus slice_end
850          * is bad because it does not allow new slice to start.
851          */
852
853         throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
854
855         time_elapsed = jiffies - tg->slice_start[rw];
856
857         nr_slices = time_elapsed / tg->td->throtl_slice;
858
859         if (!nr_slices)
860                 return;
861         tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
862         do_div(tmp, HZ);
863         bytes_trim = tmp;
864
865         io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
866                 HZ;
867
868         if (!bytes_trim && !io_trim)
869                 return;
870
871         if (tg->bytes_disp[rw] >= bytes_trim)
872                 tg->bytes_disp[rw] -= bytes_trim;
873         else
874                 tg->bytes_disp[rw] = 0;
875
876         if (tg->io_disp[rw] >= io_trim)
877                 tg->io_disp[rw] -= io_trim;
878         else
879                 tg->io_disp[rw] = 0;
880
881         tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
882
883         throtl_log(&tg->service_queue,
884                    "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
885                    rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
886                    tg->slice_start[rw], tg->slice_end[rw], jiffies);
887 }
888
889 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
890                                   unsigned long *wait)
891 {
892         bool rw = bio_data_dir(bio);
893         unsigned int io_allowed;
894         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
895         u64 tmp;
896
897         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
898
899         /* Slice has just started. Consider one slice interval */
900         if (!jiffy_elapsed)
901                 jiffy_elapsed_rnd = tg->td->throtl_slice;
902
903         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
904
905         /*
906          * jiffy_elapsed_rnd should not be a big value as minimum iops can be
907          * 1 then at max jiffy elapsed should be equivalent of 1 second as we
908          * will allow dispatch after 1 second and after that slice should
909          * have been trimmed.
910          */
911
912         tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
913         do_div(tmp, HZ);
914
915         if (tmp > UINT_MAX)
916                 io_allowed = UINT_MAX;
917         else
918                 io_allowed = tmp;
919
920         if (tg->io_disp[rw] + 1 <= io_allowed) {
921                 if (wait)
922                         *wait = 0;
923                 return true;
924         }
925
926         /* Calc approx time to dispatch */
927         jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
928
929         if (jiffy_wait > jiffy_elapsed)
930                 jiffy_wait = jiffy_wait - jiffy_elapsed;
931         else
932                 jiffy_wait = 1;
933
934         if (wait)
935                 *wait = jiffy_wait;
936         return 0;
937 }
938
939 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
940                                  unsigned long *wait)
941 {
942         bool rw = bio_data_dir(bio);
943         u64 bytes_allowed, extra_bytes, tmp;
944         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
945         unsigned int bio_size = throtl_bio_data_size(bio);
946
947         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
948
949         /* Slice has just started. Consider one slice interval */
950         if (!jiffy_elapsed)
951                 jiffy_elapsed_rnd = tg->td->throtl_slice;
952
953         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
954
955         tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
956         do_div(tmp, HZ);
957         bytes_allowed = tmp;
958
959         if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
960                 if (wait)
961                         *wait = 0;
962                 return true;
963         }
964
965         /* Calc approx time to dispatch */
966         extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
967         jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
968
969         if (!jiffy_wait)
970                 jiffy_wait = 1;
971
972         /*
973          * This wait time is without taking into consideration the rounding
974          * up we did. Add that time also.
975          */
976         jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
977         if (wait)
978                 *wait = jiffy_wait;
979         return 0;
980 }
981
982 /*
983  * Returns whether one can dispatch a bio or not. Also returns approx number
984  * of jiffies to wait before this bio is with-in IO rate and can be dispatched
985  */
986 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
987                             unsigned long *wait)
988 {
989         bool rw = bio_data_dir(bio);
990         unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
991
992         /*
993          * Currently whole state machine of group depends on first bio
994          * queued in the group bio list. So one should not be calling
995          * this function with a different bio if there are other bios
996          * queued.
997          */
998         BUG_ON(tg->service_queue.nr_queued[rw] &&
999                bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
1000
1001         /* If tg->bps = -1, then BW is unlimited */
1002         if (tg_bps_limit(tg, rw) == U64_MAX &&
1003             tg_iops_limit(tg, rw) == UINT_MAX) {
1004                 if (wait)
1005                         *wait = 0;
1006                 return true;
1007         }
1008
1009         /*
1010          * If previous slice expired, start a new one otherwise renew/extend
1011          * existing slice to make sure it is at least throtl_slice interval
1012          * long since now. New slice is started only for empty throttle group.
1013          * If there is queued bio, that means there should be an active
1014          * slice and it should be extended instead.
1015          */
1016         if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1017                 throtl_start_new_slice(tg, rw);
1018         else {
1019                 if (time_before(tg->slice_end[rw],
1020                     jiffies + tg->td->throtl_slice))
1021                         throtl_extend_slice(tg, rw,
1022                                 jiffies + tg->td->throtl_slice);
1023         }
1024
1025         if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1026             tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1027                 if (wait)
1028                         *wait = 0;
1029                 return 1;
1030         }
1031
1032         max_wait = max(bps_wait, iops_wait);
1033
1034         if (wait)
1035                 *wait = max_wait;
1036
1037         if (time_before(tg->slice_end[rw], jiffies + max_wait))
1038                 throtl_extend_slice(tg, rw, jiffies + max_wait);
1039
1040         return 0;
1041 }
1042
1043 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1044 {
1045         bool rw = bio_data_dir(bio);
1046         unsigned int bio_size = throtl_bio_data_size(bio);
1047
1048         /* Charge the bio to the group */
1049         tg->bytes_disp[rw] += bio_size;
1050         tg->io_disp[rw]++;
1051         tg->last_bytes_disp[rw] += bio_size;
1052         tg->last_io_disp[rw]++;
1053
1054         /*
1055          * BIO_THROTTLED is used to prevent the same bio to be throttled
1056          * more than once as a throttled bio will go through blk-throtl the
1057          * second time when it eventually gets issued.  Set it when a bio
1058          * is being charged to a tg.
1059          */
1060         if (!bio_flagged(bio, BIO_THROTTLED))
1061                 bio_set_flag(bio, BIO_THROTTLED);
1062 }
1063
1064 /**
1065  * throtl_add_bio_tg - add a bio to the specified throtl_grp
1066  * @bio: bio to add
1067  * @qn: qnode to use
1068  * @tg: the target throtl_grp
1069  *
1070  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1071  * tg->qnode_on_self[] is used.
1072  */
1073 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1074                               struct throtl_grp *tg)
1075 {
1076         struct throtl_service_queue *sq = &tg->service_queue;
1077         bool rw = bio_data_dir(bio);
1078
1079         if (!qn)
1080                 qn = &tg->qnode_on_self[rw];
1081
1082         /*
1083          * If @tg doesn't currently have any bios queued in the same
1084          * direction, queueing @bio can change when @tg should be
1085          * dispatched.  Mark that @tg was empty.  This is automatically
1086          * cleaered on the next tg_update_disptime().
1087          */
1088         if (!sq->nr_queued[rw])
1089                 tg->flags |= THROTL_TG_WAS_EMPTY;
1090
1091         throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1092
1093         sq->nr_queued[rw]++;
1094         throtl_enqueue_tg(tg);
1095 }
1096
1097 static void tg_update_disptime(struct throtl_grp *tg)
1098 {
1099         struct throtl_service_queue *sq = &tg->service_queue;
1100         unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1101         struct bio *bio;
1102
1103         bio = throtl_peek_queued(&sq->queued[READ]);
1104         if (bio)
1105                 tg_may_dispatch(tg, bio, &read_wait);
1106
1107         bio = throtl_peek_queued(&sq->queued[WRITE]);
1108         if (bio)
1109                 tg_may_dispatch(tg, bio, &write_wait);
1110
1111         min_wait = min(read_wait, write_wait);
1112         disptime = jiffies + min_wait;
1113
1114         /* Update dispatch time */
1115         throtl_dequeue_tg(tg);
1116         tg->disptime = disptime;
1117         throtl_enqueue_tg(tg);
1118
1119         /* see throtl_add_bio_tg() */
1120         tg->flags &= ~THROTL_TG_WAS_EMPTY;
1121 }
1122
1123 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1124                                         struct throtl_grp *parent_tg, bool rw)
1125 {
1126         if (throtl_slice_used(parent_tg, rw)) {
1127                 throtl_start_new_slice_with_credit(parent_tg, rw,
1128                                 child_tg->slice_start[rw]);
1129         }
1130
1131 }
1132
1133 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1134 {
1135         struct throtl_service_queue *sq = &tg->service_queue;
1136         struct throtl_service_queue *parent_sq = sq->parent_sq;
1137         struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1138         struct throtl_grp *tg_to_put = NULL;
1139         struct bio *bio;
1140
1141         /*
1142          * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1143          * from @tg may put its reference and @parent_sq might end up
1144          * getting released prematurely.  Remember the tg to put and put it
1145          * after @bio is transferred to @parent_sq.
1146          */
1147         bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1148         sq->nr_queued[rw]--;
1149
1150         throtl_charge_bio(tg, bio);
1151
1152         /*
1153          * If our parent is another tg, we just need to transfer @bio to
1154          * the parent using throtl_add_bio_tg().  If our parent is
1155          * @td->service_queue, @bio is ready to be issued.  Put it on its
1156          * bio_lists[] and decrease total number queued.  The caller is
1157          * responsible for issuing these bios.
1158          */
1159         if (parent_tg) {
1160                 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1161                 start_parent_slice_with_credit(tg, parent_tg, rw);
1162         } else {
1163                 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1164                                      &parent_sq->queued[rw]);
1165                 BUG_ON(tg->td->nr_queued[rw] <= 0);
1166                 tg->td->nr_queued[rw]--;
1167         }
1168
1169         throtl_trim_slice(tg, rw);
1170
1171         if (tg_to_put)
1172                 blkg_put(tg_to_blkg(tg_to_put));
1173 }
1174
1175 static int throtl_dispatch_tg(struct throtl_grp *tg)
1176 {
1177         struct throtl_service_queue *sq = &tg->service_queue;
1178         unsigned int nr_reads = 0, nr_writes = 0;
1179         unsigned int max_nr_reads = throtl_grp_quantum*3/4;
1180         unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1181         struct bio *bio;
1182
1183         /* Try to dispatch 75% READS and 25% WRITES */
1184
1185         while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1186                tg_may_dispatch(tg, bio, NULL)) {
1187
1188                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1189                 nr_reads++;
1190
1191                 if (nr_reads >= max_nr_reads)
1192                         break;
1193         }
1194
1195         while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1196                tg_may_dispatch(tg, bio, NULL)) {
1197
1198                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1199                 nr_writes++;
1200
1201                 if (nr_writes >= max_nr_writes)
1202                         break;
1203         }
1204
1205         return nr_reads + nr_writes;
1206 }
1207
1208 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1209 {
1210         unsigned int nr_disp = 0;
1211
1212         while (1) {
1213                 struct throtl_grp *tg = throtl_rb_first(parent_sq);
1214                 struct throtl_service_queue *sq = &tg->service_queue;
1215
1216                 if (!tg)
1217                         break;
1218
1219                 if (time_before(jiffies, tg->disptime))
1220                         break;
1221
1222                 throtl_dequeue_tg(tg);
1223
1224                 nr_disp += throtl_dispatch_tg(tg);
1225
1226                 if (sq->nr_queued[0] || sq->nr_queued[1])
1227                         tg_update_disptime(tg);
1228
1229                 if (nr_disp >= throtl_quantum)
1230                         break;
1231         }
1232
1233         return nr_disp;
1234 }
1235
1236 static bool throtl_can_upgrade(struct throtl_data *td,
1237         struct throtl_grp *this_tg);
1238 /**
1239  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1240  * @arg: the throtl_service_queue being serviced
1241  *
1242  * This timer is armed when a child throtl_grp with active bio's become
1243  * pending and queued on the service_queue's pending_tree and expires when
1244  * the first child throtl_grp should be dispatched.  This function
1245  * dispatches bio's from the children throtl_grps to the parent
1246  * service_queue.
1247  *
1248  * If the parent's parent is another throtl_grp, dispatching is propagated
1249  * by either arming its pending_timer or repeating dispatch directly.  If
1250  * the top-level service_tree is reached, throtl_data->dispatch_work is
1251  * kicked so that the ready bio's are issued.
1252  */
1253 static void throtl_pending_timer_fn(unsigned long arg)
1254 {
1255         struct throtl_service_queue *sq = (void *)arg;
1256         struct throtl_grp *tg = sq_to_tg(sq);
1257         struct throtl_data *td = sq_to_td(sq);
1258         struct request_queue *q = td->queue;
1259         struct throtl_service_queue *parent_sq;
1260         bool dispatched;
1261         int ret;
1262
1263         spin_lock_irq(q->queue_lock);
1264         if (throtl_can_upgrade(td, NULL))
1265                 throtl_upgrade_state(td);
1266
1267 again:
1268         parent_sq = sq->parent_sq;
1269         dispatched = false;
1270
1271         while (true) {
1272                 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1273                            sq->nr_queued[READ] + sq->nr_queued[WRITE],
1274                            sq->nr_queued[READ], sq->nr_queued[WRITE]);
1275
1276                 ret = throtl_select_dispatch(sq);
1277                 if (ret) {
1278                         throtl_log(sq, "bios disp=%u", ret);
1279                         dispatched = true;
1280                 }
1281
1282                 if (throtl_schedule_next_dispatch(sq, false))
1283                         break;
1284
1285                 /* this dispatch windows is still open, relax and repeat */
1286                 spin_unlock_irq(q->queue_lock);
1287                 cpu_relax();
1288                 spin_lock_irq(q->queue_lock);
1289         }
1290
1291         if (!dispatched)
1292                 goto out_unlock;
1293
1294         if (parent_sq) {
1295                 /* @parent_sq is another throl_grp, propagate dispatch */
1296                 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1297                         tg_update_disptime(tg);
1298                         if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1299                                 /* window is already open, repeat dispatching */
1300                                 sq = parent_sq;
1301                                 tg = sq_to_tg(sq);
1302                                 goto again;
1303                         }
1304                 }
1305         } else {
1306                 /* reached the top-level, queue issueing */
1307                 queue_work(kthrotld_workqueue, &td->dispatch_work);
1308         }
1309 out_unlock:
1310         spin_unlock_irq(q->queue_lock);
1311 }
1312
1313 /**
1314  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1315  * @work: work item being executed
1316  *
1317  * This function is queued for execution when bio's reach the bio_lists[]
1318  * of throtl_data->service_queue.  Those bio's are ready and issued by this
1319  * function.
1320  */
1321 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1322 {
1323         struct throtl_data *td = container_of(work, struct throtl_data,
1324                                               dispatch_work);
1325         struct throtl_service_queue *td_sq = &td->service_queue;
1326         struct request_queue *q = td->queue;
1327         struct bio_list bio_list_on_stack;
1328         struct bio *bio;
1329         struct blk_plug plug;
1330         int rw;
1331
1332         bio_list_init(&bio_list_on_stack);
1333
1334         spin_lock_irq(q->queue_lock);
1335         for (rw = READ; rw <= WRITE; rw++)
1336                 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1337                         bio_list_add(&bio_list_on_stack, bio);
1338         spin_unlock_irq(q->queue_lock);
1339
1340         if (!bio_list_empty(&bio_list_on_stack)) {
1341                 blk_start_plug(&plug);
1342                 while((bio = bio_list_pop(&bio_list_on_stack)))
1343                         generic_make_request(bio);
1344                 blk_finish_plug(&plug);
1345         }
1346 }
1347
1348 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1349                               int off)
1350 {
1351         struct throtl_grp *tg = pd_to_tg(pd);
1352         u64 v = *(u64 *)((void *)tg + off);
1353
1354         if (v == U64_MAX)
1355                 return 0;
1356         return __blkg_prfill_u64(sf, pd, v);
1357 }
1358
1359 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1360                                int off)
1361 {
1362         struct throtl_grp *tg = pd_to_tg(pd);
1363         unsigned int v = *(unsigned int *)((void *)tg + off);
1364
1365         if (v == UINT_MAX)
1366                 return 0;
1367         return __blkg_prfill_u64(sf, pd, v);
1368 }
1369
1370 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1371 {
1372         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1373                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1374         return 0;
1375 }
1376
1377 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1378 {
1379         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1380                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1381         return 0;
1382 }
1383
1384 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1385 {
1386         struct throtl_service_queue *sq = &tg->service_queue;
1387         struct cgroup_subsys_state *pos_css;
1388         struct blkcg_gq *blkg;
1389
1390         throtl_log(&tg->service_queue,
1391                    "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1392                    tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1393                    tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1394
1395         /*
1396          * Update has_rules[] flags for the updated tg's subtree.  A tg is
1397          * considered to have rules if either the tg itself or any of its
1398          * ancestors has rules.  This identifies groups without any
1399          * restrictions in the whole hierarchy and allows them to bypass
1400          * blk-throttle.
1401          */
1402         blkg_for_each_descendant_pre(blkg, pos_css,
1403                         global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1404                 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1405                 struct throtl_grp *parent_tg;
1406
1407                 tg_update_has_rules(this_tg);
1408                 /* ignore root/second level */
1409                 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1410                     !blkg->parent->parent)
1411                         continue;
1412                 parent_tg = blkg_to_tg(blkg->parent);
1413                 /*
1414                  * make sure all children has lower idle time threshold and
1415                  * higher latency target
1416                  */
1417                 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1418                                 parent_tg->idletime_threshold);
1419                 this_tg->latency_target = max(this_tg->latency_target,
1420                                 parent_tg->latency_target);
1421         }
1422
1423         /*
1424          * We're already holding queue_lock and know @tg is valid.  Let's
1425          * apply the new config directly.
1426          *
1427          * Restart the slices for both READ and WRITES. It might happen
1428          * that a group's limit are dropped suddenly and we don't want to
1429          * account recently dispatched IO with new low rate.
1430          */
1431         throtl_start_new_slice(tg, 0);
1432         throtl_start_new_slice(tg, 1);
1433
1434         if (tg->flags & THROTL_TG_PENDING) {
1435                 tg_update_disptime(tg);
1436                 throtl_schedule_next_dispatch(sq->parent_sq, true);
1437         }
1438 }
1439
1440 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1441                            char *buf, size_t nbytes, loff_t off, bool is_u64)
1442 {
1443         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1444         struct blkg_conf_ctx ctx;
1445         struct throtl_grp *tg;
1446         int ret;
1447         u64 v;
1448
1449         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1450         if (ret)
1451                 return ret;
1452
1453         ret = -EINVAL;
1454         if (sscanf(ctx.body, "%llu", &v) != 1)
1455                 goto out_finish;
1456         if (!v)
1457                 v = U64_MAX;
1458
1459         tg = blkg_to_tg(ctx.blkg);
1460
1461         if (is_u64)
1462                 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1463         else
1464                 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1465
1466         tg_conf_updated(tg, false);
1467         ret = 0;
1468 out_finish:
1469         blkg_conf_finish(&ctx);
1470         return ret ?: nbytes;
1471 }
1472
1473 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1474                                char *buf, size_t nbytes, loff_t off)
1475 {
1476         return tg_set_conf(of, buf, nbytes, off, true);
1477 }
1478
1479 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1480                                 char *buf, size_t nbytes, loff_t off)
1481 {
1482         return tg_set_conf(of, buf, nbytes, off, false);
1483 }
1484
1485 static struct cftype throtl_legacy_files[] = {
1486         {
1487                 .name = "throttle.read_bps_device",
1488                 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1489                 .seq_show = tg_print_conf_u64,
1490                 .write = tg_set_conf_u64,
1491         },
1492         {
1493                 .name = "throttle.write_bps_device",
1494                 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1495                 .seq_show = tg_print_conf_u64,
1496                 .write = tg_set_conf_u64,
1497         },
1498         {
1499                 .name = "throttle.read_iops_device",
1500                 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1501                 .seq_show = tg_print_conf_uint,
1502                 .write = tg_set_conf_uint,
1503         },
1504         {
1505                 .name = "throttle.write_iops_device",
1506                 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1507                 .seq_show = tg_print_conf_uint,
1508                 .write = tg_set_conf_uint,
1509         },
1510         {
1511                 .name = "throttle.io_service_bytes",
1512                 .private = (unsigned long)&blkcg_policy_throtl,
1513                 .seq_show = blkg_print_stat_bytes,
1514         },
1515         {
1516                 .name = "throttle.io_serviced",
1517                 .private = (unsigned long)&blkcg_policy_throtl,
1518                 .seq_show = blkg_print_stat_ios,
1519         },
1520         { }     /* terminate */
1521 };
1522
1523 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1524                          int off)
1525 {
1526         struct throtl_grp *tg = pd_to_tg(pd);
1527         const char *dname = blkg_dev_name(pd->blkg);
1528         char bufs[4][21] = { "max", "max", "max", "max" };
1529         u64 bps_dft;
1530         unsigned int iops_dft;
1531         char idle_time[26] = "";
1532         char latency_time[26] = "";
1533
1534         if (!dname)
1535                 return 0;
1536
1537         if (off == LIMIT_LOW) {
1538                 bps_dft = 0;
1539                 iops_dft = 0;
1540         } else {
1541                 bps_dft = U64_MAX;
1542                 iops_dft = UINT_MAX;
1543         }
1544
1545         if (tg->bps_conf[READ][off] == bps_dft &&
1546             tg->bps_conf[WRITE][off] == bps_dft &&
1547             tg->iops_conf[READ][off] == iops_dft &&
1548             tg->iops_conf[WRITE][off] == iops_dft &&
1549             (off != LIMIT_LOW ||
1550              (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1551               tg->latency_target_conf == DFL_LATENCY_TARGET)))
1552                 return 0;
1553
1554         if (tg->bps_conf[READ][off] != U64_MAX)
1555                 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1556                         tg->bps_conf[READ][off]);
1557         if (tg->bps_conf[WRITE][off] != U64_MAX)
1558                 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1559                         tg->bps_conf[WRITE][off]);
1560         if (tg->iops_conf[READ][off] != UINT_MAX)
1561                 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1562                         tg->iops_conf[READ][off]);
1563         if (tg->iops_conf[WRITE][off] != UINT_MAX)
1564                 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1565                         tg->iops_conf[WRITE][off]);
1566         if (off == LIMIT_LOW) {
1567                 if (tg->idletime_threshold_conf == ULONG_MAX)
1568                         strcpy(idle_time, " idle=max");
1569                 else
1570                         snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1571                                 tg->idletime_threshold_conf);
1572
1573                 if (tg->latency_target_conf == ULONG_MAX)
1574                         strcpy(latency_time, " latency=max");
1575                 else
1576                         snprintf(latency_time, sizeof(latency_time),
1577                                 " latency=%lu", tg->latency_target_conf);
1578         }
1579
1580         seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1581                    dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1582                    latency_time);
1583         return 0;
1584 }
1585
1586 static int tg_print_limit(struct seq_file *sf, void *v)
1587 {
1588         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1589                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1590         return 0;
1591 }
1592
1593 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1594                           char *buf, size_t nbytes, loff_t off)
1595 {
1596         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1597         struct blkg_conf_ctx ctx;
1598         struct throtl_grp *tg;
1599         u64 v[4];
1600         unsigned long idle_time;
1601         unsigned long latency_time;
1602         int ret;
1603         int index = of_cft(of)->private;
1604
1605         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1606         if (ret)
1607                 return ret;
1608
1609         tg = blkg_to_tg(ctx.blkg);
1610
1611         v[0] = tg->bps_conf[READ][index];
1612         v[1] = tg->bps_conf[WRITE][index];
1613         v[2] = tg->iops_conf[READ][index];
1614         v[3] = tg->iops_conf[WRITE][index];
1615
1616         idle_time = tg->idletime_threshold_conf;
1617         latency_time = tg->latency_target_conf;
1618         while (true) {
1619                 char tok[27];   /* wiops=18446744073709551616 */
1620                 char *p;
1621                 u64 val = U64_MAX;
1622                 int len;
1623
1624                 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1625                         break;
1626                 if (tok[0] == '\0')
1627                         break;
1628                 ctx.body += len;
1629
1630                 ret = -EINVAL;
1631                 p = tok;
1632                 strsep(&p, "=");
1633                 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1634                         goto out_finish;
1635
1636                 ret = -ERANGE;
1637                 if (!val)
1638                         goto out_finish;
1639
1640                 ret = -EINVAL;
1641                 if (!strcmp(tok, "rbps"))
1642                         v[0] = val;
1643                 else if (!strcmp(tok, "wbps"))
1644                         v[1] = val;
1645                 else if (!strcmp(tok, "riops"))
1646                         v[2] = min_t(u64, val, UINT_MAX);
1647                 else if (!strcmp(tok, "wiops"))
1648                         v[3] = min_t(u64, val, UINT_MAX);
1649                 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1650                         idle_time = val;
1651                 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1652                         latency_time = val;
1653                 else
1654                         goto out_finish;
1655         }
1656
1657         tg->bps_conf[READ][index] = v[0];
1658         tg->bps_conf[WRITE][index] = v[1];
1659         tg->iops_conf[READ][index] = v[2];
1660         tg->iops_conf[WRITE][index] = v[3];
1661
1662         if (index == LIMIT_MAX) {
1663                 tg->bps[READ][index] = v[0];
1664                 tg->bps[WRITE][index] = v[1];
1665                 tg->iops[READ][index] = v[2];
1666                 tg->iops[WRITE][index] = v[3];
1667         }
1668         tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1669                 tg->bps_conf[READ][LIMIT_MAX]);
1670         tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1671                 tg->bps_conf[WRITE][LIMIT_MAX]);
1672         tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1673                 tg->iops_conf[READ][LIMIT_MAX]);
1674         tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1675                 tg->iops_conf[WRITE][LIMIT_MAX]);
1676         tg->idletime_threshold_conf = idle_time;
1677         tg->latency_target_conf = latency_time;
1678
1679         /* force user to configure all settings for low limit  */
1680         if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1681               tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1682             tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1683             tg->latency_target_conf == DFL_LATENCY_TARGET) {
1684                 tg->bps[READ][LIMIT_LOW] = 0;
1685                 tg->bps[WRITE][LIMIT_LOW] = 0;
1686                 tg->iops[READ][LIMIT_LOW] = 0;
1687                 tg->iops[WRITE][LIMIT_LOW] = 0;
1688                 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1689                 tg->latency_target = DFL_LATENCY_TARGET;
1690         } else if (index == LIMIT_LOW) {
1691                 tg->idletime_threshold = tg->idletime_threshold_conf;
1692                 tg->latency_target = tg->latency_target_conf;
1693         }
1694
1695         blk_throtl_update_limit_valid(tg->td);
1696         if (tg->td->limit_valid[LIMIT_LOW]) {
1697                 if (index == LIMIT_LOW)
1698                         tg->td->limit_index = LIMIT_LOW;
1699         } else
1700                 tg->td->limit_index = LIMIT_MAX;
1701         tg_conf_updated(tg, index == LIMIT_LOW &&
1702                 tg->td->limit_valid[LIMIT_LOW]);
1703         ret = 0;
1704 out_finish:
1705         blkg_conf_finish(&ctx);
1706         return ret ?: nbytes;
1707 }
1708
1709 static struct cftype throtl_files[] = {
1710 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1711         {
1712                 .name = "low",
1713                 .flags = CFTYPE_NOT_ON_ROOT,
1714                 .seq_show = tg_print_limit,
1715                 .write = tg_set_limit,
1716                 .private = LIMIT_LOW,
1717         },
1718 #endif
1719         {
1720                 .name = "max",
1721                 .flags = CFTYPE_NOT_ON_ROOT,
1722                 .seq_show = tg_print_limit,
1723                 .write = tg_set_limit,
1724                 .private = LIMIT_MAX,
1725         },
1726         { }     /* terminate */
1727 };
1728
1729 static void throtl_shutdown_wq(struct request_queue *q)
1730 {
1731         struct throtl_data *td = q->td;
1732
1733         cancel_work_sync(&td->dispatch_work);
1734 }
1735
1736 static struct blkcg_policy blkcg_policy_throtl = {
1737         .dfl_cftypes            = throtl_files,
1738         .legacy_cftypes         = throtl_legacy_files,
1739
1740         .pd_alloc_fn            = throtl_pd_alloc,
1741         .pd_init_fn             = throtl_pd_init,
1742         .pd_online_fn           = throtl_pd_online,
1743         .pd_offline_fn          = throtl_pd_offline,
1744         .pd_free_fn             = throtl_pd_free,
1745 };
1746
1747 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1748 {
1749         unsigned long rtime = jiffies, wtime = jiffies;
1750
1751         if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1752                 rtime = tg->last_low_overflow_time[READ];
1753         if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1754                 wtime = tg->last_low_overflow_time[WRITE];
1755         return min(rtime, wtime);
1756 }
1757
1758 /* tg should not be an intermediate node */
1759 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1760 {
1761         struct throtl_service_queue *parent_sq;
1762         struct throtl_grp *parent = tg;
1763         unsigned long ret = __tg_last_low_overflow_time(tg);
1764
1765         while (true) {
1766                 parent_sq = parent->service_queue.parent_sq;
1767                 parent = sq_to_tg(parent_sq);
1768                 if (!parent)
1769                         break;
1770
1771                 /*
1772                  * The parent doesn't have low limit, it always reaches low
1773                  * limit. Its overflow time is useless for children
1774                  */
1775                 if (!parent->bps[READ][LIMIT_LOW] &&
1776                     !parent->iops[READ][LIMIT_LOW] &&
1777                     !parent->bps[WRITE][LIMIT_LOW] &&
1778                     !parent->iops[WRITE][LIMIT_LOW])
1779                         continue;
1780                 if (time_after(__tg_last_low_overflow_time(parent), ret))
1781                         ret = __tg_last_low_overflow_time(parent);
1782         }
1783         return ret;
1784 }
1785
1786 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1787 {
1788         /*
1789          * cgroup is idle if:
1790          * - single idle is too long, longer than a fixed value (in case user
1791          *   configure a too big threshold) or 4 times of idletime threshold
1792          * - average think time is more than threshold
1793          * - IO latency is largely below threshold
1794          */
1795         unsigned long time;
1796         bool ret;
1797
1798         time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1799         ret = tg->latency_target == DFL_LATENCY_TARGET ||
1800               tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1801               (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1802               tg->avg_idletime > tg->idletime_threshold ||
1803               (tg->latency_target && tg->bio_cnt &&
1804                 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1805         throtl_log(&tg->service_queue,
1806                 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1807                 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1808                 tg->bio_cnt, ret, tg->td->scale);
1809         return ret;
1810 }
1811
1812 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1813 {
1814         struct throtl_service_queue *sq = &tg->service_queue;
1815         bool read_limit, write_limit;
1816
1817         /*
1818          * if cgroup reaches low limit (if low limit is 0, the cgroup always
1819          * reaches), it's ok to upgrade to next limit
1820          */
1821         read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1822         write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1823         if (!read_limit && !write_limit)
1824                 return true;
1825         if (read_limit && sq->nr_queued[READ] &&
1826             (!write_limit || sq->nr_queued[WRITE]))
1827                 return true;
1828         if (write_limit && sq->nr_queued[WRITE] &&
1829             (!read_limit || sq->nr_queued[READ]))
1830                 return true;
1831
1832         if (time_after_eq(jiffies,
1833                 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1834             throtl_tg_is_idle(tg))
1835                 return true;
1836         return false;
1837 }
1838
1839 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1840 {
1841         while (true) {
1842                 if (throtl_tg_can_upgrade(tg))
1843                         return true;
1844                 tg = sq_to_tg(tg->service_queue.parent_sq);
1845                 if (!tg || !tg_to_blkg(tg)->parent)
1846                         return false;
1847         }
1848         return false;
1849 }
1850
1851 static bool throtl_can_upgrade(struct throtl_data *td,
1852         struct throtl_grp *this_tg)
1853 {
1854         struct cgroup_subsys_state *pos_css;
1855         struct blkcg_gq *blkg;
1856
1857         if (td->limit_index != LIMIT_LOW)
1858                 return false;
1859
1860         if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1861                 return false;
1862
1863         rcu_read_lock();
1864         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1865                 struct throtl_grp *tg = blkg_to_tg(blkg);
1866
1867                 if (tg == this_tg)
1868                         continue;
1869                 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1870                         continue;
1871                 if (!throtl_hierarchy_can_upgrade(tg)) {
1872                         rcu_read_unlock();
1873                         return false;
1874                 }
1875         }
1876         rcu_read_unlock();
1877         return true;
1878 }
1879
1880 static void throtl_upgrade_check(struct throtl_grp *tg)
1881 {
1882         unsigned long now = jiffies;
1883
1884         if (tg->td->limit_index != LIMIT_LOW)
1885                 return;
1886
1887         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1888                 return;
1889
1890         tg->last_check_time = now;
1891
1892         if (!time_after_eq(now,
1893              __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1894                 return;
1895
1896         if (throtl_can_upgrade(tg->td, NULL))
1897                 throtl_upgrade_state(tg->td);
1898 }
1899
1900 static void throtl_upgrade_state(struct throtl_data *td)
1901 {
1902         struct cgroup_subsys_state *pos_css;
1903         struct blkcg_gq *blkg;
1904
1905         throtl_log(&td->service_queue, "upgrade to max");
1906         td->limit_index = LIMIT_MAX;
1907         td->low_upgrade_time = jiffies;
1908         td->scale = 0;
1909         rcu_read_lock();
1910         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1911                 struct throtl_grp *tg = blkg_to_tg(blkg);
1912                 struct throtl_service_queue *sq = &tg->service_queue;
1913
1914                 tg->disptime = jiffies - 1;
1915                 throtl_select_dispatch(sq);
1916                 throtl_schedule_next_dispatch(sq, false);
1917         }
1918         rcu_read_unlock();
1919         throtl_select_dispatch(&td->service_queue);
1920         throtl_schedule_next_dispatch(&td->service_queue, false);
1921         queue_work(kthrotld_workqueue, &td->dispatch_work);
1922 }
1923
1924 static void throtl_downgrade_state(struct throtl_data *td, int new)
1925 {
1926         td->scale /= 2;
1927
1928         throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1929         if (td->scale) {
1930                 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1931                 return;
1932         }
1933
1934         td->limit_index = new;
1935         td->low_downgrade_time = jiffies;
1936 }
1937
1938 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1939 {
1940         struct throtl_data *td = tg->td;
1941         unsigned long now = jiffies;
1942
1943         /*
1944          * If cgroup is below low limit, consider downgrade and throttle other
1945          * cgroups
1946          */
1947         if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1948             time_after_eq(now, tg_last_low_overflow_time(tg) +
1949                                         td->throtl_slice) &&
1950             (!throtl_tg_is_idle(tg) ||
1951              !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1952                 return true;
1953         return false;
1954 }
1955
1956 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1957 {
1958         while (true) {
1959                 if (!throtl_tg_can_downgrade(tg))
1960                         return false;
1961                 tg = sq_to_tg(tg->service_queue.parent_sq);
1962                 if (!tg || !tg_to_blkg(tg)->parent)
1963                         break;
1964         }
1965         return true;
1966 }
1967
1968 static void throtl_downgrade_check(struct throtl_grp *tg)
1969 {
1970         uint64_t bps;
1971         unsigned int iops;
1972         unsigned long elapsed_time;
1973         unsigned long now = jiffies;
1974
1975         if (tg->td->limit_index != LIMIT_MAX ||
1976             !tg->td->limit_valid[LIMIT_LOW])
1977                 return;
1978         if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1979                 return;
1980         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1981                 return;
1982
1983         elapsed_time = now - tg->last_check_time;
1984         tg->last_check_time = now;
1985
1986         if (time_before(now, tg_last_low_overflow_time(tg) +
1987                         tg->td->throtl_slice))
1988                 return;
1989
1990         if (tg->bps[READ][LIMIT_LOW]) {
1991                 bps = tg->last_bytes_disp[READ] * HZ;
1992                 do_div(bps, elapsed_time);
1993                 if (bps >= tg->bps[READ][LIMIT_LOW])
1994                         tg->last_low_overflow_time[READ] = now;
1995         }
1996
1997         if (tg->bps[WRITE][LIMIT_LOW]) {
1998                 bps = tg->last_bytes_disp[WRITE] * HZ;
1999                 do_div(bps, elapsed_time);
2000                 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2001                         tg->last_low_overflow_time[WRITE] = now;
2002         }
2003
2004         if (tg->iops[READ][LIMIT_LOW]) {
2005                 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2006                 if (iops >= tg->iops[READ][LIMIT_LOW])
2007                         tg->last_low_overflow_time[READ] = now;
2008         }
2009
2010         if (tg->iops[WRITE][LIMIT_LOW]) {
2011                 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2012                 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2013                         tg->last_low_overflow_time[WRITE] = now;
2014         }
2015
2016         /*
2017          * If cgroup is below low limit, consider downgrade and throttle other
2018          * cgroups
2019          */
2020         if (throtl_hierarchy_can_downgrade(tg))
2021                 throtl_downgrade_state(tg->td, LIMIT_LOW);
2022
2023         tg->last_bytes_disp[READ] = 0;
2024         tg->last_bytes_disp[WRITE] = 0;
2025         tg->last_io_disp[READ] = 0;
2026         tg->last_io_disp[WRITE] = 0;
2027 }
2028
2029 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2030 {
2031         unsigned long now = ktime_get_ns() >> 10;
2032         unsigned long last_finish_time = tg->last_finish_time;
2033
2034         if (now <= last_finish_time || last_finish_time == 0 ||
2035             last_finish_time == tg->checked_last_finish_time)
2036                 return;
2037
2038         tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2039         tg->checked_last_finish_time = last_finish_time;
2040 }
2041
2042 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2043 static void throtl_update_latency_buckets(struct throtl_data *td)
2044 {
2045         struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
2046         int i, cpu;
2047         unsigned long last_latency = 0;
2048         unsigned long latency;
2049
2050         if (!blk_queue_nonrot(td->queue))
2051                 return;
2052         if (time_before(jiffies, td->last_calculate_time + HZ))
2053                 return;
2054         td->last_calculate_time = jiffies;
2055
2056         memset(avg_latency, 0, sizeof(avg_latency));
2057         for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2058                 struct latency_bucket *tmp = &td->tmp_buckets[i];
2059
2060                 for_each_possible_cpu(cpu) {
2061                         struct latency_bucket *bucket;
2062
2063                         /* this isn't race free, but ok in practice */
2064                         bucket = per_cpu_ptr(td->latency_buckets, cpu);
2065                         tmp->total_latency += bucket[i].total_latency;
2066                         tmp->samples += bucket[i].samples;
2067                         bucket[i].total_latency = 0;
2068                         bucket[i].samples = 0;
2069                 }
2070
2071                 if (tmp->samples >= 32) {
2072                         int samples = tmp->samples;
2073
2074                         latency = tmp->total_latency;
2075
2076                         tmp->total_latency = 0;
2077                         tmp->samples = 0;
2078                         latency /= samples;
2079                         if (latency == 0)
2080                                 continue;
2081                         avg_latency[i].latency = latency;
2082                 }
2083         }
2084
2085         for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2086                 if (!avg_latency[i].latency) {
2087                         if (td->avg_buckets[i].latency < last_latency)
2088                                 td->avg_buckets[i].latency = last_latency;
2089                         continue;
2090                 }
2091
2092                 if (!td->avg_buckets[i].valid)
2093                         latency = avg_latency[i].latency;
2094                 else
2095                         latency = (td->avg_buckets[i].latency * 7 +
2096                                 avg_latency[i].latency) >> 3;
2097
2098                 td->avg_buckets[i].latency = max(latency, last_latency);
2099                 td->avg_buckets[i].valid = true;
2100                 last_latency = td->avg_buckets[i].latency;
2101         }
2102
2103         for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2104                 throtl_log(&td->service_queue,
2105                         "Latency bucket %d: latency=%ld, valid=%d", i,
2106                         td->avg_buckets[i].latency, td->avg_buckets[i].valid);
2107 }
2108 #else
2109 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2110 {
2111 }
2112 #endif
2113
2114 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2115 {
2116 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2117         int ret;
2118
2119         ret = bio_associate_current(bio);
2120         if (ret == 0 || ret == -EBUSY)
2121                 bio->bi_cg_private = tg;
2122         blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
2123 #else
2124         bio_associate_current(bio);
2125 #endif
2126 }
2127
2128 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2129                     struct bio *bio)
2130 {
2131         struct throtl_qnode *qn = NULL;
2132         struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2133         struct throtl_service_queue *sq;
2134         bool rw = bio_data_dir(bio);
2135         bool throttled = false;
2136         struct throtl_data *td = tg->td;
2137
2138         WARN_ON_ONCE(!rcu_read_lock_held());
2139
2140         /* see throtl_charge_bio() */
2141         if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2142                 goto out;
2143
2144         spin_lock_irq(q->queue_lock);
2145
2146         throtl_update_latency_buckets(td);
2147
2148         if (unlikely(blk_queue_bypass(q)))
2149                 goto out_unlock;
2150
2151         blk_throtl_assoc_bio(tg, bio);
2152         blk_throtl_update_idletime(tg);
2153
2154         sq = &tg->service_queue;
2155
2156 again:
2157         while (true) {
2158                 if (tg->last_low_overflow_time[rw] == 0)
2159                         tg->last_low_overflow_time[rw] = jiffies;
2160                 throtl_downgrade_check(tg);
2161                 throtl_upgrade_check(tg);
2162                 /* throtl is FIFO - if bios are already queued, should queue */
2163                 if (sq->nr_queued[rw])
2164                         break;
2165
2166                 /* if above limits, break to queue */
2167                 if (!tg_may_dispatch(tg, bio, NULL)) {
2168                         tg->last_low_overflow_time[rw] = jiffies;
2169                         if (throtl_can_upgrade(td, tg)) {
2170                                 throtl_upgrade_state(td);
2171                                 goto again;
2172                         }
2173                         break;
2174                 }
2175
2176                 /* within limits, let's charge and dispatch directly */
2177                 throtl_charge_bio(tg, bio);
2178
2179                 /*
2180                  * We need to trim slice even when bios are not being queued
2181                  * otherwise it might happen that a bio is not queued for
2182                  * a long time and slice keeps on extending and trim is not
2183                  * called for a long time. Now if limits are reduced suddenly
2184                  * we take into account all the IO dispatched so far at new
2185                  * low rate and * newly queued IO gets a really long dispatch
2186                  * time.
2187                  *
2188                  * So keep on trimming slice even if bio is not queued.
2189                  */
2190                 throtl_trim_slice(tg, rw);
2191
2192                 /*
2193                  * @bio passed through this layer without being throttled.
2194                  * Climb up the ladder.  If we''re already at the top, it
2195                  * can be executed directly.
2196                  */
2197                 qn = &tg->qnode_on_parent[rw];
2198                 sq = sq->parent_sq;
2199                 tg = sq_to_tg(sq);
2200                 if (!tg)
2201                         goto out_unlock;
2202         }
2203
2204         /* out-of-limit, queue to @tg */
2205         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2206                    rw == READ ? 'R' : 'W',
2207                    tg->bytes_disp[rw], bio->bi_iter.bi_size,
2208                    tg_bps_limit(tg, rw),
2209                    tg->io_disp[rw], tg_iops_limit(tg, rw),
2210                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
2211
2212         tg->last_low_overflow_time[rw] = jiffies;
2213
2214         td->nr_queued[rw]++;
2215         throtl_add_bio_tg(bio, qn, tg);
2216         throttled = true;
2217
2218         /*
2219          * Update @tg's dispatch time and force schedule dispatch if @tg
2220          * was empty before @bio.  The forced scheduling isn't likely to
2221          * cause undue delay as @bio is likely to be dispatched directly if
2222          * its @tg's disptime is not in the future.
2223          */
2224         if (tg->flags & THROTL_TG_WAS_EMPTY) {
2225                 tg_update_disptime(tg);
2226                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2227         }
2228
2229 out_unlock:
2230         spin_unlock_irq(q->queue_lock);
2231 out:
2232         /*
2233          * As multiple blk-throtls may stack in the same issue path, we
2234          * don't want bios to leave with the flag set.  Clear the flag if
2235          * being issued.
2236          */
2237         if (!throttled)
2238                 bio_clear_flag(bio, BIO_THROTTLED);
2239
2240 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2241         if (throttled || !td->track_bio_latency)
2242                 bio->bi_issue_stat.stat |= SKIP_LATENCY;
2243 #endif
2244         return throttled;
2245 }
2246
2247 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2248 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2249         int op, unsigned long time)
2250 {
2251         struct latency_bucket *latency;
2252         int index;
2253
2254         if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
2255             !blk_queue_nonrot(td->queue))
2256                 return;
2257
2258         index = request_bucket_index(size);
2259
2260         latency = get_cpu_ptr(td->latency_buckets);
2261         latency[index].total_latency += time;
2262         latency[index].samples++;
2263         put_cpu_ptr(td->latency_buckets);
2264 }
2265
2266 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2267 {
2268         struct request_queue *q = rq->q;
2269         struct throtl_data *td = q->td;
2270
2271         throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
2272                 req_op(rq), time_ns >> 10);
2273 }
2274
2275 void blk_throtl_bio_endio(struct bio *bio)
2276 {
2277         struct throtl_grp *tg;
2278         u64 finish_time_ns;
2279         unsigned long finish_time;
2280         unsigned long start_time;
2281         unsigned long lat;
2282
2283         tg = bio->bi_cg_private;
2284         if (!tg)
2285                 return;
2286         bio->bi_cg_private = NULL;
2287
2288         finish_time_ns = ktime_get_ns();
2289         tg->last_finish_time = finish_time_ns >> 10;
2290
2291         start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
2292         finish_time = __blk_stat_time(finish_time_ns) >> 10;
2293         if (!start_time || finish_time <= start_time)
2294                 return;
2295
2296         lat = finish_time - start_time;
2297         /* this is only for bio based driver */
2298         if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
2299                 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2300                         bio_op(bio), lat);
2301
2302         if (tg->latency_target && lat >= tg->td->filtered_latency) {
2303                 int bucket;
2304                 unsigned int threshold;
2305
2306                 bucket = request_bucket_index(
2307                         blk_stat_size(&bio->bi_issue_stat));
2308                 threshold = tg->td->avg_buckets[bucket].latency +
2309                         tg->latency_target;
2310                 if (lat > threshold)
2311                         tg->bad_bio_cnt++;
2312                 /*
2313                  * Not race free, could get wrong count, which means cgroups
2314                  * will be throttled
2315                  */
2316                 tg->bio_cnt++;
2317         }
2318
2319         if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2320                 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2321                 tg->bio_cnt /= 2;
2322                 tg->bad_bio_cnt /= 2;
2323         }
2324 }
2325 #endif
2326
2327 /*
2328  * Dispatch all bios from all children tg's queued on @parent_sq.  On
2329  * return, @parent_sq is guaranteed to not have any active children tg's
2330  * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2331  */
2332 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2333 {
2334         struct throtl_grp *tg;
2335
2336         while ((tg = throtl_rb_first(parent_sq))) {
2337                 struct throtl_service_queue *sq = &tg->service_queue;
2338                 struct bio *bio;
2339
2340                 throtl_dequeue_tg(tg);
2341
2342                 while ((bio = throtl_peek_queued(&sq->queued[READ])))
2343                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
2344                 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2345                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
2346         }
2347 }
2348
2349 /**
2350  * blk_throtl_drain - drain throttled bios
2351  * @q: request_queue to drain throttled bios for
2352  *
2353  * Dispatch all currently throttled bios on @q through ->make_request_fn().
2354  */
2355 void blk_throtl_drain(struct request_queue *q)
2356         __releases(q->queue_lock) __acquires(q->queue_lock)
2357 {
2358         struct throtl_data *td = q->td;
2359         struct blkcg_gq *blkg;
2360         struct cgroup_subsys_state *pos_css;
2361         struct bio *bio;
2362         int rw;
2363
2364         queue_lockdep_assert_held(q);
2365         rcu_read_lock();
2366
2367         /*
2368          * Drain each tg while doing post-order walk on the blkg tree, so
2369          * that all bios are propagated to td->service_queue.  It'd be
2370          * better to walk service_queue tree directly but blkg walk is
2371          * easier.
2372          */
2373         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2374                 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2375
2376         /* finally, transfer bios from top-level tg's into the td */
2377         tg_drain_bios(&td->service_queue);
2378
2379         rcu_read_unlock();
2380         spin_unlock_irq(q->queue_lock);
2381
2382         /* all bios now should be in td->service_queue, issue them */
2383         for (rw = READ; rw <= WRITE; rw++)
2384                 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2385                                                 NULL)))
2386                         generic_make_request(bio);
2387
2388         spin_lock_irq(q->queue_lock);
2389 }
2390
2391 int blk_throtl_init(struct request_queue *q)
2392 {
2393         struct throtl_data *td;
2394         int ret;
2395
2396         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2397         if (!td)
2398                 return -ENOMEM;
2399         td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
2400                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2401         if (!td->latency_buckets) {
2402                 kfree(td);
2403                 return -ENOMEM;
2404         }
2405
2406         INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2407         throtl_service_queue_init(&td->service_queue);
2408
2409         q->td = td;
2410         td->queue = q;
2411
2412         td->limit_valid[LIMIT_MAX] = true;
2413         td->limit_index = LIMIT_MAX;
2414         td->low_upgrade_time = jiffies;
2415         td->low_downgrade_time = jiffies;
2416
2417         /* activate policy */
2418         ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2419         if (ret) {
2420                 free_percpu(td->latency_buckets);
2421                 kfree(td);
2422         }
2423         return ret;
2424 }
2425
2426 void blk_throtl_exit(struct request_queue *q)
2427 {
2428         BUG_ON(!q->td);
2429         throtl_shutdown_wq(q);
2430         blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2431         free_percpu(q->td->latency_buckets);
2432         kfree(q->td);
2433 }
2434
2435 void blk_throtl_register_queue(struct request_queue *q)
2436 {
2437         struct throtl_data *td;
2438         int i;
2439
2440         td = q->td;
2441         BUG_ON(!td);
2442
2443         if (blk_queue_nonrot(q)) {
2444                 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2445                 td->filtered_latency = LATENCY_FILTERED_SSD;
2446         } else {
2447                 td->throtl_slice = DFL_THROTL_SLICE_HD;
2448                 td->filtered_latency = LATENCY_FILTERED_HD;
2449                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2450                         td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
2451         }
2452 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2453         /* if no low limit, use previous default */
2454         td->throtl_slice = DFL_THROTL_SLICE_HD;
2455 #endif
2456
2457         td->track_bio_latency = !q->mq_ops && !q->request_fn;
2458         if (!td->track_bio_latency)
2459                 blk_stat_enable_accounting(q);
2460 }
2461
2462 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2463 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2464 {
2465         if (!q->td)
2466                 return -EINVAL;
2467         return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2468 }
2469
2470 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2471         const char *page, size_t count)
2472 {
2473         unsigned long v;
2474         unsigned long t;
2475
2476         if (!q->td)
2477                 return -EINVAL;
2478         if (kstrtoul(page, 10, &v))
2479                 return -EINVAL;
2480         t = msecs_to_jiffies(v);
2481         if (t == 0 || t > MAX_THROTL_SLICE)
2482                 return -EINVAL;
2483         q->td->throtl_slice = t;
2484         return count;
2485 }
2486 #endif
2487
2488 static int __init throtl_init(void)
2489 {
2490         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2491         if (!kthrotld_workqueue)
2492                 panic("Failed to create kthrotld\n");
2493
2494         return blkcg_policy_register(&blkcg_policy_throtl);
2495 }
2496
2497 module_init(throtl_init);