/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at (u64)-1 down
 * to 1.  If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more, then we induce a latency at userspace return.  We accumulate the
 * total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
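
/*
 * Example usage (illustrative only, not part of this file's logic): with the
 * cgroup2 "io" controller enabled, the per-group latency target handled by
 * iolatency_set_limit() below is written in microseconds, e.g.
 *
 *   echo "8:16 target=5000" > /sys/fs/cgroup/<group>/io.latency
 *
 * would set a 5ms target for device 8:16, and "target=max" clears it again.
 * The device numbers, group name and mount point above are placeholders.
 */
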
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;
        atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
        return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
        return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
        spinlock_t lock;

        /* Last time we adjusted the scale of everybody. */
        u64 last_scale_event;

        /* The latency that we missed. */
        u64 scale_lat;

        /* Total io's from all of our children for the last summation. */
        u64 nr_samples;

        /* The guy who actually changed the latency numbers. */
        struct iolatency_grp *scale_grp;

        /* Cookie to tell if we need to scale up or down. */
        atomic_t scale_cookie;
};

struct percentile_stats {
        u64 total;
        u64 missed;
};

struct latency_stat {
        union {
                struct percentile_stats ps;
                struct blk_rq_stat rqs;
        };
};

struct iolatency_grp {
        struct blkg_policy_data pd;
        struct latency_stat __percpu *stats;
        struct latency_stat cur_stat;
        struct blk_iolatency *blkiolat;
        struct rq_depth rq_depth;
        struct rq_wait rq_wait;
        atomic64_t window_start;
        atomic_t scale_cookie;
        u64 min_lat_nsec;
        u64 cur_win_nsec;

        /* total running average of our io latency. */
        u64 lat_avg;

        /* Our current number of IO's for the last summation. */
        u64 nr_samples;

        bool ssd;
        struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80)  - 80 samples
        2014, // exp(1/60)  - 60 samples
};
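
/*
 * Worked example of the bucketing above (illustrative numbers):
 * BLKIOLATENCY_EXP_BUCKET_SIZE is NSEC_PER_SEC / 4 == 250ms.  A group running
 * 100ms windows maps to exp_idx = min(4, 100ms / 250ms) = 0 and decays with
 * factor 2045 (roughly 600 windows, about a minute of busy IO), while a group
 * running the maximum 1s window maps to exp_idx = 4 and factor 2014 (roughly
 * 60 windows, again about a minute).
 */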

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
        return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
                                     struct latency_stat *stat)
{
        if (iolat->ssd) {
                stat->ps.total = 0;
                stat->ps.missed = 0;
        } else
                blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                    struct latency_stat *sum,
                                    struct latency_stat *stat)
{
        if (iolat->ssd) {
                sum->ps.total += stat->ps.total;
                sum->ps.missed += stat->ps.missed;
        } else
                blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                            u64 req_time)
{
        struct latency_stat *stat = get_cpu_ptr(iolat->stats);
        if (iolat->ssd) {
                if (req_time >= iolat->min_lat_nsec)
                        stat->ps.missed++;
                stat->ps.total++;
        } else
                blk_rq_stat_add(&stat->rqs, req_time);
        put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                  struct latency_stat *stat)
{
        if (iolat->ssd) {
                u64 thresh = div64_u64(stat->ps.total, 10);
                thresh = max(thresh, 1ULL);
                return stat->ps.missed < thresh;
        }
        return stat->rqs.mean <= iolat->min_lat_nsec;
}
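
/*
 * Worked example for the SSD case above (illustrative numbers): with 1000 IOs
 * in the window the threshold is 1000 / 10 == 100, so the window is "ok" as
 * long as fewer than 100 IOs missed min_lat_nsec.  For rotational devices the
 * mean of the window is compared against the target instead.
 */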

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
                                       struct latency_stat *stat)
{
        if (iolat->ssd)
                return stat->ps.total;
        return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                              struct latency_stat *stat)
{
        int exp_idx;

        if (iolat->ssd)
                return;

        /*
         * calc_load() takes in a number stored in fixed point representation.
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048).
         * Therefore, rounding errors in the calculation are negligible and
         * can be ignored.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat->rqs.mean);
}

static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
                                       wait_queue_entry_t *wait,
                                       bool first_block)
{
        struct rq_wait *rqw = &iolat->rq_wait;

        if (first_block && waitqueue_active(&rqw->wait) &&
            rqw->wait.head.next != &wait->entry)
                return false;
        return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       spinlock_t *lock, bool issue_as_root,
                                       bool use_memdelay)
        __releases(lock)
        __acquires(lock)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
        DEFINE_WAIT(wait);
        bool first_block = true;

        if (use_delay)
                blkcg_schedule_throttle(rqos->q, use_memdelay);

        /*
         * To avoid priority inversions we want to just take a slot if we are
         * issuing as root.  If we're being killed off there's no point in
         * delaying things, we may have been killed by OOM so throttling may
         * make recovery take even longer, so just let the IO's through so the
         * task can go away.
         */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }

        if (iolatency_may_queue(iolat, &wait, first_block))
                return;

        do {
                prepare_to_wait_exclusive(&rqw->wait, &wait,
                                          TASK_UNINTERRUPTIBLE);

                if (iolatency_may_queue(iolat, &wait, first_block))
                        break;
                first_block = false;

                if (lock) {
                        spin_unlock_irq(lock);
                        io_schedule();
                        spin_lock_irq(lock);
                } else {
                        io_schedule();
                }
        } while (1);

        finish_wait(&rqw->wait, &wait);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
        return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
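
/*
 * Worked example (assuming nr_requests == 128, an illustrative value): a
 * scale-down step is 128 >> SCALE_DOWN_FACTOR == 32 while a scale-up step is
 * only 128 >> SCALE_UP_FACTOR == 8, so throttling is applied in large steps
 * and released in much smaller ones.
 */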

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has their own local copy of the last scale cookie they saw, so if
 * the global scale cookie goes up or down they know which way they need to go
 * based on their last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long max_scale = qd << 1;
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /*
                 * We don't want to dig a hole so deep that it takes us hours to
                 * dig out of it.  Just enough that we don't throttle/unthrottle
                 * with jagged workloads but can still unthrottle once pressure
                 * has sufficiently dissipated.
                 */
                if (diff > qd) {
                        if (diff < max_scale)
                                atomic_dec(&lat_info->scale_cookie);
                } else {
                        atomic_sub(scale, &lat_info->scale_cookie);
                }
        }
}

/*
 * Change the queue depth of the iolatency_grp.  We add/subtract 1/16th of the
 * queue depth at a time so we don't get wild swings and hopefully dial in to
 * fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->rq_depth.max_depth;

        if (old > qd)
                old = qd;

        if (up) {
                if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                        return;

                if (old < qd) {
                        old += scale;
                        old = min(old, qd);
                        iolat->rq_depth.max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else {
                old >>= 1;
                iolat->rq_depth.max_depth = max(old, 1UL);
        }
}
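
/*
 * Example of the asymmetry above (hypothetical depths): with nr_requests ==
 * 128 and max_depth currently 64, a scale-up step raises the depth to
 * min(64 + 8, 128) == 72, while a scale-down step simply halves it to 32
 * regardless of what scale_amount() returns.
 */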

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int cur_cookie;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        u64 scale_lat;
        unsigned int old;
        int direction = 0;

        if (lat_to_blkg(iolat)->parent == NULL)
                return;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

        /* Somebody beat us to the punch, just bail. */
        if (old != our_cookie)
                return;

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;

                /*
                 * Sometimes high priority groups are their own worst enemy, so
                 * instead of taking it out on some poor other group that did 5%
                 * or less of the IO's for the last summation just skip this
                 * scale down event.
                 */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* We're as low as we can go. */
        if (iolat->rq_depth.max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* We're back to the default cookie, unthrottle all the things. */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->rq_depth.max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
                                     spinlock_t *lock)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blk_iolatency_enabled(blkiolat))
                return;

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
                                     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        u64 start = bio_issue_time(issue);
        u64 req_time;

        /*
         * Truncate 'now' the same way the bio's issue time was truncated so
         * that the two timestamps are directly comparable.
         */
        now = __bio_issue_time(now);

        if (now <= start)
                return;

        req_time = now - start;

        /*
         * We don't want to count issue_as_root bio's in the cgroups latency
         * statistics as it could skew the numbers downwards.
         */
        if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;
                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct latency_stat stat;
        unsigned long flags;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
                latency_stat_init(iolat, s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        iolat_update_total_lat_avg(iolat, &stat);

        /* Everything is ok and we don't need to adjust the scale. */
        if (latency_sum_ok(iolat, &stat) &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        /* Somebody beat us to the punch, just bail. */
        spin_lock_irqsave(&lat_info->lock, flags);

        latency_stat_sum(iolat, &iolat->cur_stat, &stat);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
        iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

        if ((lat_info->last_scale_event >= now ||
            now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
                goto out;

        if (latency_sum_ok(iolat, &iolat->cur_stat) &&
            latency_sum_ok(iolat, &stat)) {
                if (latency_stat_samples(iolat, &iolat->cur_stat) <
                    BLKIOLATENCY_MIN_GOOD_SAMPLES)
                        goto out;
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (lat_info->scale_lat == 0 ||
                   lat_info->scale_lat >= iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
        latency_stat_init(iolat, &iolat->cur_stat);
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}

static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now = ktime_to_ns(ktime_get());
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;

        blkg = bio->bi_blkg;
        if (!blkg)
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        enabled = blk_iolatency_enabled(iolat->blkiolat);
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                atomic_dec(&rqw->inflight);
                if (!enabled || iolat->min_lat_nsec == 0)
                        goto next;
                iolatency_record_time(iolat, &bio->bi_issue, now,
                                      issue_as_root);
                window_start = atomic64_read(&iolat->window_start);
                if (now > window_start &&
                    (now - window_start) >= iolat->cur_win_nsec) {
                        if (atomic64_cmpxchg(&iolat->window_start,
                                        window_start, now) == window_start)
                                iolatency_check_latencies(iolat, now);
                }
next:
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}

static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;

        blkg = bio->bi_blkg;
        while (blkg && blkg->parent) {
                struct rq_wait *rqw;
                struct iolatency_grp *iolat;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                rqw = &iolat->rq_wait;
                atomic_dec(&rqw->inflight);
                wake_up(&rqw->wait);
next:
                blkg = blkg->parent;
        }
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        del_timer_sync(&blkiolat->timer);
        blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
        kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
        .cleanup = blkcg_iolatency_cleanup,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
};

static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = ktime_to_ns(ktime_get());

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.q->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_tryget(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /*
                 * We scaled down but don't have a scale_grp, scale up and carry
                 * on.
                 */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /*
                 * It's been 5 seconds since our last scale event, clear the
                 * scale grp in case the group that needed the scale down isn't
                 * doing any IO currently.
                 */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}

int blk_iolatency_init(struct request_queue *q)
{
        struct blk_iolatency *blkiolat;
        struct rq_qos *rqos;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        rqos = &blkiolat->rqos;
        rqos->id = RQ_QOS_CGROUP;
        rqos->ops = &blkcg_iolatency_ops;
        rqos->q = q;

        rq_qos_add(q, rqos);

        ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
        if (ret) {
                rq_qos_del(q, rqos);
                kfree(blkiolat);
                return ret;
        }

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

        return 0;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val)
                atomic_inc(&blkiolat->enabled);
        if (oldval && !val)
                atomic_dec(&blkiolat->enabled);
}
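
/*
 * Window sizing example for the helper above (illustrative values): a 5ms
 * target gives max(5ms << 4, 100ms) == 100ms windows, while a 100ms target
 * would want 1.6s windows and is clamped to the 1s maximum.
 */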

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;
                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}

static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                             size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkcg_gq *blkg;
        struct blkg_conf_ctx ctx;
        struct iolatency_grp *iolat;
        char *p, *tok;
        u64 lat_val = 0;
        u64 oldval;
        int ret;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
                return ret;

        iolat = blkg_to_lat(ctx.blkg);
        p = ctx.body;

        ret = -EINVAL;
        while ((tok = strsep(&p, " "))) {
                char key[16];
                char val[21];   /* 18446744073709551616 */

                if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
                        goto out;

                if (!strcmp(key, "target")) {
                        u64 v;

                        if (!strcmp(val, "max"))
                                lat_val = 0;
                        else if (sscanf(val, "%llu", &v) == 1)
                                lat_val = v * NSEC_PER_USEC;
                        else
                                goto out;
                } else {
                        goto out;
                }
        }

        /* Walk up the tree to see if our new val is lower than it should be. */
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;

        iolatency_set_min_lat_nsec(blkg, lat_val);
        if (oldval != iolat->min_lat_nsec) {
                iolatency_clear_scaling(blkg);
        }

        ret = 0;
out:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;
        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}

static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
                                 size_t size)
{
        struct latency_stat stat;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
        }
        preempt_enable();

        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
                                 (unsigned long long)stat.ps.missed,
                                 (unsigned long long)stat.ps.total);
        return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
                         (unsigned long long)stat.ps.missed,
                         (unsigned long long)stat.ps.total,
                         iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
                                size_t size)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;

        if (iolat->ssd)
                return iolatency_ssd_stat(iolat, buf, size);

        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
                                 avg_lat, cur_win);

        return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
                         iolat->rq_depth.max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, node);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                       __alignof__(struct latency_stat), gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        if (blk_queue_nonrot(blkg->q))
                iolat->ssd = true;
        else
                iolat->ssd = false;

        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;
                stat = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_init(iolat, stat);
        }

        latency_stat_init(iolat, &iolat->cur_stat);
        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->rq_depth.queue_depth = blkg->q->nr_requests;
        iolat->rq_depth.max_depth = UINT_MAX;
        iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);

        iolatency_set_min_lat_nsec(blkg, 0);
        iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        free_percpu(iolat->stats);
        kfree(iolat);
}

static struct cftype iolatency_files[] = {
        {
                .name = "latency",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = iolatency_print_limit,
                .write = iolatency_set_limit,
        },
        {}
};

static struct blkcg_policy blkcg_policy_iolatency = {
        .dfl_cftypes    = iolatency_files,
        .pd_alloc_fn    = iolatency_pd_alloc,
        .pd_init_fn     = iolatency_pd_init,
        .pd_offline_fn  = iolatency_pd_offline,
        .pd_free_fn     = iolatency_pd_free,
        .pd_stat_fn     = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
        return blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);