// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
        struct cache_set *c = dc->disk.c;

        /*
         * This is the size of the cache, minus the amount used for
         * flash-only devices
         */
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
                                bcache_flash_devs_sectors_dirty(c);

        /*
         * Unfortunately there is no control of global dirty data.  If the
         * user states that they want 10% dirty data in the cache, and has,
         * e.g., 5 backing volumes of equal size, we try and ensure each
         * backing volume uses about 2% of the cache for dirty data.
         */
        uint32_t bdev_share =
                div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
                                c->cached_dev_sectors);

        uint64_t cache_dirty_target =
                div_u64(cache_sectors * dc->writeback_percent, 100);

        /* Ensure each backing dev gets at least one dirty share */
        if (bdev_share < 1)
                bdev_share = 1;

        return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
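
/*
 * Worked example (illustrative numbers, not taken from the code): with
 * cache_sectors equivalent to 100 GiB, writeback_percent = 10 and five
 * equally sized backing devices, cache_dirty_target corresponds to 10 GiB
 * and bdev_share is roughly (1 << WRITEBACK_SHARE_SHIFT) / 5, so each
 * backing device is given a target of about 2 GiB of dirty data in the
 * cache - the "about 2% per volume" case described in the comment above.
 */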

static void __update_writeback_rate(struct cached_dev *dc)
{
        /*
         * PI controller:
         * Figures out the amount that should be written per second.
         *
         * First, the error (number of sectors that are dirty beyond our
         * target) is calculated.  The error is accumulated (numerically
         * integrated).
         *
         * Then, the proportional value and integral value are scaled
         * based on configured values.  These are stored as inverses to
         * avoid fixed point math and to make configuration easy-- e.g.
         * the default value of 40 for writeback_rate_p_term_inverse
         * attempts to write at a rate that would retire all the dirty
         * blocks in 40 seconds.
         *
         * The writeback_rate_i_term_inverse value of 10000 means that
         * 1/10000th of the error is accumulated in the integral term per
         * second.  This acts as a slow, long-term average that is not
         * subject to variations in usage like the p term.
         */
        int64_t target = __calc_target_rate(dc);
        int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
        int64_t error = dirty - target;
        int64_t proportional_scaled =
                div_s64(error, dc->writeback_rate_p_term_inverse);
        int64_t integral_scaled;
        uint32_t new_rate;

        if ((error < 0 && dc->writeback_rate_integral > 0) ||
            (error > 0 && time_before64(local_clock(),
                         dc->writeback_rate.next + NSEC_PER_MSEC))) {
                /*
                 * Only decrease the integral term if it's more than
                 * zero.  Only increase the integral term if the device
                 * is keeping up.  (Don't wind up the integral
                 * ineffectively in either case).
                 *
                 * It's necessary to scale this by
                 * writeback_rate_update_seconds to keep the integral
                 * term dimensioned properly.
                 */
                dc->writeback_rate_integral += error *
                        dc->writeback_rate_update_seconds;
        }

        integral_scaled = div_s64(dc->writeback_rate_integral,
                        dc->writeback_rate_i_term_inverse);

        new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
                        dc->writeback_rate_minimum, NSEC_PER_SEC);

        dc->writeback_rate_proportional = proportional_scaled;
        dc->writeback_rate_integral_scaled = integral_scaled;
        dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
        dc->writeback_rate.rate = new_rate;
        dc->writeback_rate_target = target;
}
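
/*
 * Worked example (illustrative numbers, not from the code): if target is
 * 1,000,000 sectors and 1,040,000 sectors are dirty, error = 40,000.  With
 * the default writeback_rate_p_term_inverse of 40, proportional_scaled is
 * 1,000 sectors/s, enough on its own to retire the excess in about 40
 * seconds.  When the guard above allows it, the integral term also
 * accumulates error * writeback_rate_update_seconds per update, and
 * 1/10000th of that running sum (the default writeback_rate_i_term_inverse)
 * is added on top.  The result is clamped between writeback_rate_minimum
 * and NSEC_PER_SEC.
 */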

static void update_writeback_rate(struct work_struct *work)
{
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);

        down_read(&dc->writeback_lock);

        if (atomic_read(&dc->has_dirty) &&
            dc->writeback_percent)
                __update_writeback_rate(dc);

        up_read(&dc->writeback_lock);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            !dc->writeback_percent)
                return 0;

        return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
        struct closure          cl;
        struct cached_dev       *dc;
        uint16_t                sequence;
        struct bio              bio;
};

static void dirty_init(struct keybuf_key *w)
{
        struct dirty_io *io = w->private;
        struct bio *bio = &io->bio;

        bio_init(bio, bio->bi_inline_vecs,
                 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
        if (!io->dc->writeback_percent)
                bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
        bio->bi_private         = w;
        bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        bio_free_pages(&io->bio);

        /* This is kind of a dumb way of signalling errors. */
        if (KEY_DIRTY(&w->key)) {
                int ret;
                unsigned i;
                struct keylist keys;

                bch_keylist_init(&keys);

                bkey_copy(keys.top, &w->key);
                SET_KEY_DIRTY(keys.top, false);
                bch_keylist_push(&keys);

                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

                ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

                if (ret)
                        trace_bcache_writeback_collision(&w->key);

                atomic_long_inc(ret
                                ? &dc->disk.c->writeback_keys_failed
                                : &dc->disk.c->writeback_keys_done);
        }

        bch_keybuf_del(&dc->writeback_keys, w);
        up(&dc->in_flight);

        closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        if (bio->bi_status)
                SET_KEY_DIRTY(&w->key, false);

        closure_put(&io->cl);
}
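
/*
 * Note on the "dirty bit as error signal" scheme mentioned above: on an I/O
 * error dirty_endio() clears KEY_DIRTY on the in-memory key.  write_dirty()
 * then skips the write to the backing device, and write_dirty_finish() skips
 * inserting a clean copy of the key into the btree, so the data stays marked
 * dirty in the cache and is picked up again on a later writeback pass.
 */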

static void write_dirty(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);
        struct keybuf_key *w = io->bio.bi_private;
        struct cached_dev *dc = io->dc;

        uint16_t next_sequence;

        if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
                /* Not our turn to write; wait for a write to complete */
                closure_wait(&dc->writeback_ordering_wait, cl);

                if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
                        /*
                         * Edge case: our turn came up in indeterminate order
                         * relative to when we were added to the wait list.
                         */
                        closure_wake_up(&dc->writeback_ordering_wait);
                }

                continue_at(cl, write_dirty, io->dc->writeback_write_wq);
                return;
        }

        next_sequence = io->sequence + 1;

        /*
         * IO errors are signalled using the dirty bit on the key.
         * If we failed to read, we should not attempt to write to the
         * backing device.  Instead, immediately go to write_dirty_finish
         * to clean up.
         */
        if (KEY_DIRTY(&w->key)) {
                dirty_init(w);
                bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
                io->bio.bi_iter.bi_sector = KEY_START(&w->key);
                bio_set_dev(&io->bio, io->dc->bdev);
                io->bio.bi_end_io       = dirty_endio;

                closure_bio_submit(&io->bio, cl);
        }

        atomic_set(&dc->writeback_sequence_next, next_sequence);
        closure_wake_up(&dc->writeback_ordering_wait);

        continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
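
/*
 * Ordering example (illustrative, not from the code): the dirty_io structs
 * for one pass are numbered 0, 1, 2, ... and writeback_sequence_next holds
 * the sequence that may write next.  If IO 2 reaches write_dirty() while
 * writeback_sequence_next is still 1, it parks on writeback_ordering_wait
 * and is rescheduled once IO 1 bumps the counter.  The re-check after
 * closure_wait() covers the race where the counter was bumped between the
 * first atomic_read() and the closure_wait(), so nobody sleeps forever.
 */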

static void read_dirty_endio(struct bio *bio)
{
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

        /* is_read = 1 */
        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
                            bio->bi_status, 1,
                            "reading dirty data from cache");

        dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
        struct dirty_io *io = container_of(cl, struct dirty_io, cl);

        closure_bio_submit(&io->bio, cl);

        continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
        unsigned delay = 0;
        struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
        size_t size;
        int nk, i;
        struct dirty_io *io;
        struct closure cl;
        uint16_t sequence = 0;

        BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
        atomic_set(&dc->writeback_sequence_next, sequence);
        closure_init_stack(&cl);

        /*
         * XXX: if we error, background writeback just spins. Should use some
         * mempools.
         */

        next = bch_keybuf_next(&dc->writeback_keys);

        while (!kthread_should_stop() && next) {
                size = 0;
                nk = 0;

                do {
                        BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

                        /*
                         * Don't combine too many operations, even if they
                         * are all small.
                         */
                        if (nk >= MAX_WRITEBACKS_IN_PASS)
                                break;

                        /*
                         * If the current operation is very large, don't
                         * further combine operations.
                         */
                        if (size >= MAX_WRITESIZE_IN_PASS)
                                break;

                        /*
                         * Operations are only eligible to be combined
                         * if they are contiguous.
                         *
                         * TODO: add a heuristic willing to fire a
                         * certain amount of non-contiguous IO per pass,
                         * so that we can benefit from backing device
                         * command queueing.
                         */
                        if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
                                                &START_KEY(&next->key)))
                                break;

                        size += KEY_SIZE(&next->key);
                        keys[nk++] = next;
                } while ((next = bch_keybuf_next(&dc->writeback_keys)));
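
                /*
                 * Batching example (illustrative sizes, not from the code):
                 * five dirty keys that are contiguous on the backing device
                 * are gathered into a single pass (nk = 5); a key that is
                 * not contiguous with the previous one, or that would push
                 * the pass over MAX_WRITEBACKS_IN_PASS keys or
                 * MAX_WRITESIZE_IN_PASS sectors, is left for the next pass.
                 */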

                /* Now we have gathered a set of 1..5 keys to write back. */
                for (i = 0; i < nk; i++) {
                        w = keys[i];

                        io = kzalloc(sizeof(struct dirty_io) +
                                     sizeof(struct bio_vec) *
                                     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                                     GFP_KERNEL);
                        if (!io)
                                goto err;

                        w->private      = io;
                        io->dc          = dc;
                        io->sequence    = sequence++;

                        dirty_init(w);
                        bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
                        io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                        bio_set_dev(&io->bio,
                                    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
                        io->bio.bi_end_io       = read_dirty_endio;

                        if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
                                goto err_free;

                        trace_bcache_writeback(&w->key);

                        down(&dc->in_flight);

                        /* We've acquired a semaphore for the maximum
                         * simultaneous number of writebacks; from here
                         * everything happens asynchronously.
                         */
                        closure_call(&io->cl, read_dirty_submit, NULL, &cl);
                }

                delay = writeback_delay(dc, size);

                /* If the control system would wait for at least half a
                 * second, and there's been no reqs hitting the backing disk
                 * for a while: use an alternate mode where we have at most
                 * one contiguous set of writebacks in flight at a time.  If
                 * someone wants to do IO it will be quick, as it will only
                 * have to contend with one operation in flight, and we'll
                 * be round-tripping data to the backing disk as quickly as
                 * it can accept it.
                 */
                if (delay >= HZ / 2) {
                        /* 3 means at least 1.5 seconds, up to 7.5 if we
                         * have slowed way down.
                         */
                        if (atomic_inc_return(&dc->backing_idle) >= 3) {
                                /* Wait for current I/Os to finish */
                                closure_sync(&cl);
                                /* And immediately launch a new set. */
                                delay = 0;
                        }
                }

                while (!kthread_should_stop() && delay) {
                        schedule_timeout_interruptible(delay);
                        delay = writeback_delay(dc, 0);
                }
        }

        if (0) {
err_free:
                kfree(w->private);
err:
                bch_keybuf_del(&dc->writeback_keys, w);
        }

        /*
         * Wait for outstanding writeback IOs to finish (and keybuf slots to be
         * freed) before refilling again
         */
        closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
                                  uint64_t offset, int nr_sectors)
{
        struct bcache_device *d = c->devices[inode];
        unsigned stripe_offset, stripe, sectors_dirty;

        if (!d)
                return;

        stripe = offset_to_stripe(d, offset);
        stripe_offset = offset & (d->stripe_size - 1);

        while (nr_sectors) {
                int s = min_t(unsigned, abs(nr_sectors),
                              d->stripe_size - stripe_offset);

                if (nr_sectors < 0)
                        s = -s;

                if (stripe >= d->nr_stripes)
                        return;

                sectors_dirty = atomic_add_return(s,
                                        d->stripe_sectors_dirty + stripe);
                if (sectors_dirty == d->stripe_size)
                        set_bit(stripe, d->full_dirty_stripes);
                else
                        clear_bit(stripe, d->full_dirty_stripes);

                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}
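
/*
 * Worked example (illustrative stripe size, not from the code): with
 * stripe_size = 1024 sectors, adding 1536 dirty sectors at offset 512
 * touches two stripes: the first 512 sectors fall in stripe 0 (offsets
 * 512..1023) and the remaining 1024 in stripe 1.  If stripe 1 was
 * previously clean its dirty count is now exactly stripe_size, so its bit
 * is set in full_dirty_stripes.  A negative nr_sectors walks the same
 * stripes and subtracts instead, clearing the bits as counts drop below
 * stripe_size.
 */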

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
        struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

        BUG_ON(KEY_INODE(k) != dc->disk.id);

        return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        unsigned start_stripe, stripe, next_stripe;
        bool wrapped = false;

        stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

        if (stripe >= dc->disk.nr_stripes)
                stripe = 0;

        start_stripe = stripe;

        while (1) {
                stripe = find_next_bit(dc->disk.full_dirty_stripes,
                                       dc->disk.nr_stripes, stripe);

                if (stripe == dc->disk.nr_stripes)
                        goto next;

                next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
                                                 dc->disk.nr_stripes, stripe);

                buf->last_scanned = KEY(dc->disk.id,
                                        stripe * dc->disk.stripe_size, 0);

                bch_refill_keybuf(dc->disk.c, buf,
                                  &KEY(dc->disk.id,
                                       next_stripe * dc->disk.stripe_size, 0),
                                  dirty_pred);

                if (array_freelist_empty(&buf->freelist))
                        return;

                stripe = next_stripe;
next:
                if (wrapped && stripe > start_stripe)
                        return;

                if (stripe == dc->disk.nr_stripes) {
                        stripe = 0;
                        wrapped = true;
                }
        }
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
        struct keybuf *buf = &dc->writeback_keys;
        struct bkey start = KEY(dc->disk.id, 0, 0);
        struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
        struct bkey start_pos;

        /*
         * make sure keybuf pos is inside the range for this disk - at bringup
         * we might not be attached yet so this disk's inode nr isn't
         * initialized then
         */
        if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
            bkey_cmp(&buf->last_scanned, &end) > 0)
                buf->last_scanned = start;

        if (dc->partial_stripes_expensive) {
                refill_full_stripes(dc);
                if (array_freelist_empty(&buf->freelist))
                        return false;
        }

        start_pos = buf->last_scanned;
        bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

        if (bkey_cmp(&buf->last_scanned, &end) < 0)
                return false;

        /*
         * If we get to the end start scanning again from the beginning, and
         * only scan up to where we initially started scanning from:
         */
        buf->last_scanned = start;
        bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

        return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
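
/*
 * Scan pattern in words (restating the logic above): the keybuf remembers
 * where the previous refill stopped, so each refill scans from last_scanned
 * to the end of the device's key range; if that still leaves room in the
 * keybuf it wraps to the start and scans up to the original position.  Only
 * when the wrapped scan makes it all the way back do we report that the
 * whole device has been searched, which is what lets the writeback thread
 * decide the device is clean.
 */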

static int bch_writeback_thread(void *arg)
{
        struct cached_dev *dc = arg;
        bool searched_full_index;

        bch_ratelimit_reset(&dc->writeback_rate);

        while (!kthread_should_stop()) {
                down_write(&dc->writeback_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                if (!atomic_read(&dc->has_dirty) ||
                    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
                     !dc->writeback_running)) {
                        up_write(&dc->writeback_lock);

                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                return 0;
                        }

                        schedule();
                        continue;
                }
                set_current_state(TASK_RUNNING);

                searched_full_index = refill_dirty(dc);

                if (searched_full_index &&
                    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
                        atomic_set(&dc->has_dirty, 0);
                        cached_dev_put(dc);
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
                        bch_write_bdev_super(dc, NULL);
                }

                up_write(&dc->writeback_lock);

                read_dirty(dc);

                if (searched_full_index) {
                        unsigned delay = dc->writeback_delay * HZ;

                        while (delay &&
                               !kthread_should_stop() &&
                               !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                                delay = schedule_timeout_interruptible(delay);

                        bch_ratelimit_reset(&dc->writeback_rate);
                }
        }

        return 0;
}

/* Init */

struct sectors_dirty_init {
        struct btree_op op;
        unsigned        inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
                                 struct bkey *k)
{
        struct sectors_dirty_init *op = container_of(_op,
                                                struct sectors_dirty_init, op);
        if (KEY_INODE(k) > op->inode)
                return MAP_DONE;

        if (KEY_DIRTY(k))
                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                             KEY_START(k), KEY_SIZE(k));

        return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
        struct sectors_dirty_init op;

        bch_btree_op_init(&op.op, -1);
        op.inode = d->id;

        bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
                           sectors_dirty_init_fn, 0);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
        sema_init(&dc->in_flight, 64);
        init_rwsem(&dc->writeback_lock);
        bch_keybuf_init(&dc->writeback_keys);

        dc->writeback_metadata          = true;
        dc->writeback_running           = true;
        dc->writeback_percent           = 10;
        dc->writeback_delay             = 30;
        dc->writeback_rate.rate         = 1024;
        dc->writeback_rate_minimum      = 8;

        dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
        dc->writeback_rate_p_term_inverse = 40;
        dc->writeback_rate_i_term_inverse = 10000;

        INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
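
/*
 * Reading the defaults above (one sector = 512 bytes, per the << 9 in
 * dirty_init()): the starting rate of 1024 sectors/s is 512 KiB/s and the
 * floor of 8 sectors/s is 4 KiB/s; writeback_percent = 10 targets 10% of
 * the cache dirty, and writeback_delay = 30 makes the writeback thread
 * sleep for up to 30 seconds after a full index scan before rescanning.
 */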

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
        dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
                                                WQ_MEM_RECLAIM, 0);
        if (!dc->writeback_write_wq)
                return -ENOMEM;

        dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
                                              "bcache_writeback");
        if (IS_ERR(dc->writeback_thread))
                return PTR_ERR(dc->writeback_thread);

        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);

        bch_writeback_queue(dc);

        return 0;
}