// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

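/*
 * State for one key's worth of data being moved: cl ties the read and
 * write phases together, w is the keybuf entry being moved, op is the
 * reinsert operation, and bio carries the data itself.
 */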
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};

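/*
 * Keybuf predicate: accept a key if at least one of its pointers lands
 * in a bucket that garbage collection marked for moving (GC_MOVE).
 */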
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    GC_MOVE(PTR_BUCKET(c, k, i)))
			return true;

	return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);

	kfree(io);
}

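/*
 * Completion for the write phase: free the data pages, log any replace
 * collision, drop the key from moving_gc_keys and release our
 * moving_in_flight slot; the closure destructor then frees the moving_io.
 */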
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_free_pages(bio);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

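/*
 * Read completion: propagate any error into the insert op. A clean key
 * whose pointer went stale while the read was in flight no longer needs
 * to be copied, so it is flagged as an error to skip the write phase.
 */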
static void read_moving_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (bio->bi_status) {
		io->op.status = bio->bi_status;
	} else if (!KEY_DIRTY(&b->key) &&
		   ptr_stale(io->op.c, &b->key, 0)) {
		io->op.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
}

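/*
 * (Re)initialize the embedded bio to cover the key being moved; called
 * once before the read and again before the write, since the same bio
 * is reused for both.
 */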
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
	bio->bi_private		= &io->cl;
	bch_bio_map(bio, NULL);
}

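/*
 * Write phase: unless the read failed, rebuild the bio over the same
 * pages and reinsert the data. replace_key makes the insert conditional,
 * so it only goes through if the original key is still present.
 */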
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->status) {
		moving_init(io);

		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
		op->write_prio		= 1;
		op->bio			= &io->bio.bio;

		op->writeback		= KEY_DIRTY(&io->w->key);
		op->csum		= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace		= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, op->wq);
}

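/* Submit the read for this key, then continue to the write phase. */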
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, io->op.wq);
}

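/*
 * Main moving GC loop: rescan the keyspace for keys that moving_pred
 * accepts, then read and reinsert each one. The moving_in_flight
 * semaphore bounds how many moves are outstanding at once.
 */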
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		if (ptr_stale(c, &w->key, 0)) {
			bch_keybuf_del(&c->moving_gc_keys, w);
			continue;
		}

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;
		io->op.wq	= c->moving_gc_wq;

		moving_init(io);
		bio = &io->bio.bio;

		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_end_io	= read_moving_endio;

		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
		/* Error path, only reachable via the goto err above */
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

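/*
 * The heap tracks the buckets with the least live data; with this
 * comparison the candidate with the *most* live sectors sits at the top,
 * so it is the first to be dropped when the set exceeds the reserve.
 */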
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned int bucket_heap_top(struct cache *ca)
{
	struct bucket *b;

	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

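/*
 * Select the buckets whose live data can be copied using only the
 * sectors reserved for moving GC, mark them GC_MOVE, and then move
 * their data via read_moving().
 */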
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned int i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned int sectors_to_move = 0;
		unsigned int reserve_sectors = ca->sb.bucket_size *
			fifo_used(&ca->free[RESERVE_MOVINGGC]);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			/* Skip metadata, empty, completely full or pinned buckets */
			if (GC_MARK(b) == GC_MARK_METADATA ||
			    !GC_SECTORS_USED(b) ||
			    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
			    atomic_read(&b->pin))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		/* Drop the fullest candidates until the live data fits the reserve */
		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		while (heap_pop(&ca->heap, b, bucket_cmp))
			SET_GC_MOVE(b, 1);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

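/* One-time setup: initialize the keybuf and allow up to 64 moves in flight. */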
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}