// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright (C) 2014 Datera Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_node_scan.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>

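/*
 * Internal return codes used by the topology repair helpers below: positive
 * values that can't collide with real (negative) error codes, telling the
 * caller to drop the current node, drop the previous node, or restart because
 * missing nodes were filled in from bch2_get_scanned_nodes().
 */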
#define DROP_THIS_NODE          10
#define DROP_PREV_NODE          11
#define DID_FILL_FROM_SCAN      12

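/*
 * Cast a read-only bkey_s_c to a mutable bkey_s by dropping const: only used
 * by gc to hand keys it owns to bch2_key_trigger(), which takes a bkey_s; the
 * unsafe_ prefix flags the deliberate const-discard.
 */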
static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
{
        return (struct bkey_s) {{{
                (struct bkey *) k.k,
                (struct bch_val *) k.v
        }}};
}

static bool should_restart_for_topology_repair(struct bch_fs *c)
{
        return c->opts.fix_errors != FSCK_FIX_no &&
                !(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
}

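/*
 * c->gc_pos publishes how far mark and sweep has gotten; it's read under the
 * gc_pos_lock seqcount by code that needs to know whether a given position
 * has been marked yet. gc only ever moves the position forwards, hence the
 * BUG_ON() in gc_pos_set().
 */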
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
        preempt_disable();
        write_seqcount_begin(&c->gc_pos_lock);
        c->gc_pos = new_pos;
        write_seqcount_end(&c->gc_pos_lock);
        preempt_enable();
}

static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
        BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
        __gc_pos_set(c, new_pos);
}

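/*
 * Topology repair always writes out btree_ptr_v2 keys, since only v2 carries
 * min_key and the RANGE_UPDATED flag that set_node_min()/set_node_max() rely
 * on; a v1 btree_ptr is converted here.
 */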
static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
        switch (b->key.k.type) {
        case KEY_TYPE_btree_ptr: {
                struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);

                dst->k.p                = src->k.p;
                dst->v.mem_ptr          = 0;
                dst->v.seq              = b->data->keys.seq;
                dst->v.sectors_written  = 0;
                dst->v.flags            = 0;
                dst->v.min_key          = b->data->min_key;
                set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
                memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
                break;
        }
        case KEY_TYPE_btree_ptr_v2:
                bkey_copy(&dst->k_i, &b->key);
                break;
        default:
                BUG();
        }
}

static void bch2_btree_node_update_key_early(struct btree_trans *trans,
                                             enum btree_id btree, unsigned level,
                                             struct bkey_s_c old, struct bkey_i *new)
{
        struct bch_fs *c = trans->c;
        struct btree *b;
        struct bkey_buf tmp;
        int ret;

        bch2_bkey_buf_init(&tmp);
        bch2_bkey_buf_reassemble(&tmp, c, old);

        b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
        if (!IS_ERR_OR_NULL(b)) {
                mutex_lock(&c->btree_cache.lock);

                bch2_btree_node_hash_remove(&c->btree_cache, b);

                bkey_copy(&b->key, new);
                ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
                BUG_ON(ret);

                mutex_unlock(&c->btree_cache.lock);
                six_unlock_read(&b->c.lock);
        }

        bch2_bkey_buf_exit(&tmp, c);
}

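/*
 * set_node_min()/set_node_max() repair a node's range by rewriting its key:
 * the new key is inserted into the journal keys so it supersedes what's on
 * disk during recovery, and keys now outside the node's range are dropped.
 * Changing max_key also means rehashing, since nodes are hashed by key.k.p.
 */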
static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
{
        struct bkey_i_btree_ptr_v2 *new;
        int ret;

        if (c->opts.verbose) {
                struct printbuf buf = PRINTBUF;

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
                prt_str(&buf, " -> ");
                bch2_bpos_to_text(&buf, new_min);

                bch_info(c, "%s(): %s", __func__, buf.buf);
                printbuf_exit(&buf);
        }

        new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -BCH_ERR_ENOMEM_gc_repair_key;

        btree_ptr_to_v2(b, new);
        b->data->min_key        = new_min;
        new->v.min_key          = new_min;
        SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

        ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
        if (ret) {
                kfree(new);
                return ret;
        }

        bch2_btree_node_drop_keys_outside_node(b);
        bkey_copy(&b->key, &new->k_i);
        return 0;
}

static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
{
        struct bkey_i_btree_ptr_v2 *new;
        int ret;

        if (c->opts.verbose) {
                struct printbuf buf = PRINTBUF;

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
                prt_str(&buf, " -> ");
                bch2_bpos_to_text(&buf, new_max);

                bch_info(c, "%s(): %s", __func__, buf.buf);
                printbuf_exit(&buf);
        }

        ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
        if (ret)
                return ret;

        new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -BCH_ERR_ENOMEM_gc_repair_key;

        btree_ptr_to_v2(b, new);
        b->data->max_key        = new_max;
        new->k.p                = new_max;
        SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

        ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
        if (ret) {
                kfree(new);
                return ret;
        }

        bch2_btree_node_drop_keys_outside_node(b);

        mutex_lock(&c->btree_cache.lock);
        bch2_btree_node_hash_remove(&c->btree_cache, b);

        bkey_copy(&b->key, &new->k_i);
        ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
        BUG_ON(ret);
        mutex_unlock(&c->btree_cache.lock);
        return 0;
}

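/*
 * Check that @cur begins exactly where @prev ended (or at the parent's
 * min_key, if @cur is the first child): a gap at level 1 may be fillable from
 * nodes found by btree node scan; overlaps are resolved in favor of the node
 * with the newer sequence number, by shrinking or dropping the other one.
 */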
static int btree_check_node_boundaries(struct bch_fs *c, struct btree *b,
                                       struct btree *prev, struct btree *cur,
                                       struct bpos *pulled_from_scan)
{
        struct bpos expected_start = !prev
                ? b->data->min_key
                : bpos_successor(prev->key.k.p);
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
               !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
                        b->data->min_key));

        if (bpos_eq(expected_start, cur->data->min_key))
                return 0;

        prt_printf(&buf, "  at btree %s level %u:\n  parent: ",
                   bch2_btree_id_str(b->c.btree_id), b->c.level);
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

        if (prev) {
                prt_printf(&buf, "\n  prev: ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
        }

        prt_str(&buf, "\n  next: ");
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));

        if (bpos_lt(expected_start, cur->data->min_key)) {                              /* gap */
                if (b->c.level == 1 &&
                    bpos_lt(*pulled_from_scan, cur->data->min_key)) {
                        ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
                                                     expected_start,
                                                     bpos_predecessor(cur->data->min_key));
                        if (ret)
                                goto err;

                        *pulled_from_scan = cur->data->min_key;
                        ret = DID_FILL_FROM_SCAN;
                } else {
                        if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
                                             "btree node with incorrect min_key%s", buf.buf))
                                ret = set_node_min(c, cur, expected_start);
                }
        } else {                                                                        /* overlap */
                if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {   /* cur overwrites prev */
                        if (bpos_ge(prev->data->min_key, cur->data->min_key)) {         /* fully? */
                                if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_next_node,
                                                     "btree node overwritten by next node%s", buf.buf))
                                        ret = DROP_PREV_NODE;
                        } else {
                                if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
                                                     "btree node with incorrect max_key%s", buf.buf))
                                        ret = set_node_max(c, prev,
                                                           bpos_predecessor(cur->data->min_key));
                        }
                } else {
                        if (bpos_ge(expected_start, cur->data->max_key)) {              /* fully? */
                                if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_prev_node,
                                                     "btree node overwritten by prev node%s", buf.buf))
                                        ret = DROP_THIS_NODE;
                        } else {
                                if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
                                                     "btree node with incorrect min_key%s", buf.buf))
                                        ret = set_node_min(c, cur, expected_start);
                        }
                }
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

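/*
 * The last child must end exactly at the parent's max_key: as above, a
 * missing range at level 1 may be refilled from scanned nodes; otherwise the
 * child's max_key is extended to match the parent's.
 */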
static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
                                 struct btree *child, struct bpos *pulled_from_scan)
{
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (bpos_eq(child->key.k.p, b->key.k.p))
                return 0;

        prt_printf(&buf, "at btree %s level %u:\n  parent: ",
                   bch2_btree_id_str(b->c.btree_id), b->c.level);
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

        prt_str(&buf, "\n  child: ");
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));

        if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
                             "btree node with incorrect max_key%s", buf.buf)) {
                if (b->c.level == 1 &&
                    bpos_lt(*pulled_from_scan, b->key.k.p)) {
                        ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
                                                bpos_successor(child->key.k.p), b->key.k.p);
                        if (ret)
                                goto err;

                        *pulled_from_scan = b->key.k.p;
                        ret = DID_FILL_FROM_SCAN;
                } else {
                        ret = set_node_max(c, child, b->key.k.p);
                }
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

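/*
 * Repair one interior node: read each child, evicting unreadable or stale
 * ones, fix the boundaries between adjacent children, then recurse into them.
 * Restarts from the top of the node whenever keys were pulled in from the
 * scan pass, and returns DROP_THIS_NODE if no children survive.
 */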
static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
                                              struct bpos *pulled_from_scan)
{
        struct bch_fs *c = trans->c;
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf prev_k, cur_k;
        struct btree *prev = NULL, *cur = NULL;
        bool have_child, new_pass = false;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (!b->c.level)
                return 0;

        bch2_bkey_buf_init(&prev_k);
        bch2_bkey_buf_init(&cur_k);
again:
        cur = prev = NULL;
        have_child = new_pass = false;
        bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
        iter.prefetch = true;

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                BUG_ON(bpos_lt(k.k->p, b->data->min_key));
                BUG_ON(bpos_gt(k.k->p, b->data->max_key));

                bch2_btree_and_journal_iter_advance(&iter);
                bch2_bkey_buf_reassemble(&cur_k, c, k);

                cur = bch2_btree_node_get_noiter(trans, cur_k.k,
                                        b->c.btree_id, b->c.level - 1,
                                        false);
                ret = PTR_ERR_OR_ZERO(cur);

                printbuf_reset(&buf);
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));

                if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), c,
                                btree_node_unreadable,
                                "Topology repair: unreadable btree node at btree %s level %u:\n"
                                "  %s",
                                bch2_btree_id_str(b->c.btree_id),
                                b->c.level - 1,
                                buf.buf)) {
                        bch2_btree_node_evict(trans, cur_k.k);
                        cur = NULL;
                        ret =   bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes) ?:
                                bch2_journal_key_delete(c, b->c.btree_id,
                                                        b->c.level, cur_k.k->k.p);
                        if (ret)
                                break;
                        continue;
                }

                bch_err_msg(c, ret, "getting btree node");
                if (ret)
                        break;

                if (bch2_btree_node_is_stale(c, cur)) {
                        bch_info(c, "btree node %s older than nodes found by scanning", buf.buf);
                        six_unlock_read(&cur->c.lock);
                        bch2_btree_node_evict(trans, cur_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, cur_k.k->k.p);
                        cur = NULL;
                        if (ret)
                                break;
                        continue;
                }

                ret = btree_check_node_boundaries(c, b, prev, cur, pulled_from_scan);
                if (ret == DID_FILL_FROM_SCAN) {
                        new_pass = true;
                        ret = 0;
                }

                if (ret == DROP_THIS_NODE) {
                        six_unlock_read(&cur->c.lock);
                        bch2_btree_node_evict(trans, cur_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, cur_k.k->k.p);
                        cur = NULL;
                        if (ret)
                                break;
                        continue;
                }

                if (prev)
                        six_unlock_read(&prev->c.lock);
                prev = NULL;

                if (ret == DROP_PREV_NODE) {
                        bch_info(c, "dropped prev node");
                        bch2_btree_node_evict(trans, prev_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, prev_k.k->k.p);
                        if (ret)
                                break;

                        bch2_btree_and_journal_iter_exit(&iter);
                        goto again;
                } else if (ret)
                        break;

                prev = cur;
                cur = NULL;
                bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
        }

        if (!ret && !IS_ERR_OR_NULL(prev)) {
                BUG_ON(cur);
                ret = btree_repair_node_end(c, b, prev, pulled_from_scan);
                if (ret == DID_FILL_FROM_SCAN) {
                        new_pass = true;
                        ret = 0;
                }
        }

        if (!IS_ERR_OR_NULL(prev))
                six_unlock_read(&prev->c.lock);
        prev = NULL;
        if (!IS_ERR_OR_NULL(cur))
                six_unlock_read(&cur->c.lock);
        cur = NULL;

        if (ret)
                goto err;

        bch2_btree_and_journal_iter_exit(&iter);

        if (new_pass)
                goto again;

        bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
        iter.prefetch = true;

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_bkey_buf_reassemble(&cur_k, c, k);
                bch2_btree_and_journal_iter_advance(&iter);

                cur = bch2_btree_node_get_noiter(trans, cur_k.k,
                                        b->c.btree_id, b->c.level - 1,
                                        false);
                ret = PTR_ERR_OR_ZERO(cur);

                bch_err_msg(c, ret, "getting btree node");
                if (ret)
                        goto err;

                ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
                six_unlock_read(&cur->c.lock);
                cur = NULL;

                if (ret == DROP_THIS_NODE) {
                        bch2_btree_node_evict(trans, cur_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, cur_k.k->k.p);
                        new_pass = true;
                }

                if (ret)
                        goto err;

                have_child = true;
        }

        printbuf_reset(&buf);
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

        if (mustfix_fsck_err_on(!have_child, c,
                        btree_node_topology_interior_node_empty,
                        "empty interior btree node at btree %s level %u\n"
                        "  %s",
                        bch2_btree_id_str(b->c.btree_id),
                        b->c.level, buf.buf))
                ret = DROP_THIS_NODE;
err:
fsck_err:
        if (!IS_ERR_OR_NULL(prev))
                six_unlock_read(&prev->c.lock);
        if (!IS_ERR_OR_NULL(cur))
                six_unlock_read(&cur->c.lock);

        bch2_btree_and_journal_iter_exit(&iter);

        if (!ret && new_pass)
                goto again;

        BUG_ON(!ret && bch2_btree_node_check_topology(trans, b));

        bch2_bkey_buf_exit(&prev_k, c);
        bch2_bkey_buf_exit(&cur_k, c);
        printbuf_exit(&buf);
        return ret;
}

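/*
 * Entry point for the check_topology recovery pass: for each btree, if the
 * root is unreadable, reconstruct it from nodes found by btree node scan (or
 * allocate a fake empty root if nothing was found), then repair the topology
 * of the whole tree top down.
 */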
int bch2_check_topology(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bpos pulled_from_scan = POS_MIN;
        int ret = 0;

        for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);
                bool reconstructed_root = false;

                if (r->error) {
                        ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
                        if (ret)
                                break;
reconstruct_root:
                        bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i));

                        r->alive = false;
                        r->error = 0;

                        if (!bch2_btree_has_scanned_nodes(c, i)) {
                                mustfix_fsck_err(c, btree_root_unreadable_and_scan_found_nothing,
                                                 "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
                                bch2_btree_root_alloc_fake(c, i, 0);
                        } else {
                                bch2_btree_root_alloc_fake(c, i, 1);
                                ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
                                if (ret)
                                        break;
                        }

                        bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
                        reconstructed_root = true;
                }

                struct btree *b = r->b;

                btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
                ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
                six_unlock_read(&b->c.lock);

                if (ret == DROP_THIS_NODE) {
                        /* remove from hash table under btree_cache.lock, as everywhere else: */
                        mutex_lock(&c->btree_cache.lock);
                        bch2_btree_node_hash_remove(&c->btree_cache, b);
                        list_move(&b->list, &c->btree_cache.freeable);
                        mutex_unlock(&c->btree_cache.lock);

                        r->b = NULL;

                        if (!reconstructed_root)
                                goto reconstruct_root;

                        bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
                        bch2_btree_root_alloc_fake(c, i, 0);
                        r->alive = false;
                        ret = 0;
                }
        }
fsck_err:
        bch2_trans_put(trans);
        return ret;
}

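/*
 * Compare each of a key's pointers against the bucket state gc reconstructed:
 * where the pointer looks authoritative the bucket is repaired in place;
 * otherwise the key is rewritten, dropping pointers (and stale stripe
 * pointers) that can't be trusted, and inserted into the journal keys.
 */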
static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id,
                               unsigned level, bool is_root,
                               struct bkey_s_c *k)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k);
        const union bch_extent_entry *entry_c;
        struct extent_ptr_decoded p = { 0 };
        bool do_update = false;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        /*
         * XXX
         * use check_bucket_ref here
         */
        bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
                struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
                enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, p, entry_c);

                if (fsck_err_on(!g->gen_valid,
                                c, ptr_to_missing_alloc_key,
                                "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                                p.ptr.gen,
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
                        if (!p.ptr.cached) {
                                g->gen_valid            = true;
                                g->gen                  = p.ptr.gen;
                        } else {
                                do_update = true;
                        }
                }

                if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
                                c, ptr_gen_newer_than_bucket_gen,
                                "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                                p.ptr.gen, g->gen,
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
                        if (!p.ptr.cached) {
                                g->gen_valid            = true;
                                g->gen                  = p.ptr.gen;
                                g->data_type            = 0;
                                g->dirty_sectors        = 0;
                                g->cached_sectors       = 0;
                                set_bit(BCH_FS_need_another_gc, &c->flags);
                        } else {
                                do_update = true;
                        }
                }

                if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
                                c, ptr_gen_newer_than_bucket_gen,
                                "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
                                bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                                p.ptr.gen,
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
                        do_update = true;

                if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
                                c, stale_dirty_ptr,
                                "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                                p.ptr.gen, g->gen,
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
                        do_update = true;

                if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
                        continue;

                if (fsck_err_on(bucket_data_type(g->data_type) &&
                                bucket_data_type(g->data_type) !=
                                bucket_data_type(data_type), c,
                                ptr_bucket_data_type_mismatch,
                                "bucket %u:%zu different types of data in same bucket: %s, %s\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_type_str(g->data_type),
                                bch2_data_type_str(data_type),
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
                        if (data_type == BCH_DATA_btree) {
                                g->data_type    = data_type;
                                set_bit(BCH_FS_need_another_gc, &c->flags);
                        } else {
                                do_update = true;
                        }
                }

                if (p.has_ec) {
                        struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

                        if (fsck_err_on(!m || !m->alive, c,
                                        ptr_to_missing_stripe,
                                        "pointer to nonexistent stripe %llu\n"
                                        "while marking %s",
                                        (u64) p.ec.idx,
                                        (printbuf_reset(&buf),
                                         bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
                                do_update = true;

                        if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
                                        ptr_to_incorrect_stripe,
                                        "pointer does not match stripe %llu\n"
                                        "while marking %s",
                                        (u64) p.ec.idx,
                                        (printbuf_reset(&buf),
                                         bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
                                do_update = true;
                }
        }

        if (do_update) {
                if (is_root) {
                        bch_err(c, "cannot update btree roots yet");
                        ret = -EINVAL;
                        goto err;
                }

                struct bkey_i *new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
                if (!new) {
                        ret = -BCH_ERR_ENOMEM_gc_repair_key;
                        bch_err_msg(c, ret, "allocating new key");
                        goto err;
                }

                bkey_reassemble(new, *k);

                if (level) {
                        /*
                         * We don't want to drop btree node pointers - if the
                         * btree node isn't there anymore, the read path will
                         * sort it out:
                         */
                        struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
                        bkey_for_each_ptr(ptrs, ptr) {
                                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                                struct bucket *g = PTR_GC_BUCKET(ca, ptr);

                                ptr->gen = g->gen;
                        }
                } else {
                        struct bkey_ptrs ptrs;
                        union bch_extent_entry *entry;
restart_drop_ptrs:
                        ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
                        bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
                                struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
                                struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
                                enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);

                                if ((p.ptr.cached &&
                                     (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
                                    (!p.ptr.cached &&
                                     gen_cmp(p.ptr.gen, g->gen) < 0) ||
                                    gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
                                    (g->data_type &&
                                     g->data_type != data_type)) {
                                        bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
                                        goto restart_drop_ptrs;
                                }
                        }
again:
                        ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
                        bkey_extent_entry_for_each(ptrs, entry) {
                                if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
                                        struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
                                                                        entry->stripe_ptr.idx);
                                        union bch_extent_entry *next_ptr;

                                        bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
                                                if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
                                                        goto found;
                                        next_ptr = NULL;
found:
                                        if (!next_ptr) {
                                                bch_err(c, "aieee, found stripe ptr with no data ptr");
                                                continue;
                                        }

                                        if (!m || !m->alive ||
                                            !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
                                                                       &next_ptr->ptr,
                                                                       m->sectors)) {
                                                bch2_bkey_extent_entry_drop(new, entry);
                                                goto again;
                                        }
                                }
                        }
                }

                if (level)
                        bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new);

                if (0) {
                        printbuf_reset(&buf);
                        bch2_bkey_val_to_text(&buf, c, *k);
                        bch_info(c, "updated %s", buf.buf);

                        printbuf_reset(&buf);
                        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
                        bch_info(c, "new key %s", buf.buf);
                }

                ret = bch2_journal_key_insert_take(c, btree_id, level, new);
                if (ret) {
                        kfree(new);
                        goto err;
                }

                *k = bkey_i_to_s_c(new);
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

/* marking of btree keys/nodes: */

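/*
 * Mark a single key: repair its pointers first, then run the key trigger in
 * gc mode with an all-zeros "old" key, so the key's usage is accounted from
 * scratch into the gc counters.
 */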
static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
                            unsigned level, bool is_root,
                            struct bkey_s_c *k,
                            bool initial)
{
        struct bch_fs *c = trans->c;
        struct bkey deleted = KEY(0, 0, 0);
        struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
        int ret = 0;

        deleted.p = k->k->p;

        if (initial) {
                BUG_ON(bch2_journal_seq_verify &&
                       k->k->version.lo > atomic64_read(&c->journal.seq));

                if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
                                bkey_version_in_future,
                                "key version number higher than recorded: %llu > %llu",
                                k->k->version.lo,
                                atomic64_read(&c->key_version)))
                        atomic64_set(&c->key_version, k->k->version.lo);
        }

        ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
        if (ret)
                goto err;

        ret = commit_do(trans, NULL, NULL, 0,
                        bch2_key_trigger(trans, btree_id, level, old,
                                         unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
fsck_err:
err:
        bch_err_fn(c, ret);
        return ret;
}

static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
{
        struct btree_node_iter iter;
        struct bkey unpacked;
        struct bkey_s_c k;
        int ret = 0;

        ret = bch2_btree_node_check_topology(trans, b);
        if (ret)
                return ret;

        if (!btree_node_type_needs_gc(btree_node_type(b)))
                return 0;

        bch2_btree_node_iter_init_from_start(&iter, b);

        while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
                ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
                                       &k, initial);
                if (ret)
                        return ret;

                bch2_btree_node_iter_advance(&iter, b);
        }

        return 0;
}

static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
                         bool initial, bool metadata_only)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct btree *b;
        unsigned depth = metadata_only ? 1 : 0;
        int ret = 0;

        gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));

        __for_each_btree_node(trans, iter, btree_id, POS_MIN,
                              0, depth, BTREE_ITER_PREFETCH, b, ret) {
                bch2_verify_btree_nr_keys(b);

                gc_pos_set(c, gc_pos_btree_node(b));

                ret = btree_gc_mark_node(trans, b, initial);
                if (ret)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);

        if (ret)
                return ret;

        mutex_lock(&c->btree_root_lock);
        b = bch2_btree_id_root(c, btree_id)->b;
        if (!btree_node_fake(b)) {
                struct bkey_s_c k = bkey_i_to_s_c(&b->key);

                ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1,
                                       true, &k, initial);
        }
        gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
        mutex_unlock(&c->btree_root_lock);

        return ret;
}

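/*
 * Initial gc walks the btree with btree_and_journal_iter so that keys that
 * only exist in the journal are marked too; hitting an unreadable node here
 * is what kicks off the topology repair pass above.
 */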
static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
                                      unsigned target_depth)
{
        struct bch_fs *c = trans->c;
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf cur;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        ret = bch2_btree_node_check_topology(trans, b);
        if (ret)
                return ret;

        bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
        bch2_bkey_buf_init(&cur);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                BUG_ON(bpos_lt(k.k->p, b->data->min_key));
                BUG_ON(bpos_gt(k.k->p, b->data->max_key));

                ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
                                       false, &k, true);
                if (ret)
                        goto fsck_err;

                bch2_btree_and_journal_iter_advance(&iter);
        }

        if (b->c.level > target_depth) {
                bch2_btree_and_journal_iter_exit(&iter);
                bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
                iter.prefetch = true;

                while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                        struct btree *child;

                        bch2_bkey_buf_reassemble(&cur, c, k);
                        bch2_btree_and_journal_iter_advance(&iter);

                        child = bch2_btree_node_get_noiter(trans, cur.k,
                                                b->c.btree_id, b->c.level - 1,
                                                false);
                        ret = PTR_ERR_OR_ZERO(child);

                        if (bch2_err_matches(ret, EIO)) {
                                bch2_topology_error(c);

                                if (__fsck_err(c,
                                          FSCK_CAN_FIX|
                                          FSCK_CAN_IGNORE|
                                          FSCK_NO_RATELIMIT,
                                          btree_node_read_error,
                                          "Unreadable btree node at btree %s level %u:\n"
                                          "  %s",
                                          bch2_btree_id_str(b->c.btree_id),
                                          b->c.level - 1,
                                          (printbuf_reset(&buf),
                                           bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
                                    should_restart_for_topology_repair(c)) {
                                        bch_info(c, "Halting mark and sweep to start topology repair pass");
                                        ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
                                        goto fsck_err;
                                } else {
                                        /*
                                         * Continue marking when opted to not
                                         * fix the error:
                                         */
                                        ret = 0;
                                        set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
                                        continue;
                                }
                        } else if (ret) {
                                bch_err_msg(c, ret, "getting btree node");
                                break;
                        }

                        ret = bch2_gc_btree_init_recurse(trans, child,
                                                         target_depth);
                        six_unlock_read(&child->c.lock);

                        if (ret)
                                break;
                }
        }
fsck_err:
        bch2_bkey_buf_exit(&cur, c);
        bch2_btree_and_journal_iter_exit(&iter);
        printbuf_exit(&buf);
        return ret;
}

static int bch2_gc_btree_init(struct btree_trans *trans,
                              enum btree_id btree_id,
                              bool metadata_only)
{
        struct bch_fs *c = trans->c;
        struct btree *b;
        unsigned target_depth = metadata_only ? 1 : 0;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        b = bch2_btree_id_root(c, btree_id)->b;

        six_lock_read(&b->c.lock, NULL, NULL);
        printbuf_reset(&buf);
        bch2_bpos_to_text(&buf, b->data->min_key);
        if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
                                btree_root_bad_min_key,
                        "btree root with incorrect min_key: %s", buf.buf)) {
                bch_err(c, "repair unimplemented");
                ret = -BCH_ERR_fsck_repair_unimplemented;
                goto fsck_err;
        }

        printbuf_reset(&buf);
        bch2_bpos_to_text(&buf, b->data->max_key);
        if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
                                btree_root_bad_max_key,
                        "btree root with incorrect max_key: %s", buf.buf)) {
                bch_err(c, "repair unimplemented");
                ret = -BCH_ERR_fsck_repair_unimplemented;
                goto fsck_err;
        }

        if (b->c.level >= target_depth)
                ret = bch2_gc_btree_init_recurse(trans, b, target_depth);

        if (!ret) {
                struct bkey_s_c k = bkey_i_to_s_c(&b->key);

                ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, true,
                                       &k, true);
        }
fsck_err:
        six_unlock_read(&b->c.lock);

        bch_err_fn(c, ret);
        printbuf_exit(&buf);
        return ret;
}

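/*
 * Btrees are walked in gc phase order - see the bubble_sort() in
 * bch2_gc_btrees() - not btree_id order, so that c->gc_pos only ever moves
 * forwards.
 */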
static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
        return  (int) btree_id_to_gc_phase(l) -
                (int) btree_id_to_gc_phase(r);
}

static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
{
        struct btree_trans *trans = bch2_trans_get(c);
        enum btree_id ids[BTREE_ID_NR];
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++)
                ids[i] = i;
        bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);

        for (i = 0; i < BTREE_ID_NR && !ret; i++)
                ret = initial
                        ? bch2_gc_btree_init(trans, ids[i], metadata_only)
                        : bch2_gc_btree(trans, ids[i], initial, metadata_only);

        for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
                if (!bch2_btree_id_root(c, i)->alive)
                        continue;

                ret = initial
                        ? bch2_gc_btree_init(trans, i, metadata_only)
                        : bch2_gc_btree(trans, i, initial, metadata_only);
        }

        bch2_trans_put(trans);
        bch_err_fn(c, ret);
        return ret;
}

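/*
 * Superblock and journal buckets aren't referenced by btree keys, so they're
 * marked here directly, bucket by bucket, from the superblock layout and the
 * device's journal bucket list.
 */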
static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
                                  u64 start, u64 end,
                                  enum bch_data_type type,
                                  unsigned flags)
{
        u64 b = sector_to_bucket(ca, start);

        do {
                unsigned sectors =
                        min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

                bch2_mark_metadata_bucket(c, ca, b, type, sectors,
                                          gc_phase(GC_PHASE_SB), flags);
                b++;
                start += sectors;
        } while (start < end);
}

static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
                                     unsigned flags)
{
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        unsigned i;
        u64 b;

        for (i = 0; i < layout->nr_superblocks; i++) {
                u64 offset = le64_to_cpu(layout->sb_offset[i]);

                if (offset == BCH_SB_SECTOR)
                        mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
                                              BCH_DATA_sb, flags);

                mark_metadata_sectors(c, ca, offset,
                                      offset + (1 << layout->sb_max_size_bits),
                                      BCH_DATA_sb, flags);
        }

        for (i = 0; i < ca->journal.nr; i++) {
                b = ca->journal.buckets[i];
                bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
                                          ca->mi.bucket_size,
                                          gc_phase(GC_PHASE_SB), flags);
        }
}

static void bch2_mark_superblocks(struct bch_fs *c)
{
        mutex_lock(&c->sb_lock);
        gc_pos_set(c, gc_phase(GC_PHASE_SB));

        for_each_online_member(c, ca)
                bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
        mutex_unlock(&c->sb_lock);
}

#if 0
/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
        struct btree_update *as;
        struct pending_btree_node_free *d;

        mutex_lock(&c->btree_interior_update_lock);
        gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));

        for_each_pending_btree_node_free(c, as, d)
                if (d->index_update_done)
                        bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);

        mutex_unlock(&c->btree_interior_update_lock);
}
#endif

static void bch2_gc_free(struct bch_fs *c)
{
        genradix_free(&c->reflink_gc_table);
        genradix_free(&c->gc_stripes);

        for_each_member_device(c, ca) {
                kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
                ca->buckets_gc = NULL;

                free_percpu(ca->usage_gc);
                ca->usage_gc = NULL;
        }

        free_percpu(c->usage_gc);
        c->usage_gc = NULL;
}

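/*
 * gc is done: compare the usage counts gc reconstructed against the live
 * accounting; on a mismatch, report it via fsck_err() and, if fixing is
 * allowed, copy the gc numbers over the live ones.
 */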
1189 static int bch2_gc_done(struct bch_fs *c,
1190                         bool initial, bool metadata_only)
1191 {
1192         struct bch_dev *ca = NULL;
1193         struct printbuf buf = PRINTBUF;
1194         bool verify = !metadata_only &&
1195                 !c->opts.reconstruct_alloc &&
1196                 (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
1197         unsigned i;
1198         int ret = 0;
1199
1200         percpu_down_write(&c->mark_lock);
1201
1202 #define copy_field(_err, _f, _msg, ...)                                 \
1203         if (dst->_f != src->_f &&                                       \
1204             (!verify ||                                                 \
1205              fsck_err(c, _err, _msg ": got %llu, should be %llu"        \
1206                       , ##__VA_ARGS__, dst->_f, src->_f)))              \
1207                 dst->_f = src->_f
1208 #define copy_dev_field(_err, _f, _msg, ...)                             \
1209         copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
1210 #define copy_fs_field(_err, _f, _msg, ...)                              \
1211         copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
1212
1213         for (i = 0; i < ARRAY_SIZE(c->usage); i++)
1214                 bch2_fs_usage_acc_to_base(c, i);
1215
1216         __for_each_member_device(c, ca) {
1217                 struct bch_dev_usage *dst = ca->usage_base;
1218                 struct bch_dev_usage *src = (void *)
1219                         bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
1220                                              dev_usage_u64s());
1221
1222                 for (i = 0; i < BCH_DATA_NR; i++) {
1223                         copy_dev_field(dev_usage_buckets_wrong,
1224                                        d[i].buckets,    "%s buckets", bch2_data_type_str(i));
1225                         copy_dev_field(dev_usage_sectors_wrong,
1226                                        d[i].sectors,    "%s sectors", bch2_data_type_str(i));
1227                         copy_dev_field(dev_usage_fragmented_wrong,
1228                                        d[i].fragmented, "%s fragmented", bch2_data_type_str(i));
1229                 }
1230         }
1231
1232         {
1233                 unsigned nr = fs_usage_u64s(c);
1234                 struct bch_fs_usage *dst = c->usage_base;
1235                 struct bch_fs_usage *src = (void *)
1236                         bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
1237
1238                 copy_fs_field(fs_usage_hidden_wrong,
1239                               b.hidden,         "hidden");
1240                 copy_fs_field(fs_usage_btree_wrong,
1241                               b.btree,          "btree");
1242
1243                 if (!metadata_only) {
1244                         copy_fs_field(fs_usage_data_wrong,
1245                                       b.data,   "data");
1246                         copy_fs_field(fs_usage_cached_wrong,
1247                                       b.cached, "cached");
1248                         copy_fs_field(fs_usage_reserved_wrong,
1249                                       b.reserved,       "reserved");
1250                         copy_fs_field(fs_usage_nr_inodes_wrong,
1251                                       b.nr_inodes,"nr_inodes");
1252
1253                         for (i = 0; i < BCH_REPLICAS_MAX; i++)
1254                                 copy_fs_field(fs_usage_persistent_reserved_wrong,
1255                                               persistent_reserved[i],
1256                                               "persistent_reserved[%i]", i);
1257                 }
1258
1259                 for (i = 0; i < c->replicas.nr; i++) {
1260                         struct bch_replicas_entry_v1 *e =
1261                                 cpu_replicas_entry(&c->replicas, i);
1262
1263                         if (metadata_only &&
1264                             (e->data_type == BCH_DATA_user ||
1265                              e->data_type == BCH_DATA_cached))
1266                                 continue;
1267
1268                         printbuf_reset(&buf);
1269                         bch2_replicas_entry_to_text(&buf, e);
1270
1271                         copy_fs_field(fs_usage_replicas_wrong,
1272                                       replicas[i], "%s", buf.buf);
1273                 }
1274         }
1275
1276 #undef copy_fs_field
1277 #undef copy_dev_field
1278 #undef copy_stripe_field
1279 #undef copy_field
1280 fsck_err:
1281         if (ca)
1282                 percpu_ref_put(&ca->ref);
1283         bch_err_fn(c, ret);
1284
1285         percpu_up_write(&c->mark_lock);
1286         printbuf_exit(&buf);
1287         return ret;
1288 }
1289
static int bch2_gc_start(struct bch_fs *c)
{
        BUG_ON(c->usage_gc);

        c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
                                         sizeof(u64), GFP_KERNEL);
        if (!c->usage_gc) {
                bch_err(c, "error allocating c->usage_gc");
                return -BCH_ERR_ENOMEM_gc_start;
        }

        for_each_member_device(c, ca) {
                BUG_ON(ca->usage_gc);

                ca->usage_gc = alloc_percpu(struct bch_dev_usage);
                if (!ca->usage_gc) {
                        bch_err(c, "error allocating ca->usage_gc");
                        percpu_ref_put(&ca->ref);
                        return -BCH_ERR_ENOMEM_gc_start;
                }

                this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
                               ca->mi.nbuckets - ca->mi.first_bucket);
        }

        return 0;
}

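/* Throw away and reallocate the GC usage counters, for a repeat pass: */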
static int bch2_gc_reset(struct bch_fs *c)
{
        for_each_member_device(c, ca) {
                free_percpu(ca->usage_gc);
                ca->usage_gc = NULL;
        }

        free_percpu(c->usage_gc);
        c->usage_gc = NULL;

        return bch2_gc_start(c);
}

/* returns true if not equal */
static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
                                     struct bch_alloc_v4 r)
{
        return  l.gen != r.gen                          ||
                l.oldest_gen != r.oldest_gen            ||
                l.data_type != r.data_type              ||
                l.dirty_sectors != r.dirty_sectors      ||
                l.cached_sectors != r.cached_sectors    ||
                l.stripe_redundancy != r.stripe_redundancy ||
                l.stripe != r.stripe;
}

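/*
 * Compare one alloc key against the bucket state GC computed: each
 * mismatched field is reported via fsck_err() and, when we're fixing
 * errors, overwritten with GC's answer. The update is queued with
 * BTREE_TRIGGER_NORUN - usage is being repaired directly, so the
 * normal alloc triggers are deliberately not run.
 */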
static int bch2_alloc_write_key(struct btree_trans *trans,
                                struct btree_iter *iter,
                                struct bkey_s_c k,
                                bool metadata_only)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
        struct bucket old_gc, gc, *b;
        struct bkey_i_alloc_v4 *a;
        struct bch_alloc_v4 old_convert, new;
        const struct bch_alloc_v4 *old;
        int ret;

        old = bch2_alloc_to_v4(k, &old_convert);
        new = *old;

        percpu_down_read(&c->mark_lock);
        b = gc_bucket(ca, iter->pos.offset);
        old_gc = *b;

        if ((old->data_type == BCH_DATA_sb ||
             old->data_type == BCH_DATA_journal) &&
            !bch2_dev_is_online(ca)) {
                b->data_type = old->data_type;
                b->dirty_sectors = old->dirty_sectors;
        }

        /*
         * b->data_type doesn't yet include need_discard & need_gc_gen states -
         * fix that here:
         */
        b->data_type = __alloc_data_type(b->dirty_sectors,
                                         b->cached_sectors,
                                         b->stripe,
                                         *old,
                                         b->data_type);
        gc = *b;

        if (gc.data_type != old_gc.data_type ||
            gc.dirty_sectors != old_gc.dirty_sectors)
                bch2_dev_usage_update_m(c, ca, &old_gc, &gc);
        percpu_up_read(&c->mark_lock);

        if (metadata_only &&
            gc.data_type != BCH_DATA_sb &&
            gc.data_type != BCH_DATA_journal &&
            gc.data_type != BCH_DATA_btree)
                return 0;

        if (gen_after(old->gen, gc.gen))
                return 0;

        if (fsck_err_on(new.data_type != gc.data_type, c,
                        alloc_key_data_type_wrong,
                        "bucket %llu:%llu gen %u has wrong data_type"
                        ": got %s, should be %s",
                        iter->pos.inode, iter->pos.offset,
                        gc.gen,
                        bch2_data_type_str(new.data_type),
                        bch2_data_type_str(gc.data_type)))
                new.data_type = gc.data_type;

#define copy_bucket_field(_errtype, _f)                                 \
        if (fsck_err_on(new._f != gc._f, c, _errtype,                   \
                        "bucket %llu:%llu gen %u data type %s has wrong " #_f   \
                        ": got %u, should be %u",                       \
                        iter->pos.inode, iter->pos.offset,              \
                        gc.gen,                                         \
                        bch2_data_type_str(gc.data_type),               \
                        new._f, gc._f))                                 \
                new._f = gc._f;

        copy_bucket_field(alloc_key_gen_wrong,
                          gen);
        copy_bucket_field(alloc_key_dirty_sectors_wrong,
                          dirty_sectors);
        copy_bucket_field(alloc_key_cached_sectors_wrong,
                          cached_sectors);
        copy_bucket_field(alloc_key_stripe_wrong,
                          stripe);
        copy_bucket_field(alloc_key_stripe_redundancy_wrong,
                          stripe_redundancy);
#undef copy_bucket_field

        if (!bch2_alloc_v4_cmp(*old, new))
                return 0;

        a = bch2_alloc_to_v4_mut(trans, k);
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
                return ret;

        a->v = new;

        /*
         * The trigger normally makes sure this is set, but we're not running
         * triggers:
         */
        if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
                a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

        ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
fsck_err:
        return ret;
}

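/*
 * Walk each device's range of the alloc btree, writing back buckets
 * that bch2_alloc_write_key() found to be inconsistent:
 */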
static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
{
        int ret = 0;

        for_each_member_device(c, ca) {
                ret = bch2_trans_run(c,
                        for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
                                        POS(ca->dev_idx, ca->mi.first_bucket),
                                        POS(ca->dev_idx, ca->mi.nbuckets - 1),
                                        BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
                                        NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
                                bch2_alloc_write_key(trans, &iter, k, metadata_only)));
                if (ret) {
                        percpu_ref_put(&ca->ref);
                        break;
                }
        }

        bch_err_fn(c, ret);
        return ret;
}

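/*
 * Allocate the shadow bucket arrays GC marks into (ca->buckets_gc)
 * and seed them with gens from the alloc btree. In metadata_only
 * mode, user/cached/parity state is carried over as-is, since those
 * references won't be walked.
 */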
static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
{
        for_each_member_device(c, ca) {
                struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
                                ca->mi.nbuckets * sizeof(struct bucket),
                                GFP_KERNEL|__GFP_ZERO);
                if (!buckets) {
                        percpu_ref_put(&ca->ref);
                        bch_err(c, "error allocating ca->buckets[gc]");
                        return -BCH_ERR_ENOMEM_gc_alloc_start;
                }

                buckets->first_bucket   = ca->mi.first_bucket;
                buckets->nbuckets       = ca->mi.nbuckets;
                rcu_assign_pointer(ca->buckets_gc, buckets);
        }

        int ret = bch2_trans_run(c,
                for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
                                   BTREE_ITER_PREFETCH, k, ({
                        struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
                        struct bucket *g = gc_bucket(ca, k.k->p.offset);

                        struct bch_alloc_v4 a_convert;
                        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

                        g->gen_valid    = 1;
                        g->gen          = a->gen;

                        if (metadata_only &&
                            (a->data_type == BCH_DATA_user ||
                             a->data_type == BCH_DATA_cached ||
                             a->data_type == BCH_DATA_parity)) {
                                g->data_type            = a->data_type;
                                g->dirty_sectors        = a->dirty_sectors;
                                g->cached_sectors       = a->cached_sectors;
                                g->stripe               = a->stripe;
                                g->stripe_redundancy    = a->stripe_redundancy;
                        }

                        0;
                })));
        bch_err_fn(c, ret);
        return ret;
}

static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
{
        for_each_member_device(c, ca) {
                struct bucket_array *buckets = gc_bucket_array(ca);
                struct bucket *g;

                for_each_bucket(g, buckets) {
                        if (metadata_only &&
                            (g->data_type == BCH_DATA_user ||
                             g->data_type == BCH_DATA_cached ||
                             g->data_type == BCH_DATA_parity))
                                continue;
                        g->data_type = 0;
                        g->dirty_sectors = 0;
                        g->cached_sectors = 0;
                }
        }
}

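/*
 * Compare one reflink key's refcount against the count GC accumulated
 * in c->reflink_gc_table: on mismatch, correct the refcount, or delete
 * the key outright if the correct count is zero. *idx tracks our
 * position in the gc table, which is in the same order as the btree
 * walk, so the whole pass is a single linear merge.
 */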
static int bch2_gc_write_reflink_key(struct btree_trans *trans,
                                     struct btree_iter *iter,
                                     struct bkey_s_c k,
                                     size_t *idx)
{
        struct bch_fs *c = trans->c;
        const __le64 *refcount = bkey_refcount_c(k);
        struct printbuf buf = PRINTBUF;
        struct reflink_gc *r;
        int ret = 0;

        if (!refcount)
                return 0;

        while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
               r->offset < k.k->p.offset)
                ++*idx;

        if (!r ||
            r->offset != k.k->p.offset ||
            r->size != k.k->size) {
                bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
                return -EINVAL;
        }

        if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
                        reflink_v_refcount_wrong,
                        "reflink key has wrong refcount:\n"
                        "  %s\n"
                        "  should be %u",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
                        r->refcount)) {
                struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
                ret = PTR_ERR_OR_ZERO(new);
                if (ret)
                        return ret;

                if (!r->refcount)
                        new->k.type = KEY_TYPE_deleted;
                else
                        *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
                ret = bch2_trans_update(trans, iter, new, 0);
        }
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
{
        size_t idx = 0;

        if (metadata_only)
                return 0;

        int ret = bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter,
                                BTREE_ID_reflink, POS_MIN,
                                BTREE_ITER_PREFETCH, k,
                                NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                        bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
        c->reflink_gc_nr = 0;
        return ret;
}

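/*
 * Build the reflink gc table with all refcounts zeroed; the mark path
 * then counts references into it as keys are walked, and
 * bch2_gc_write_reflink_key() checks the totals at the end of the pass:
 */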
static int bch2_gc_reflink_start(struct bch_fs *c,
                                 bool metadata_only)
{
        if (metadata_only)
                return 0;

        c->reflink_gc_nr = 0;

        int ret = bch2_trans_run(c,
                for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
                                   BTREE_ITER_PREFETCH, k, ({
                        const __le64 *refcount = bkey_refcount_c(k);

                        if (!refcount)
                                continue;

                        struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
                                                        c->reflink_gc_nr++, GFP_KERNEL);
                        if (!r) {
                                ret = -BCH_ERR_ENOMEM_gc_reflink_start;
                                break;
                        }

                        r->offset       = k.k->p.offset;
                        r->size         = k.k->size;
                        r->refcount     = 0;
                        0;
                })));

        bch_err_fn(c, ret);
        return ret;
}

static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
{
        struct genradix_iter iter;
        struct reflink_gc *r;

        genradix_for_each(&c->reflink_gc_table, iter, r)
                r->refcount = 0;
}

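/*
 * Check one stripe key's per-block sector counts against what GC
 * accumulated in c->gc_stripes; no gc entry means every block should
 * count zero sectors:
 */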
static int bch2_gc_write_stripes_key(struct btree_trans *trans,
                                     struct btree_iter *iter,
                                     struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;
        const struct bch_stripe *s;
        struct gc_stripe *m;
        bool bad = false;
        unsigned i;
        int ret = 0;

        if (k.k->type != KEY_TYPE_stripe)
                return 0;

        s = bkey_s_c_to_stripe(k).v;
        m = genradix_ptr(&c->gc_stripes, k.k->p.offset);

        for (i = 0; i < s->nr_blocks; i++) {
                u32 old = stripe_blockcount_get(s, i);
                u32 new = (m ? m->block_sectors[i] : 0);

                if (old != new) {
                        prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
                                   i, old, new);
                        bad = true;
                }
        }

        if (bad)
                bch2_bkey_val_to_text(&buf, c, k);

        if (fsck_err_on(bad, c, stripe_sector_count_wrong,
                        "%s", buf.buf)) {
                struct bkey_i_stripe *new;

                new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
                ret = PTR_ERR_OR_ZERO(new);
                if (ret)
                        return ret;

                bkey_reassemble(&new->k_i, k);

                for (i = 0; i < new->v.nr_blocks; i++)
                        stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);

                ret = bch2_trans_update(trans, iter, &new->k_i, 0);
        }
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
{
        if (metadata_only)
                return 0;

        return bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter,
                                BTREE_ID_stripes, POS_MIN,
                                BTREE_ITER_PREFETCH, k,
                                NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                        bch2_gc_write_stripes_key(trans, &iter, k)));
}

static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
{
        genradix_free(&c->gc_stripes);
}

/**
 * bch2_gc() - walk _all_ references to buckets, and recompute them
 *
 * @c:                  filesystem object
 * @initial:            are we in recovery?
 * @metadata_only:      are we just checking metadata references, or everything?
 *
 * Returns: 0 on success, or standard errcode on failure
 *
 * Order matters here:
 *  - Concurrent GC relies on the fact that we have a total ordering for
 *    everything that GC walks - see gc_will_visit_node(),
 *    gc_will_visit_root()
 *
 *  - Also, references move around in the course of index updates and
 *    various other operations: everything needs to agree on the order
 *    in which references are allowed to move - e.g., we're allowed to
 *    start with a reference owned by an open_bucket (the allocator) and
 *    move it to the btree, but not the reverse.
 *
 *    This is necessary to ensure that gc doesn't miss references that
 *    move around - if references move backwards in the ordering GC
 *    uses, GC could skip past them.
 */
int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
        unsigned iter = 0;
        int ret;

        lockdep_assert_held(&c->state_lock);

        down_write(&c->gc_lock);

        bch2_btree_interior_updates_flush(c);

        ret   = bch2_gc_start(c) ?:
                bch2_gc_alloc_start(c, metadata_only) ?:
                bch2_gc_reflink_start(c, metadata_only);
        if (ret)
                goto out;
again:
        gc_pos_set(c, gc_phase(GC_PHASE_START));

        bch2_mark_superblocks(c);

        ret = bch2_gc_btrees(c, initial, metadata_only);

        if (ret)
                goto out;

#if 0
        bch2_mark_pending_btree_node_frees(c);
#endif
        c->gc_count++;

        if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
            (!iter && bch2_test_restart_gc)) {
                if (iter++ > 2) {
                        bch_info(c, "Unable to fix bucket gens, looping");
                        ret = -EINVAL;
                        goto out;
                }

                /*
                 * XXX: make sure gens we fixed got saved
                 */
                bch_info(c, "Second GC pass needed, restarting:");
                clear_bit(BCH_FS_need_another_gc, &c->flags);
                __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));

                bch2_gc_stripes_reset(c, metadata_only);
                bch2_gc_alloc_reset(c, metadata_only);
                bch2_gc_reflink_reset(c, metadata_only);
                ret = bch2_gc_reset(c);
                if (ret)
                        goto out;

                /* flush fsck errors, reset counters */
                bch2_flush_fsck_errs(c);
                goto again;
        }
out:
        if (!ret) {
                bch2_journal_block(&c->journal);

                ret   = bch2_gc_alloc_done(c, metadata_only) ?:
                        bch2_gc_done(c, initial, metadata_only) ?:
                        bch2_gc_stripes_done(c, metadata_only) ?:
                        bch2_gc_reflink_done(c, metadata_only);

                bch2_journal_unblock(&c->journal);
        }

        percpu_down_write(&c->mark_lock);
        /* Indicates that gc is no longer in progress: */
        __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));

        bch2_gc_free(c);
        percpu_up_write(&c->mark_lock);

        up_write(&c->gc_lock);

        /*
         * At startup, allocations can happen directly instead of via the
         * allocator thread - issue wakeup in case they blocked on gc_lock:
         */
        closure_wake_up(&c->freelist_wait);
        bch_err_fn(c, ret);
        return ret;
}

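/*
 * If any pointer in the key is more than 16 gens stale, rewrite the
 * key through bch2_extent_normalize() so stale cached pointers get
 * dropped; otherwise lower ca->oldest_gen towards the oldest gen still
 * referenced by each pointer:
 */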
static int gc_btree_gens_key(struct btree_trans *trans,
                             struct btree_iter *iter,
                             struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        struct bkey_i *u;
        int ret;

        percpu_down_read(&c->mark_lock);
        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

                if (ptr_stale(ca, ptr) > 16) {
                        percpu_up_read(&c->mark_lock);
                        goto update;
                }
        }

        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];

                if (gen_after(*gen, ptr->gen))
                        *gen = ptr->gen;
        }
        percpu_up_read(&c->mark_lock);
        return 0;
update:
        u = bch2_bkey_make_mut(trans, iter, &k, 0);
        ret = PTR_ERR_OR_ZERO(u);
        if (ret)
                return ret;

        bch2_extent_normalize(c, bkey_i_to_s(u));
        return 0;
}

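/*
 * Write the oldest_gen computed by gc_btree_gens_key() back to the
 * alloc key, recomputing data_type, which depends on it:
 */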
static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
                                       struct bkey_s_c k)
{
        struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
        struct bkey_i_alloc_v4 *a_mut;
        int ret;

        if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
                return 0;

        a_mut = bch2_alloc_to_v4_mut(trans, k);
        ret = PTR_ERR_OR_ZERO(a_mut);
        if (ret)
                return ret;

        a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
        a_mut->v.data_type = alloc_data_type(a_mut->v, a_mut->v.data_type);

        return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}

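/*
 * bch2_gc_gens(): walk all keys with pointers to recompute each
 * bucket's oldest referenced gen, then write the results back via the
 * alloc btree. Bucket gens are only 8 bits, so tracking the oldest gen
 * still in use is what lets them keep advancing safely; this is much
 * cheaper than full mark-and-sweep, since keys are only rewritten when
 * a pointer is excessively stale.
 */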
int bch2_gc_gens(struct bch_fs *c)
{
        u64 b, start_time = local_clock();
        int ret;

        /*
         * Ideally we would be using state_lock and not gc_lock here, but that
         * introduces a deadlock in the RO path - we currently take the state
         * lock at the start of going RO, thus the gc thread may get stuck:
         */
        if (!mutex_trylock(&c->gc_gens_lock))
                return 0;

        trace_and_count(c, gc_gens_start, c);
        down_read(&c->gc_lock);

        for_each_member_device(c, ca) {
                struct bucket_gens *gens = bucket_gens(ca);

                BUG_ON(ca->oldest_gen);

                ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
                if (!ca->oldest_gen) {
                        percpu_ref_put(&ca->ref);
                        ret = -BCH_ERR_ENOMEM_gc_gens;
                        goto err;
                }

                for (b = gens->first_bucket;
                     b < gens->nbuckets; b++)
                        ca->oldest_gen[b] = gens->b[b];
        }

        for (unsigned i = 0; i < BTREE_ID_NR; i++)
                if (btree_type_has_ptrs(i)) {
                        c->gc_gens_btree = i;
                        c->gc_gens_pos = POS_MIN;

                        ret = bch2_trans_run(c,
                                for_each_btree_key_commit(trans, iter, i,
                                                POS_MIN,
                                                BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
                                                k,
                                                NULL, NULL,
                                                BCH_TRANS_COMMIT_no_enospc,
                                        gc_btree_gens_key(trans, &iter, k)));
                        if (ret)
                                goto err;
                }

        ret = bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
                                POS_MIN,
                                BTREE_ITER_PREFETCH,
                                k,
                                NULL, NULL,
                                BCH_TRANS_COMMIT_no_enospc,
                        bch2_alloc_write_oldest_gen(trans, &iter, k)));
        if (ret)
                goto err;

        c->gc_gens_btree        = 0;
        c->gc_gens_pos          = POS_MIN;

        c->gc_count++;

        bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
        trace_and_count(c, gc_gens_end, c);
err:
        for_each_member_device(c, ca) {
                kvfree(ca->oldest_gen);
                ca->oldest_gen = NULL;
        }

        up_read(&c->gc_lock);
        mutex_unlock(&c->gc_gens_lock);
        if (!bch2_err_matches(ret, EROFS))
                bch_err_fn(c, ret);
        return ret;
}

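/*
 * Background GC thread: runs when kicked via c->kick_gc, or - if
 * c->btree_gc_periodic is set - whenever the write IO clock advances
 * c->capacity / 16 past the last run, i.e. roughly every time ~6% of
 * the filesystem's capacity has been written:
 */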
static int bch2_gc_thread(void *arg)
{
        struct bch_fs *c = arg;
        struct io_clock *clock = &c->io_clock[WRITE];
        unsigned long last = atomic64_read(&clock->now);
        unsigned last_kick = atomic_read(&c->kick_gc);

        set_freezable();

        while (1) {
                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        if (kthread_should_stop()) {
                                __set_current_state(TASK_RUNNING);
                                return 0;
                        }

                        if (atomic_read(&c->kick_gc) != last_kick)
                                break;

                        if (c->btree_gc_periodic) {
                                unsigned long next = last + c->capacity / 16;

                                if (atomic64_read(&clock->now) >= next)
                                        break;

                                bch2_io_clock_schedule_timeout(clock, next);
                        } else {
                                schedule();
                        }

                        try_to_freeze();
                }
                __set_current_state(TASK_RUNNING);

                last = atomic64_read(&clock->now);
                last_kick = atomic_read(&c->kick_gc);

                /*
                 * Full gc is currently incompatible with btree key cache:
                 */
#if 0
                ret = bch2_gc(c, false, false);
#else
                bch2_gc_gens(c);
#endif
                debug_check_no_locks_held();
        }

        return 0;
}

void bch2_gc_thread_stop(struct bch_fs *c)
{
        struct task_struct *p;

        p = c->gc_thread;
        c->gc_thread = NULL;

        if (p) {
                kthread_stop(p);
                put_task_struct(p);
        }
}

int bch2_gc_thread_start(struct bch_fs *c)
{
        struct task_struct *p;

        if (c->gc_thread)
                return 0;

        p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
        if (IS_ERR(p)) {
                bch_err_fn(c, PTR_ERR(p));
                return PTR_ERR(p);
        }

        get_task_struct(p);
        c->gc_thread = p;
        wake_up_process(p);
        return 0;
}