// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

/* Persistent alloc info: */

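/*
 * Byte size of each v1 field, generated from the BCH_ALLOC_FIELDS_V1()
 * x-macro list; illustratively, an entry such as x(read_time, 16) would
 * expand to [BCH_ALLOC_FIELD_V1_read_time] = 2.
 */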
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

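/*
 * In-memory, unpacked form of the on-disk alloc key formats (v1-v3);
 * bch2_alloc_unpack() below fills it in from whichever key type is read.
 */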
struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef  x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

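/*
 * Each field is stored as a varint; assigning the decoded u64 into the
 * narrower struct field and checking that it round-trips catches on-disk
 * values too large for the in-memory representation:
 */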
#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
			 alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags, struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int ret = 0;

	bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
			 alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s(a.v), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
			 alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
			 alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

	switch (a.v->data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
				 c, err, alloc_key_empty_but_have_data,
				 "empty data type free but have data");
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
				 c, err, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.v->data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.v->cached_sectors ||
				 bch2_bucket_sectors_dirty(*a.v) ||
				 a.v->stripe,
				 c, err, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.v->io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, err, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->fragmentation_lru	= swab64(a->fragmentation_lru);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset	= swab40(bp->bucket_offset);
		bp->bucket_len		= swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq       %llu",	a->journal_seq);
	prt_newline(out);
	prt_printf(out, "need_discard      %llu",	BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_newline(out);
	prt_printf(out, "need_inc_gen      %llu",	BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_newline(out);
	prt_printf(out, "dirty_sectors     %u",	a->dirty_sectors);
	prt_newline(out);
	prt_printf(out, "cached_sectors    %u",	a->cached_sectors);
	prt_newline(out);
	prt_printf(out, "stripe            %u",	a->stripe);
	prt_newline(out);
	prt_printf(out, "stripe_redundancy %u",	a->stripe_redundancy);
	prt_newline(out);
	prt_printf(out, "io_time[READ]     %llu",	a->io_time[READ]);
	prt_newline(out);
	prt_printf(out, "io_time[WRITE]    %llu",	a->io_time[WRITE]);
	prt_newline(out);
	prt_printf(out, "fragmentation     %llu",	a->fragmentation_lru);
	prt_newline(out);
	prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

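/*
 * Fast path: an existing alloc_v4 key with no backpointers only needs to be
 * copied into transaction memory; anything else goes through the full
 * conversion in __bch2_alloc_to_v4_mut():
 */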
static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
			      struct bpos pos)
{
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	int ret;

	k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_WITH_UPDATES|
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}
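/*
 * Worked example (assuming KEY_TYPE_BUCKET_GENS_BITS == 8, i.e. 256 gens per
 * bucket_gens key): alloc position 2:1000 maps to bucket_gens key 2:3 at
 * offset 1000 & 255 = 232, and bucket_gens_pos_to_alloc(2:3, 232) maps back
 * to 2:1000.
 */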

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
			 bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

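/*
 * Build the bucket_gens btree from the alloc btree: walk every alloc key,
 * accumulating gens into one bucket_gens key at a time and committing each
 * key once the iterator advances past the range it covers.
 */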
int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_PREFETCH, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);

		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
			ret = commit_do(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc,
				bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
			if (ret)
				break;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
		0;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret;

	down_read(&c->gc_lock);

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_PREFETCH, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_exists2(c, k.k->p.inode))
				continue;

			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_PREFETCH, k, ({
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_bucket_exists(c, k.k->p))
				continue;

			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_trans_put(trans);
	up_read(&c->gc_lock);

	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

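/*
 * Keep the need_discard/freespace btrees in sync with an alloc key: buckets
 * in those states are represented by KEY_TYPE_set keys, which this helper
 * sets or clears to match the bucket's data_type.
 */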
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
			     bkey_start_pos(&k->k),
			     BTREE_ITER_INTENT);
	ret = bkey_err(old);
	if (ret)
		return ret;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_id_str(btree),
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_INTENT|
			       BTREE_ITER_WITH_UPDATES);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

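/*
 * Alloc trigger: the transactional part (BTREE_TRIGGER_TRANSACTIONAL) keeps
 * the need_discard/freespace btrees, the LRUs and the bucket_gens btree in
 * sync with the new alloc key; the atomic part (BTREE_TRIGGER_ATOMIC)
 * updates in-memory usage and kicks off discards/invalidates/gc_gens; the
 * gc part updates the in-memory gc bucket to match.
 */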
int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
				       "alloc key for invalid device or bucket"))
		return -EIO;

	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;

		new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

		if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
			new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
				bch2_bucket_do_index(trans, new.s_c, new_a, true);
			if (ret)
				return ret;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

		u64 old_lru = alloc_lru_idx_read(*old_a);
		u64 new_lru = alloc_lru_idx_read(*new_a);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans, new.k->p.inode,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				return ret;
		}

		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
						bch_dev_bkey_exists(c, new.k->p.inode));
		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
			ret = bch2_lru_change(trans,
					BCH_LRU_FRAGMENTATION_START,
					bucket_to_u64(new.k->p),
					old_a->fragmentation_lru, new_a->fragmentation_lru);
			if (ret)
				return ret;
		}

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				return ret;
		}

		/*
		 * need to know if we're getting called from the invalidate path or
		 * not:
		 */

		if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
		    old_a->cached_sectors) {
			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
							      -((s64) old_a->cached_sectors));
			if (ret)
				return ret;
		}
	}

	if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
		u64 journal_seq = trans->journal_res.seq;
		u64 bucket_journal_seq = new_a->journal_seq;

		if ((flags & BTREE_TRIGGER_INSERT) &&
		    data_type_is_empty(old_a->data_type) !=
		    data_type_is_empty(new_a->data_type) &&
		    new.k->type == KEY_TYPE_alloc_v4) {
			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

			/*
			 * If the btree updates referring to a bucket weren't flushed
			 * before the bucket became empty again, then we don't have
			 * to wait on a journal flush before we can reuse the bucket:
			 */
			v->journal_seq = bucket_journal_seq =
				data_type_is_empty(new_a->data_type) &&
				(journal_seq == v->journal_seq ||
				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
				? 0 : journal_seq;
		}

		if (!data_type_is_empty(old_a->data_type) &&
		    data_type_is_empty(new_a->data_type) &&
		    bucket_journal_seq) {
			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					c->journal.flushed_seq_ondisk,
					new.k->p.inode, new.k->p.offset,
					bucket_journal_seq);
			if (ret) {
				bch2_fs_fatal_error(c,
					"error setting bucket_needs_journal_commit: %i", ret);
				return ret;
			}
		}

		percpu_down_read(&c->mark_lock);
		if (new_a->gen != old_a->gen)
			*bucket_gen(ca, new.k->p.offset) = new_a->gen;

		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);

		if (new_a->data_type == BCH_DATA_free &&
		    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
			closure_wake_up(&c->freelist_wait);

		if (new_a->data_type == BCH_DATA_need_discard &&
		    (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
			bch2_do_discards(c);

		if (old_a->data_type != BCH_DATA_cached &&
		    new_a->data_type == BCH_DATA_cached &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_do_invalidates(c);

		if (new_a->data_type == BCH_DATA_need_gc_gens)
			bch2_do_gc_gens(c);
		percpu_up_read(&c->mark_lock);
	}

	if ((flags & BTREE_TRIGGER_GC) &&
	    (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
		struct bch_alloc_v4 new_a_convert;
		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);

		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, new.k->p.offset);

		bucket_lock(g);

		g->gen_valid		= 1;
		g->gen			= new_a->gen;
		g->data_type		= new_a->data_type;
		g->stripe		= new_a->stripe;
		g->stripe_redundancy	= new_a->stripe_redundancy;
		g->dirty_sectors	= new_a->dirty_sectors;
		g->cached_sectors	= new_a->cached_sectors;

		bucket_unlock(g);
		percpu_up_read(&c->mark_lock);
	}

	return 0;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
 * extents style btrees, but works on non-extents btrees:
 */
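/*
 * Illustrative example: with keys at bucket offsets 8 and 12, peeking at
 * offset 9 synthesizes a deleted "hole" key covering buckets [9, 12).
 */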
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, while
		 * bch2_btree_iter_peek_upto() takes a half open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}

static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
{
	struct bch_dev *ca;

	if (bch2_dev_bucket_exists(c, *bucket))
		return true;

	if (bch2_dev_exists2(c, bucket->inode)) {
		ca = bch_dev_bkey_exists(c, bucket->inode);

		if (bucket->offset < ca->mi.first_bucket) {
			bucket->offset = ca->mi.first_bucket;
			return true;
		}

		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (ca)
		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
	rcu_read_unlock();

	return ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	if (!k.k->type) {
		struct bpos bucket = bkey_start_pos(k.k);

		if (!bch2_dev_bucket_exists(c, bucket)) {
			if (!next_bucket(c, &bucket))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, bucket);
			goto again;
		}

		if (!bch2_dev_bucket_exists(c, k.k->p)) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

			bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
		}
	}

	return k;
}

static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != discard_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, need_discard_key_wrong,
		      "incorrect key in need_discard btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[discard_key_type],
		      (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != freespace_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, freespace_key_wrong,
		      "incorrect key in freespace btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[freespace_key_type],
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (a->gen != alloc_gen(k, gens_offset) &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, bucket_gens_key_wrong,
		      "incorrect gen in bucket_gens btree (got %u should be %u)\n"
		      "  %s",
		      alloc_gen(k, gens_offset), a->gen,
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	ca = bch_dev_bkey_exists(c, start.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (k.k->type != KEY_TYPE_set &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, freespace_hole_missing,
		      "hole in alloc btree missing in freespace btree\n"
		      "  device %llu buckets %llu-%llu",
		      freespace_iter->pos.inode,
		      freespace_iter->pos.offset,
		      end->offset))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= KEY_TYPE_set;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end,  &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], c,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
					      struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

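	/*
	 * A freespace btree position packs generation bits into the high 8
	 * bits of the offset (see alloc_freespace_pos()); strip them to get
	 * the bucket. For the need_discard btree they're zero:
	 */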
	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			need_discard_freespace_key_to_invalid_dev_bucket,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
1293                         bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
1294                 goto delete;
1295
1296         a = bch2_alloc_to_v4(alloc_k, &a_convert);
1297
1298         if (fsck_err_on(a->data_type != state ||
1299                         (state == BCH_DATA_free &&
1300                          genbits != alloc_freespace_genbits(*a)), c,
1301                         need_discard_freespace_key_bad,
1302                         "%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
1303                         (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
1304                         bch2_btree_id_str(iter->btree_id),
1305                         iter->pos.inode,
1306                         iter->pos.offset,
1307                         a->data_type == state,
1308                         genbits >> 56, alloc_freespace_genbits(*a) >> 56))
1309                 goto delete;
1310 out:
1311 fsck_err:
1312         set_btree_iter_dontneed(&alloc_iter);
1313         bch2_trans_iter_exit(trans, &alloc_iter);
1314         printbuf_exit(&buf);
1315         return ret;
1316 delete:
1317         ret =   bch2_btree_delete_extent_at(trans, iter,
1318                         iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
1319                 bch2_trans_commit(trans, NULL, NULL,
1320                         BCH_TRANS_COMMIT_no_enospc);
1321         goto out;
1322 }
1323
1324 /*
1325  * We've already checked that generation numbers in the bucket_gens btree are
1326  * valid for buckets that exist; this just checks for keys for nonexistent
1327  * buckets.
1328  */
1329 static noinline_for_stack
1330 int bch2_check_bucket_gens_key(struct btree_trans *trans,
1331                                struct btree_iter *iter,
1332                                struct bkey_s_c k)
1333 {
1334         struct bch_fs *c = trans->c;
1335         struct bkey_i_bucket_gens g;
1336         struct bch_dev *ca;
1337         u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
1338         u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
1339         u64 b;
1340         bool need_update = false, dev_exists;
1341         struct printbuf buf = PRINTBUF;
1342         int ret = 0;
1343
1344         BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
1345         bkey_reassemble(&g.k_i, k);
1346
1347         /* if no bch_dev, skip out whether we repair or not */
1348         dev_exists = bch2_dev_exists2(c, k.k->p.inode);
1349         if (!dev_exists) {
1350                 if (fsck_err_on(!dev_exists, c,
1351                                 bucket_gens_to_invalid_dev,
1352                                 "bucket_gens key for invalid device:\n  %s",
1353                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1354                         ret = bch2_btree_delete_at(trans, iter, 0);
1355                 }
1356                 goto out;
1357         }
1358
1359         ca = bch_dev_bkey_exists(c, k.k->p.inode);
1360         if (fsck_err_on(end <= ca->mi.first_bucket ||
1361                         start >= ca->mi.nbuckets, c,
1362                         bucket_gens_to_invalid_buckets,
1363                         "bucket_gens key for invalid buckets:\n  %s",
1364                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1365                 ret = bch2_btree_delete_at(trans, iter, 0);
1366                 goto out;
1367         }
1368
1369         for (b = start; b < ca->mi.first_bucket; b++)
1370                 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
1371                                 bucket_gens_nonzero_for_invalid_buckets,
1372                                 "bucket_gens key has nonzero gen for invalid bucket")) {
1373                         g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1374                         need_update = true;
1375                 }
1376
1377         for (b = ca->mi.nbuckets; b < end; b++)
1378                 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
1379                                 bucket_gens_nonzero_for_invalid_buckets,
1380                                 "bucket_gens key has nonzero gen for invalid bucket")) {
1381                         g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1382                         need_update = true;
1383                 }
1384
1385         if (need_update) {
1386                 struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
1387
1388                 ret = PTR_ERR_OR_ZERO(u);
1389                 if (ret)
1390                         goto out;
1391
1392                 memcpy(u, &g, sizeof(g));
1393                 ret = bch2_trans_update(trans, iter, u, 0);
1394         }
1395 out:
1396 fsck_err:
1397         printbuf_exit(&buf);
1398         return ret;
1399 }
1400
1401 int bch2_check_alloc_info(struct bch_fs *c)
1402 {
1403         struct btree_trans *trans = bch2_trans_get(c);
1404         struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
1405         struct bkey hole;
1406         struct bkey_s_c k;
1407         int ret = 0;
1408
1409         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
1410                              BTREE_ITER_PREFETCH);
1411         bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
1412                              BTREE_ITER_PREFETCH);
1413         bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
1414                              BTREE_ITER_PREFETCH);
1415         bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
1416                              BTREE_ITER_PREFETCH);
1417
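        /*
         * Walk the alloc btree in lockstep with the need_discard, freespace
         * and bucket_gens btrees: each live alloc key (or hole in the alloc
         * btree) is checked against the corresponding entries in the other
         * three.
         */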
1418         while (1) {
1419                 struct bpos next;
1420
1421                 bch2_trans_begin(trans);
1422
1423                 k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
1424                 ret = bkey_err(k);
1425                 if (ret)
1426                         goto bkey_err;
1427
1428                 if (!k.k)
1429                         break;
1430
1431                 if (k.k->type) {
1432                         next = bpos_nosnap_successor(k.k->p);
1433
1434                         ret = bch2_check_alloc_key(trans,
1435                                                    k, &iter,
1436                                                    &discard_iter,
1437                                                    &freespace_iter,
1438                                                    &bucket_gens_iter);
1439                         if (ret)
1440                                 goto bkey_err;
1441                 } else {
1442                         next = k.k->p;
1443
1444                         ret = bch2_check_alloc_hole_freespace(trans,
1445                                                     bkey_start_pos(k.k),
1446                                                     &next,
1447                                                     &freespace_iter) ?:
1448                                 bch2_check_alloc_hole_bucket_gens(trans,
1449                                                     bkey_start_pos(k.k),
1450                                                     &next,
1451                                                     &bucket_gens_iter);
1452                         if (ret)
1453                                 goto bkey_err;
1454                 }
1455
1456                 ret = bch2_trans_commit(trans, NULL, NULL,
1457                                         BCH_TRANS_COMMIT_no_enospc);
1458                 if (ret)
1459                         goto bkey_err;
1460
1461                 bch2_btree_iter_set_pos(&iter, next);
1462 bkey_err:
1463                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1464                         continue;
1465                 if (ret)
1466                         break;
1467         }
1468         bch2_trans_iter_exit(trans, &bucket_gens_iter);
1469         bch2_trans_iter_exit(trans, &freespace_iter);
1470         bch2_trans_iter_exit(trans, &discard_iter);
1471         bch2_trans_iter_exit(trans, &iter);
1472
1473         if (ret < 0)
1474                 goto err;
1475
1476         ret = for_each_btree_key(trans, iter,
1477                         BTREE_ID_need_discard, POS_MIN,
1478                         BTREE_ITER_PREFETCH, k,
1479                 bch2_check_discard_freespace_key(trans, &iter));
1480         if (ret)
1481                 goto err;
1482
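        /*
         * This pass over the freespace btree is open coded rather than using
         * the for_each_btree_key() helpers because, when repairing,
         * bch2_check_discard_freespace_key() may delete the key the iterator
         * is at - hence the explicit bpos_nosnap_successor() advance below.
         */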
1483         bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
1484                              BTREE_ITER_PREFETCH);
1485         while (1) {
1486                 bch2_trans_begin(trans);
1487                 k = bch2_btree_iter_peek(&iter);
1488                 if (!k.k)
1489                         break;
1490
1491                 ret = bkey_err(k) ?:
1492                         bch2_check_discard_freespace_key(trans, &iter);
1493                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1494                         ret = 0;
1495                         continue;
1496                 }
1497                 if (ret) {
1498                         struct printbuf buf = PRINTBUF;
1499                         bch2_bkey_val_to_text(&buf, c, k);
1500
1501                         bch_err(c, "while checking %s", buf.buf);
1502                         printbuf_exit(&buf);
1503                         break;
1504                 }
1505
1506                 bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
1507         }
1508         bch2_trans_iter_exit(trans, &iter);
1509         if (ret)
1510                 goto err;
1511
1512         ret = for_each_btree_key_commit(trans, iter,
1513                         BTREE_ID_bucket_gens, POS_MIN,
1514                         BTREE_ITER_PREFETCH, k,
1515                         NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1516                 bch2_check_bucket_gens_key(trans, &iter, k));
1517 err:
1518         bch2_trans_put(trans);
1519         bch_err_fn(c, ret);
1520         return ret;
1521 }
1522
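/*
 * Every cached bucket should have a corresponding entry in the lru btree,
 * keyed by (device, bucket, read time): check the alloc -> lru direction,
 * repairing by recreating the lru entry (a zero read time, which would yield
 * a bogus lru position, is itself repaired first).
 */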
1523 static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
1524                                        struct btree_iter *alloc_iter)
1525 {
1526         struct bch_fs *c = trans->c;
1527         struct btree_iter lru_iter;
1528         struct bch_alloc_v4 a_convert;
1529         const struct bch_alloc_v4 *a;
1530         struct bkey_s_c alloc_k, lru_k;
1531         struct printbuf buf = PRINTBUF;
1532         int ret;
1533
1534         alloc_k = bch2_btree_iter_peek(alloc_iter);
1535         if (!alloc_k.k)
1536                 return 0;
1537
1538         ret = bkey_err(alloc_k);
1539         if (ret)
1540                 return ret;
1541
1542         a = bch2_alloc_to_v4(alloc_k, &a_convert);
1543
1544         if (a->data_type != BCH_DATA_cached)
1545                 return 0;
1546
1547         if (fsck_err_on(!a->io_time[READ], c,
1548                         alloc_key_cached_but_read_time_zero,
1549                         "cached bucket with read_time 0\n"
1550                         "  %s",
1551                 (printbuf_reset(&buf),
1552                  bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1553                 struct bkey_i_alloc_v4 *a_mut =
1554                         bch2_alloc_to_v4_mut(trans, alloc_k);
1555                 ret = PTR_ERR_OR_ZERO(a_mut);
1556                 if (ret)
1557                         goto err;
1558
1559                 a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
1560                 ret = bch2_trans_update(trans, alloc_iter,
1561                                         &a_mut->k_i, BTREE_TRIGGER_NORUN);
1562                 if (ret)
1563                         goto err;
1564
1565                 a = &a_mut->v;
1566         }
1567
1568         lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
1569                              lru_pos(alloc_k.k->p.inode,
1570                                      bucket_to_u64(alloc_k.k->p),
1571                                      a->io_time[READ]), 0);
1572         ret = bkey_err(lru_k);
1573         if (ret)
1574                 return ret;
1575
1576         if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
1577                         alloc_key_to_missing_lru_entry,
1578                         "missing lru entry\n"
1579                         "  %s",
1580                         (printbuf_reset(&buf),
1581                          bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1582                 ret = bch2_lru_set(trans,
1583                                    alloc_k.k->p.inode,
1584                                    bucket_to_u64(alloc_k.k->p),
1585                                    a->io_time[READ]);
1586                 if (ret)
1587                         goto err;
1588         }
1589 err:
1590 fsck_err:
1591         bch2_trans_iter_exit(trans, &lru_iter);
1592         printbuf_exit(&buf);
1593         return ret;
1594 }
1595
1596 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
1597 {
1598         int ret = bch2_trans_run(c,
1599                 for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
1600                                 POS_MIN, BTREE_ITER_PREFETCH, k,
1601                                 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1602                         bch2_check_alloc_to_lru_ref(trans, &iter)));
1603         bch_err_fn(c, ret);
1604         return ret;
1605 }
1606
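/*
 * Counters for one pass of the discard worker; if a device accumulates more
 * buckets waiting on a journal commit than it has free, we kick off a journal
 * flush (see discard_buckets_next_dev()).
 */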
1607 struct discard_buckets_state {
1608         u64             seen;
1609         u64             open;
1610         u64             need_journal_commit;
1611         u64             discarded;
1612         struct bch_dev  *ca;
1613         u64             need_journal_commit_this_dev;
1614 };
1615
1616 static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
1617 {
1618         if (s->ca == ca)
1619                 return;
1620
1621         if (s->ca && s->need_journal_commit_this_dev >
1622             bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
1623                 bch2_journal_flush_async(&c->journal, NULL);
1624
1625         if (s->ca)
1626                 percpu_ref_put(&s->ca->ref);
1627         if (ca)
1628                 percpu_ref_get(&ca->ref);
1629         s->ca = ca;
1630         s->need_journal_commit_this_dev = 0;
1631 }
1632
1633 static int bch2_discard_one_bucket(struct btree_trans *trans,
1634                                    struct btree_iter *need_discard_iter,
1635                                    struct bpos *discard_pos_done,
1636                                    struct discard_buckets_state *s)
1637 {
1638         struct bch_fs *c = trans->c;
1639         struct bpos pos = need_discard_iter->pos;
1640         struct btree_iter iter = { NULL };
1641         struct bkey_s_c k;
1642         struct bch_dev *ca;
1643         struct bkey_i_alloc_v4 *a;
1644         struct printbuf buf = PRINTBUF;
1645         int ret = 0;
1646
1647         ca = bch_dev_bkey_exists(c, pos.inode);
1648
1649         if (!percpu_ref_tryget(&ca->io_ref)) {
1650                 bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
1651                 return 0;
1652         }
1653
1654         discard_buckets_next_dev(c, s, ca);
1655
1656         if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
1657                 s->open++;
1658                 goto out;
1659         }
1660
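        /*
         * The transaction that emptied this bucket hasn't been flushed to the
         * journal yet: discarding now could lose data if we crash before the
         * flush, so skip the bucket and count it towards the journal flush
         * heuristic:
         */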
1661         if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
1662                         c->journal.flushed_seq_ondisk,
1663                         pos.inode, pos.offset)) {
1664                 s->need_journal_commit++;
1665                 s->need_journal_commit_this_dev++;
1666                 goto out;
1667         }
1668
1669         k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
1670                                need_discard_iter->pos,
1671                                BTREE_ITER_CACHED);
1672         ret = bkey_err(k);
1673         if (ret)
1674                 goto out;
1675
1676         a = bch2_alloc_to_v4_mut(trans, k);
1677         ret = PTR_ERR_OR_ZERO(a);
1678         if (ret)
1679                 goto out;
1680
1681         if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
1682                 a->v.gen++;
1683                 SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
1684                 goto write;
1685         }
1686
1687         if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
1688                 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
1689                         bch2_trans_inconsistent(trans,
1690                                 "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
1691                                 "%s",
1692                                 a->v.journal_seq,
1693                                 c->journal.flushed_seq_ondisk,
1694                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
1695                         ret = -EIO;
1696                 }
1697                 goto out;
1698         }
1699
1700         if (a->v.data_type != BCH_DATA_need_discard) {
1701                 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
1702                         bch2_trans_inconsistent(trans,
1703                                 "bucket incorrectly set in need_discard btree\n"
1704                                 "%s",
1705                                 (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
1706                         ret = -EIO;
1707                 }
1708
1709                 goto out;
1710         }
1711
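        /*
         * discard_pos_done records the last bucket we actually issued a
         * discard for, so that if the transaction restarts after
         * blkdev_issue_discard() we don't issue a second discard for the same
         * bucket:
         */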
1712         if (!bkey_eq(*discard_pos_done, iter.pos) &&
1713             ca->mi.discard && !c->opts.nochanges) {
                /*
                 * This works without any other locks because this is the only
                 * thread that removes items from the need_discard btree
                 */
1718                 bch2_trans_unlock_long(trans);
1719                 blkdev_issue_discard(ca->disk_sb.bdev,
1720                                      k.k->p.offset * ca->mi.bucket_size,
1721                                      ca->mi.bucket_size,
1722                                      GFP_KERNEL);
1723                 *discard_pos_done = iter.pos;
1724
1725                 ret = bch2_trans_relock_notrace(trans);
1726                 if (ret)
1727                         goto out;
1728         }
1729
1730         SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
1731         a->v.data_type = alloc_data_type(a->v, a->v.data_type);
1732 write:
1733         ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
1734                 bch2_trans_commit(trans, NULL, NULL,
1735                                   BCH_WATERMARK_btree|
1736                                   BCH_TRANS_COMMIT_no_enospc);
1737         if (ret)
1738                 goto out;
1739
1740         count_event(c, bucket_discard);
1741         s->discarded++;
1742 out:
1743         s->seen++;
1744         bch2_trans_iter_exit(trans, &iter);
1745         percpu_ref_put(&ca->io_ref);
1746         printbuf_exit(&buf);
1747         return ret;
1748 }
1749
1750 static void bch2_do_discards_work(struct work_struct *work)
1751 {
1752         struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
1753         struct discard_buckets_state s = {};
1754         struct bpos discard_pos_done = POS_MAX;
1755         int ret;
1756
1757         /*
1758          * We're doing the commit in bch2_discard_one_bucket instead of using
1759          * for_each_btree_key_commit() so that we can increment counters after
1760          * successful commit:
1761          */
1762         ret = bch2_trans_run(c,
1763                 for_each_btree_key(trans, iter,
1764                                    BTREE_ID_need_discard, POS_MIN, 0, k,
1765                         bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
1766
1767         discard_buckets_next_dev(c, &s, NULL);
1768
1769         trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
1770                               bch2_err_str(ret));
1771
1772         bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1773 }
1774
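/*
 * Kick off the discard worker: the write ref prevents the filesystem from
 * going read-only while the work item is outstanding; if queue_work() fails
 * because the work was already queued, the ref is dropped immediately.
 */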
1775 void bch2_do_discards(struct bch_fs *c)
1776 {
1777         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
1778             !queue_work(c->write_ref_wq, &c->discard_work))
1779                 bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1780 }
1781
1782 static int invalidate_one_bucket(struct btree_trans *trans,
1783                                  struct btree_iter *lru_iter,
1784                                  struct bkey_s_c lru_k,
1785                                  s64 *nr_to_invalidate)
1786 {
1787         struct bch_fs *c = trans->c;
1788         struct btree_iter alloc_iter = { NULL };
1789         struct bkey_i_alloc_v4 *a = NULL;
1790         struct printbuf buf = PRINTBUF;
1791         struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
1792         unsigned cached_sectors;
1793         int ret = 0;
1794
1795         if (*nr_to_invalidate <= 0)
1796                 return 1;
1797
1798         if (!bch2_dev_bucket_exists(c, bucket)) {
1799                 prt_str(&buf, "lru entry points to invalid bucket");
1800                 goto err;
1801         }
1802
1803         if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
1804                 return 0;
1805
1806         a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
1807         ret = PTR_ERR_OR_ZERO(a);
1808         if (ret)
1809                 goto out;
1810
1811         /* We expect harmless races here due to the btree write buffer: */
1812         if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
1813                 goto out;
1814
1815         BUG_ON(a->v.data_type != BCH_DATA_cached);
1816
1817         if (!a->v.cached_sectors)
1818                 bch_err(c, "invalidating empty bucket, confused");
1819
1820         cached_sectors = a->v.cached_sectors;
1821
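        /*
         * Invalidate the bucket: bumping the gen makes any remaining pointers
         * to cached data in this bucket stale, and zeroing the counters
         * returns the bucket to the free pool:
         */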
1822         SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
1823         a->v.gen++;
1824         a->v.data_type          = 0;
1825         a->v.dirty_sectors      = 0;
1826         a->v.cached_sectors     = 0;
1827         a->v.io_time[READ]      = atomic64_read(&c->io_clock[READ].now);
1828         a->v.io_time[WRITE]     = atomic64_read(&c->io_clock[WRITE].now);
1829
1830         ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
1831                                 BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
1832                 bch2_trans_commit(trans, NULL, NULL,
1833                                   BCH_WATERMARK_btree|
1834                                   BCH_TRANS_COMMIT_no_enospc);
1835         if (ret)
1836                 goto out;
1837
1838         trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
1839         --*nr_to_invalidate;
1840 out:
1841         bch2_trans_iter_exit(trans, &alloc_iter);
1842         printbuf_exit(&buf);
1843         return ret;
1844 err:
1845         prt_str(&buf, "\n  lru key: ");
1846         bch2_bkey_val_to_text(&buf, c, lru_k);
1847
1848         prt_str(&buf, "\n  lru entry: ");
1849         bch2_lru_pos_to_text(&buf, lru_iter->pos);
1850
1851         prt_str(&buf, "\n  alloc key: ");
1852         if (!a)
1853                 bch2_bpos_to_text(&buf, bucket);
1854         else
1855                 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
1856
1857         bch_err(c, "%s", buf.buf);
1858         if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
1859                 bch2_inconsistent_error(c);
1860                 ret = -EINVAL;
1861         }
1862
1863         goto out;
1864 }
1865
1866 static void bch2_do_invalidates_work(struct work_struct *work)
1867 {
1868         struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
1869         struct btree_trans *trans = bch2_trans_get(c);
1870         int ret = 0;
1871
1872         ret = bch2_btree_write_buffer_tryflush(trans);
1873         if (ret)
1874                 goto err;
1875
1876         for_each_member_device(c, ca) {
1877                 s64 nr_to_invalidate =
1878                         should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
1879
1880                 ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
1881                                 lru_pos(ca->dev_idx, 0, 0),
1882                                 lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
1883                                 BTREE_ITER_INTENT, k,
1884                         invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
1885
1886                 if (ret < 0) {
1887                         percpu_ref_put(&ca->ref);
1888                         break;
1889                 }
1890         }
1891 err:
1892         bch2_trans_put(trans);
1893         bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
1894 }
1895
1896 void bch2_do_invalidates(struct bch_fs *c)
1897 {
1898         if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
1899             !queue_work(c->write_ref_wq, &c->invalidate_work))
1900                 bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
1901 }
1902
1903 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
1904                             u64 bucket_start, u64 bucket_end)
1905 {
1906         struct btree_trans *trans = bch2_trans_get(c);
1907         struct btree_iter iter;
1908         struct bkey_s_c k;
1909         struct bkey hole;
1910         struct bpos end = POS(ca->dev_idx, bucket_end);
1911         struct bch_member *m;
1912         unsigned long last_updated = jiffies;
1913         int ret;
1914
1915         BUG_ON(bucket_start > bucket_end);
1916         BUG_ON(bucket_end > ca->mi.nbuckets);
1917
1918         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
1919                 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
1920                 BTREE_ITER_PREFETCH);
1921         /*
1922          * Scan the alloc btree for every bucket on @ca, and add buckets to the
1923          * freespace/need_discard/need_gc_gens btrees as needed:
1924          */
1925         while (1) {
                if (time_after(jiffies, last_updated + HZ * 10)) {
1927                         bch_info(ca, "%s: currently at %llu/%llu",
1928                                  __func__, iter.pos.offset, ca->mi.nbuckets);
1929                         last_updated = jiffies;
1930                 }
1931
1932                 bch2_trans_begin(trans);
1933
1934                 if (bkey_ge(iter.pos, end)) {
1935                         ret = 0;
1936                         break;
1937                 }
1938
1939                 k = bch2_get_key_or_hole(&iter, end, &hole);
1940                 ret = bkey_err(k);
1941                 if (ret)
1942                         goto bkey_err;
1943
1944                 if (k.k->type) {
1945                         /*
1946                          * We process live keys in the alloc btree one at a
1947                          * time:
1948                          */
1949                         struct bch_alloc_v4 a_convert;
1950                         const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
1951
1952                         ret =   bch2_bucket_do_index(trans, k, a, true) ?:
1953                                 bch2_trans_commit(trans, NULL, NULL,
1954                                                   BCH_TRANS_COMMIT_no_enospc);
1955                         if (ret)
1956                                 goto bkey_err;
1957
1958                         bch2_btree_iter_advance(&iter);
1959                 } else {
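                        /*
                         * Hole in the alloc btree: insert one freespace
                         * extent covering the entire range, instead of one
                         * key per bucket:
                         */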
1960                         struct bkey_i *freespace;
1961
1962                         freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
1963                         ret = PTR_ERR_OR_ZERO(freespace);
1964                         if (ret)
1965                                 goto bkey_err;
1966
1967                         bkey_init(&freespace->k);
1968                         freespace->k.type       = KEY_TYPE_set;
1969                         freespace->k.p          = k.k->p;
1970                         freespace->k.size       = k.k->size;
1971
1972                         ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
1973                                 bch2_trans_commit(trans, NULL, NULL,
1974                                                   BCH_TRANS_COMMIT_no_enospc);
1975                         if (ret)
1976                                 goto bkey_err;
1977
1978                         bch2_btree_iter_set_pos(&iter, k.k->p);
1979                 }
1980 bkey_err:
1981                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1982                         continue;
1983                 if (ret)
1984                         break;
1985         }
1986
1987         bch2_trans_iter_exit(trans, &iter);
1988         bch2_trans_put(trans);
1989
1990         if (ret < 0) {
1991                 bch_err_msg(ca, ret, "initializing free space");
1992                 return ret;
1993         }
1994
1995         mutex_lock(&c->sb_lock);
1996         m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
1997         SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
1998         mutex_unlock(&c->sb_lock);
1999
2000         return 0;
2001 }
2002
2003 int bch2_fs_freespace_init(struct bch_fs *c)
2004 {
2005         int ret = 0;
2006         bool doing_init = false;
2007
2008         /*
2009          * We can crash during the device add path, so we need to check this on
2010          * every mount:
2011          */
2012
2013         for_each_member_device(c, ca) {
2014                 if (ca->mi.freespace_initialized)
2015                         continue;
2016
2017                 if (!doing_init) {
2018                         bch_info(c, "initializing freespace");
2019                         doing_init = true;
2020                 }
2021
2022                 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
2023                 if (ret) {
2024                         percpu_ref_put(&ca->ref);
2025                         bch_err_fn(c, ret);
2026                         return ret;
2027                 }
2028         }
2029
2030         if (doing_init) {
2031                 mutex_lock(&c->sb_lock);
2032                 bch2_write_super(c);
2033                 mutex_unlock(&c->sb_lock);
2034                 bch_verbose(c, "done initializing freespace");
2035         }
2036
2037         return 0;
2038 }
2039
2040 /* Bucket IO clocks: */
2041
2042 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
2043                               size_t bucket_nr, int rw)
2044 {
2045         struct bch_fs *c = trans->c;
2046         struct btree_iter iter;
2047         struct bkey_i_alloc_v4 *a;
2048         u64 now;
2049         int ret = 0;
2050
        a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
2052         ret = PTR_ERR_OR_ZERO(a);
2053         if (ret)
2054                 return ret;
2055
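        /* Nothing to do if the clock hasn't ticked since the last reset: */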
2056         now = atomic64_read(&c->io_clock[rw].now);
2057         if (a->v.io_time[rw] == now)
2058                 goto out;
2059
2060         a->v.io_time[rw] = now;
2061
2062         ret   = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
2063                 bch2_trans_commit(trans, NULL, NULL, 0);
2064 out:
2065         bch2_trans_iter_exit(trans, &iter);
2066         return ret;
2067 }
2068
2069 /* Startup/shutdown (ro/rw): */
2070
2071 void bch2_recalc_capacity(struct bch_fs *c)
2072 {
2073         u64 capacity = 0, reserved_sectors = 0, gc_reserve;
2074         unsigned bucket_size_max = 0;
2075         unsigned long ra_pages = 0;
2076
2077         lockdep_assert_held(&c->state_lock);
2078
2079         for_each_online_member(c, ca) {
2080                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
2081
2082                 ra_pages += bdi->ra_pages;
2083         }
2084
2085         bch2_set_ra_pages(c, ra_pages);
2086
2087         for_each_rw_member(c, ca) {
2088                 u64 dev_reserve = 0;
2089
                /*
                 * We need to reserve buckets (from the number
                 * of currently available buckets) against
                 * foreground writes, mainly so that copygc can
                 * make forward progress.
                 *
                 * We need enough to refill the various reserves
                 * from scratch - copygc will use its entire
                 * reserve all at once, then run again when
                 * its reserve is refilled (from the formerly
                 * available buckets).
                 *
                 * This reserve is just used when considering if
                 * allocations for foreground writes must wait -
                 * not for -ENOSPC calculations.
                 */
2106
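                /*
                 * Worked example with made-up numbers - a device with 2^20
                 * buckets of 512 sectors, assuming nr_btree_reserve == 7:
                 *
                 *      dev_reserve = 7 * 2 + (2^20 >> 6) + 3 = 16401 buckets
                 *                  = 16401 * 512 sectors ~= 4 GiB
                 */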
2107                 dev_reserve += ca->nr_btree_reserve * 2;
2108                 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
2109
2110                 dev_reserve += 1;       /* btree write point */
2111                 dev_reserve += 1;       /* copygc write point */
2112                 dev_reserve += 1;       /* rebalance write point */
2113
2114                 dev_reserve *= ca->mi.bucket_size;
2115
2116                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
2117                                              ca->mi.first_bucket);
2118
2119                 reserved_sectors += dev_reserve * 2;
2120
2121                 bucket_size_max = max_t(unsigned, bucket_size_max,
2122                                         ca->mi.bucket_size);
2123         }
2124
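        /* gc_reserve_bytes is in bytes; >> 9 converts it to 512-byte sectors: */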
2125         gc_reserve = c->opts.gc_reserve_bytes
2126                 ? c->opts.gc_reserve_bytes >> 9
2127                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
2128
2129         reserved_sectors = max(gc_reserve, reserved_sectors);
2130
2131         reserved_sectors = min(reserved_sectors, capacity);
2132
2133         c->capacity = capacity - reserved_sectors;
2134
2135         c->bucket_size_max = bucket_size_max;
2136
        /* Wake up in case someone was waiting for buckets: */
2138         closure_wake_up(&c->freelist_wait);
2139 }
2140
2141 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
2142 {
2143         u64 ret = U64_MAX;
2144
2145         for_each_rw_member(c, ca)
2146                 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
2147         return ret;
2148 }
2149
2150 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
2151 {
2152         struct open_bucket *ob;
2153         bool ret = false;
2154
2155         for (ob = c->open_buckets;
2156              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
2157              ob++) {
2158                 spin_lock(&ob->lock);
2159                 if (ob->valid && !ob->on_partial_list &&
2160                     ob->dev == ca->dev_idx)
2161                         ret = true;
2162                 spin_unlock(&ob->lock);
2163         }
2164
2165         return ret;
2166 }
2167
2168 /* device goes ro: */
2169 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
2170 {
2171         unsigned i;
2172
2173         /* First, remove device from allocation groups: */
2174
2175         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2176                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
2177
        /*
         * Capacity is calculated based on the devices in allocation groups:
         */
2181         bch2_recalc_capacity(c);
2182
2183         bch2_open_buckets_stop(c, ca, false);
2184
2185         /*
2186          * Wake up threads that were blocked on allocation, so they can notice
2187          * the device can no longer be removed and the capacity has changed:
2188          */
2189         closure_wake_up(&c->freelist_wait);
2190
2191         /*
2192          * journal_res_get() can block waiting for free space in the journal -
2193          * it needs to notice there may not be devices to allocate from anymore:
2194          */
2195         wake_up(&c->journal.wait);
2196
2197         /* Now wait for any in flight writes: */
2198
2199         closure_wait_event(&c->open_buckets_wait,
2200                            !bch2_dev_has_open_write_point(c, ca));
2201 }
2202
2203 /* device goes rw: */
2204 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
2205 {
2206         unsigned i;
2207
2208         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2209                 if (ca->mi.data_allowed & (1 << i))
2210                         set_bit(ca->dev_idx, c->rw_devs[i].d);
2211 }
2212
2213 void bch2_fs_allocator_background_init(struct bch_fs *c)
2214 {
2215         spin_lock_init(&c->freelist_lock);
2216         INIT_WORK(&c->discard_work, bch2_do_discards_work);
2217         INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
2218 }