// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bset.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
#include "movinggc.h"
#include "recovery.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/preempt.h>

static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
                                              enum bch_data_type data_type,
                                              s64 sectors)
{
        switch (data_type) {
        case BCH_DATA_btree:
                fs_usage->btree         += sectors;
                break;
        case BCH_DATA_user:
        case BCH_DATA_parity:
                fs_usage->data          += sectors;
                break;
        case BCH_DATA_cached:
                fs_usage->cached        += sectors;
                break;
        default:
                break;
        }
}

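/*
 * Fold the accumulated percpu usage counters into usage_base and derive the
 * summary fields (reserved, hidden, per-data-type totals) from the replicas
 * table and member device usage. Presumably run once early in startup, after
 * recovery, since it takes mark_lock for write and recomputes from scratch.
 */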
void bch2_fs_usage_initialize(struct bch_fs *c)
{
        percpu_down_write(&c->mark_lock);
        struct bch_fs_usage *usage = c->usage_base;

        for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
                bch2_fs_usage_acc_to_base(c, i);

        for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
                usage->reserved += usage->persistent_reserved[i];

        for (unsigned i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry_v1 *e =
                        cpu_replicas_entry(&c->replicas, i);

                fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
        }

        for_each_member_device(c, ca) {
                struct bch_dev_usage dev = bch2_dev_usage_read(ca);

                usage->hidden += (dev.d[BCH_DATA_sb].buckets +
                                  dev.d[BCH_DATA_journal].buckets) *
                        ca->mi.bucket_size;
        }

        percpu_up_write(&c->mark_lock);
}

static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
                                                  unsigned journal_seq,
                                                  bool gc)
{
        BUG_ON(!gc && !journal_seq);

        return this_cpu_ptr(gc
                            ? ca->usage_gc
                            : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
}

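/*
 * Read a consistent snapshot of a device's usage: usage_base plus all of the
 * percpu per-journal-buffer accumulators, retried under the usage_lock
 * seqcount so a concurrent bch2_fs_usage_acc_to_base() can't be observed
 * halfway through.
 */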
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
        struct bch_fs *c = ca->fs;
        unsigned seq, i, u64s = dev_usage_u64s();

        do {
                seq = read_seqcount_begin(&c->usage_lock);
                memcpy(usage, ca->usage_base, u64s * sizeof(u64));
                for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
                        acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
        } while (read_seqcount_retry(&c->usage_lock, seq));
}

u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
        ssize_t offset = v - (u64 *) c->usage_base;
        unsigned i, seq;
        u64 ret;

        BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
        percpu_rwsem_assert_held(&c->mark_lock);

        do {
                seq = read_seqcount_begin(&c->usage_lock);
                ret = *v;

                for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                        ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
        } while (read_seqcount_retry(&c->usage_lock, seq));

        return ret;
}

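/*
 * Allocate and return a snapshot of the full filesystem usage. The replicas
 * table can grow between sizing the allocation and taking mark_lock, hence
 * the retry loop; the seqcount loop then gives a torn-write-free copy.
 * Caller frees the result with kfree().
 */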
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
        struct bch_fs_usage_online *ret;
        unsigned nr_replicas = READ_ONCE(c->replicas.nr);
        unsigned seq, i;
retry:
        ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
        if (unlikely(!ret))
                return NULL;

        percpu_down_read(&c->mark_lock);

        if (nr_replicas != c->replicas.nr) {
                nr_replicas = c->replicas.nr;
                percpu_up_read(&c->mark_lock);
                kfree(ret);
                goto retry;
        }

        ret->online_reserved = percpu_u64_get(c->online_reserved);

        do {
                seq = read_seqcount_begin(&c->usage_lock);
                unsafe_memcpy(&ret->u, c->usage_base,
                              __fs_usage_u64s(nr_replicas) * sizeof(u64),
                              "embedded variable length struct");
                for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                        acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
                                        __fs_usage_u64s(nr_replicas));
        } while (read_seqcount_retry(&c->usage_lock, seq));

        return ret;
}

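/*
 * Fold one journal buffer's percpu usage deltas into usage_base and zero
 * them, for the filesystem and for each member device. Writers are excluded
 * via the usage_lock seqcount; readers retry instead of blocking.
 */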
void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
{
        unsigned u64s = fs_usage_u64s(c);

        BUG_ON(idx >= ARRAY_SIZE(c->usage));

        preempt_disable();
        write_seqcount_begin(&c->usage_lock);

        acc_u64s_percpu((u64 *) c->usage_base,
                        (u64 __percpu *) c->usage[idx], u64s);
        percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));

        rcu_read_lock();
        for_each_member_device_rcu(c, ca, NULL) {
                u64s = dev_usage_u64s();

                acc_u64s_percpu((u64 *) ca->usage_base,
                                (u64 __percpu *) ca->usage[idx], u64s);
                percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
        }
        rcu_read_unlock();

        write_seqcount_end(&c->usage_lock);
        preempt_enable();
}

void bch2_fs_usage_to_text(struct printbuf *out,
                           struct bch_fs *c,
                           struct bch_fs_usage_online *fs_usage)
{
        unsigned i;

        prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);

        prt_printf(out, "hidden:\t\t\t\t%llu\n",
               fs_usage->u.hidden);
        prt_printf(out, "data:\t\t\t\t%llu\n",
               fs_usage->u.data);
        prt_printf(out, "cached:\t\t\t\t%llu\n",
               fs_usage->u.cached);
        prt_printf(out, "reserved:\t\t\t%llu\n",
               fs_usage->u.reserved);
        prt_printf(out, "nr_inodes:\t\t\t%llu\n",
               fs_usage->u.nr_inodes);
        prt_printf(out, "online reserved:\t\t%llu\n",
               fs_usage->online_reserved);

        for (i = 0;
             i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
             i++) {
                prt_printf(out, "%u replicas:\n", i + 1);
                prt_printf(out, "\treserved:\t\t%llu\n",
                       fs_usage->u.persistent_reserved[i]);
        }

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry_v1 *e =
                        cpu_replicas_entry(&c->replicas, i);

                prt_printf(out, "\t");
                bch2_replicas_entry_to_text(out, e);
                prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
        }
}

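/*
 * Inflate a reservation to leave slack for internal fragmentation:
 * r plus r rounded up to 1 << RESERVE_FACTOR, scaled back down by
 * RESERVE_FACTOR - i.e. roughly r * (1 + 2^-RESERVE_FACTOR).
 */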
static u64 reserve_factor(u64 r)
{
        return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
        return min(fs_usage->u.hidden +
                   fs_usage->u.btree +
                   fs_usage->u.data +
                   reserve_factor(fs_usage->u.reserved +
                                  fs_usage->online_reserved),
                   c->capacity);
}

static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
        struct bch_fs_usage_short ret;
        u64 data, reserved;

        ret.capacity = c->capacity -
                bch2_fs_usage_read_one(c, &c->usage_base->hidden);

        data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
                bch2_fs_usage_read_one(c, &c->usage_base->btree);
        reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
                percpu_u64_get(c->online_reserved);

        ret.used        = min(ret.capacity, data + reserve_factor(reserved));
        ret.free        = ret.capacity - ret.used;

        ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);

        return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
        struct bch_fs_usage_short ret;

        percpu_down_read(&c->mark_lock);
        ret = __bch2_fs_usage_read_short(c);
        percpu_up_read(&c->mark_lock);

        return ret;
}

void bch2_dev_usage_init(struct bch_dev *ca)
{
        ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
}

void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
{
        prt_tab(out);
        prt_str(out, "buckets");
        prt_tab_rjust(out);
        prt_str(out, "sectors");
        prt_tab_rjust(out);
        prt_str(out, "fragmented");
        prt_tab_rjust(out);
        prt_newline(out);

        for (unsigned i = 0; i < BCH_DATA_NR; i++) {
                prt_str(out, bch2_data_types[i]);
                prt_tab(out);
                prt_u64(out, usage->d[i].buckets);
                prt_tab_rjust(out);
                prt_u64(out, usage->d[i].sectors);
                prt_tab_rjust(out);
                prt_u64(out, usage->d[i].fragmented);
                prt_tab_rjust(out);
                prt_newline(out);
        }
}

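/*
 * Apply the delta between a bucket's old and new alloc state to the device
 * and filesystem usage counters. Runs with preemption disabled so the percpu
 * counters for this journal buffer (or the GC copy) stay coherent.
 */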
void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
                           const struct bch_alloc_v4 *old,
                           const struct bch_alloc_v4 *new,
                           u64 journal_seq, bool gc)
{
        struct bch_fs_usage *fs_usage;
        struct bch_dev_usage *u;

        preempt_disable();
        fs_usage = fs_usage_ptr(c, journal_seq, gc);

        if (data_type_is_hidden(old->data_type))
                fs_usage->hidden -= ca->mi.bucket_size;
        if (data_type_is_hidden(new->data_type))
                fs_usage->hidden += ca->mi.bucket_size;

        u = dev_usage_ptr(ca, journal_seq, gc);

        u->d[old->data_type].buckets--;
        u->d[new->data_type].buckets++;

        u->d[old->data_type].sectors -= bch2_bucket_sectors_dirty(*old);
        u->d[new->data_type].sectors += bch2_bucket_sectors_dirty(*new);

        u->d[BCH_DATA_cached].sectors += new->cached_sectors;
        u->d[BCH_DATA_cached].sectors -= old->cached_sectors;

        u->d[old->data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, *old);
        u->d[new->data_type].fragmented += bch2_bucket_sectors_fragmented(ca, *new);

        preempt_enable();
}

static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
{
        return (struct bch_alloc_v4) {
                .gen            = b.gen,
                .data_type      = b.data_type,
                .dirty_sectors  = b.dirty_sectors,
                .cached_sectors = b.cached_sectors,
                .stripe         = b.stripe,
        };
}

void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
                             struct bucket *old, struct bucket *new)
{
        struct bch_alloc_v4 old_a = bucket_m_to_alloc(*old);
        struct bch_alloc_v4 new_a = bucket_m_to_alloc(*new);

        bch2_dev_usage_update(c, ca, &old_a, &new_a, 0, true);
}

static inline int __update_replicas(struct bch_fs *c,
                                    struct bch_fs_usage *fs_usage,
                                    struct bch_replicas_entry_v1 *r,
                                    s64 sectors)
{
        int idx = bch2_replicas_entry_idx(c, r);

        if (idx < 0)
                return -1;

        fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
        fs_usage->replicas[idx]         += sectors;
        return 0;
}

int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k,
                         struct bch_replicas_entry_v1 *r, s64 sectors,
                         unsigned journal_seq, bool gc)
{
        struct bch_fs_usage *fs_usage;
        int idx, ret = 0;
        struct printbuf buf = PRINTBUF;

        percpu_down_read(&c->mark_lock);

        idx = bch2_replicas_entry_idx(c, r);
        if (idx < 0 &&
            fsck_err(c, ptr_to_missing_replicas_entry,
                     "no replicas entry\n  while marking %s",
                     (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                percpu_up_read(&c->mark_lock);
                ret = bch2_mark_replicas(c, r);
                percpu_down_read(&c->mark_lock);

                if (ret)
                        goto err;
                idx = bch2_replicas_entry_idx(c, r);
        }
        if (idx < 0) {
                ret = -1;
                goto err;
        }

        preempt_disable();
        fs_usage = fs_usage_ptr(c, journal_seq, gc);
        fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
        fs_usage->replicas[idx]         += sectors;
        preempt_enable();
err:
fsck_err:
        percpu_up_read(&c->mark_lock);
        printbuf_exit(&buf);
        return ret;
}

static inline int update_cached_sectors(struct bch_fs *c,
                        struct bkey_s_c k,
                        unsigned dev, s64 sectors,
                        unsigned journal_seq, bool gc)
{
        struct bch_replicas_padded r;

        bch2_replicas_entry_cached(&r.e, dev);

        return bch2_update_replicas(c, k, &r.e, sectors, journal_seq, gc);
}

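/*
 * Ensure the transaction's replicas delta list has room for @more bytes,
 * growing it geometrically; if krealloc fails, fall back to a fixed-size
 * mempool element (REPLICAS_DELTA_LIST_MAX) so forward progress is still
 * possible under memory pressure.
 */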
static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
                                     gfp_t gfp)
{
        struct replicas_delta_list *d = trans->fs_usage_deltas;
        unsigned new_size = d ? (d->size + more) * 2 : 128;
        unsigned alloc_size = sizeof(*d) + new_size;

        WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);

        if (!d || d->used + more > d->size) {
                d = krealloc(d, alloc_size, gfp|__GFP_ZERO);

                if (unlikely(!d)) {
                        if (alloc_size > REPLICAS_DELTA_LIST_MAX)
                                return -ENOMEM;

                        d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
                        if (!d)
                                return -ENOMEM;

                        memset(d, 0, REPLICAS_DELTA_LIST_MAX);

                        if (trans->fs_usage_deltas)
                                memcpy(d, trans->fs_usage_deltas,
                                       trans->fs_usage_deltas->size + sizeof(*d));

                        new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
                        kfree(trans->fs_usage_deltas);
                }

                d->size = new_size;
                trans->fs_usage_deltas = d;
        }

        return 0;
}

int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
{
        return allocate_dropping_locks_errcode(trans,
                                __replicas_deltas_realloc(trans, more, _gfp));
}

int bch2_update_replicas_list(struct btree_trans *trans,
                         struct bch_replicas_entry_v1 *r,
                         s64 sectors)
{
        struct replicas_delta_list *d;
        struct replicas_delta *n;
        unsigned b;
        int ret;

        if (!sectors)
                return 0;

        b = replicas_entry_bytes(r) + 8;
        ret = bch2_replicas_deltas_realloc(trans, b);
        if (ret)
                return ret;

        d = trans->fs_usage_deltas;
        n = (void *) d->d + d->used;
        n->delta = sectors;
        unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r),
                      r, replicas_entry_bytes(r),
                      "flexible array member embedded in struct with padding");
        bch2_replicas_entry_sort(&n->r);
        d->used += b;
        return 0;
}

int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 sectors)
{
        struct bch_replicas_padded r;

        bch2_replicas_entry_cached(&r.e, dev);

        return bch2_update_replicas_list(trans, &r.e, sectors);
}

int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
                              size_t b, enum bch_data_type data_type,
                              unsigned sectors, struct gc_pos pos,
                              unsigned flags)
{
        struct bucket old, new, *g;
        int ret = 0;

        BUG_ON(!(flags & BTREE_TRIGGER_GC));
        BUG_ON(data_type != BCH_DATA_sb &&
               data_type != BCH_DATA_journal);

        /*
         * Backup superblock might be past the end of our normal usable space:
         */
        if (b >= ca->mi.nbuckets)
                return 0;

        percpu_down_read(&c->mark_lock);
        g = gc_bucket(ca, b);

        bucket_lock(g);
        old = *g;

        if (bch2_fs_inconsistent_on(g->data_type &&
                        g->data_type != data_type, c,
                        "different types of data in same bucket: %s, %s",
                        bch2_data_types[g->data_type],
                        bch2_data_types[data_type])) {
                ret = -EIO;
                goto err;
        }

        if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
                        "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
                        ca->dev_idx, b, g->gen,
                        bch2_data_types[g->data_type ?: data_type],
                        g->dirty_sectors, sectors)) {
                ret = -EIO;
                goto err;
        }

        g->data_type = data_type;
        g->dirty_sectors += sectors;
        new = *g;
err:
        bucket_unlock(g);
        if (!ret)
                bch2_dev_usage_update_m(c, ca, &old, &new);
        percpu_up_read(&c->mark_lock);
        return ret;
}

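/*
 * Sanity check a pointer against the bucket it points into: generation
 * numbers must be plausible, dirty pointers may not be stale, data types
 * must agree, and the sector count may not overflow. Returns -EIO on
 * inconsistency, 1 for a (legal) stale cached pointer, 0 otherwise.
 */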
int bch2_check_bucket_ref(struct btree_trans *trans,
                          struct bkey_s_c k,
                          const struct bch_extent_ptr *ptr,
                          s64 sectors, enum bch_data_type ptr_data_type,
                          u8 b_gen, u8 bucket_data_type,
                          u32 bucket_sectors)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
        size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (bucket_data_type == BCH_DATA_cached)
                bucket_data_type = BCH_DATA_user;

        if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
            (bucket_data_type == BCH_DATA_user   && ptr_data_type == BCH_DATA_stripe))
                bucket_data_type = ptr_data_type = BCH_DATA_stripe;

        if (gen_after(ptr->gen, b_gen)) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                              BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        ptr->gen,
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EIO;
                goto err;
        }

        if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                              BCH_FSCK_ERR_ptr_too_stale,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        ptr->gen,
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EIO;
                goto err;
        }

        if (b_gen != ptr->gen && !ptr->cached) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                              BCH_FSCK_ERR_stale_dirty_ptr,
                        "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
                        *bucket_gen(ca, bucket_nr),
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        ptr->gen,
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EIO;
                goto err;
        }

        if (b_gen != ptr->gen) {
                ret = 1;
                goto out;
        }

        if (!data_type_is_empty(bucket_data_type) &&
            ptr_data_type &&
            bucket_data_type != ptr_data_type) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                              BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
                        "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
                        bch2_data_types[bucket_data_type],
                        bch2_data_types[ptr_data_type],
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EIO;
                goto err;
        }

        if ((u64) bucket_sectors + sectors > U32_MAX) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                              BCH_FSCK_ERR_bucket_sector_count_overflow,
                        "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
                        bch2_data_types[bucket_data_type ?: ptr_data_type],
                        bucket_sectors, sectors,
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EIO;
                goto err;
        }
out:
        printbuf_exit(&buf);
        return ret;
err:
        bch2_dump_trans_updates(trans);
        goto out;
}

void bch2_trans_fs_usage_revert(struct btree_trans *trans,
                                struct replicas_delta_list *deltas)
{
        struct bch_fs *c = trans->c;
        struct bch_fs_usage *dst;
        struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
        s64 added = 0;
        unsigned i;

        percpu_down_read(&c->mark_lock);
        preempt_disable();
        dst = fs_usage_ptr(c, trans->journal_res.seq, false);

        /* revert changes: */
        for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
                switch (d->r.data_type) {
                case BCH_DATA_btree:
                case BCH_DATA_user:
                case BCH_DATA_parity:
                        added += d->delta;
                }
                BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
        }

        dst->nr_inodes -= deltas->nr_inodes;

        for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                added                           -= deltas->persistent_reserved[i];
                dst->reserved                   -= deltas->persistent_reserved[i];
                dst->persistent_reserved[i]     -= deltas->persistent_reserved[i];
        }

        if (added > 0) {
                trans->disk_res->sectors += added;
                this_cpu_add(*c->online_reserved, added);
        }

        preempt_enable();
        percpu_up_read(&c->mark_lock);
}

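/*
 * Apply a transaction's accumulated replicas deltas to the in-memory usage
 * for the journal sequence it will commit at. If usage grew by more than the
 * transaction's disk reservation, the difference is clawed back from
 * sectors_available and reported (once) via bch2_trans_inconsistent().
 * Returns -1 if a replicas entry was missing, so the caller can mark it and
 * retry.
 */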
int bch2_trans_fs_usage_apply(struct btree_trans *trans,
                              struct replicas_delta_list *deltas)
{
        struct bch_fs *c = trans->c;
        static int warned_disk_usage = 0;
        bool warn = false;
        u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
        struct replicas_delta *d, *d2;
        struct replicas_delta *top = (void *) deltas->d + deltas->used;
        struct bch_fs_usage *dst;
        s64 added = 0, should_not_have_added;
        unsigned i;

        percpu_down_read(&c->mark_lock);
        preempt_disable();
        dst = fs_usage_ptr(c, trans->journal_res.seq, false);

        for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
                switch (d->r.data_type) {
                case BCH_DATA_btree:
                case BCH_DATA_user:
                case BCH_DATA_parity:
                        added += d->delta;
                }

                if (__update_replicas(c, dst, &d->r, d->delta))
                        goto need_mark;
        }

        dst->nr_inodes += deltas->nr_inodes;

        for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                added                           += deltas->persistent_reserved[i];
                dst->reserved                   += deltas->persistent_reserved[i];
                dst->persistent_reserved[i]     += deltas->persistent_reserved[i];
        }

        /*
         * Not allowed to reduce sectors_available except by getting a
         * reservation:
         */
        should_not_have_added = added - (s64) disk_res_sectors;
        if (unlikely(should_not_have_added > 0)) {
                u64 old, new, v = atomic64_read(&c->sectors_available);

                do {
                        old = v;
                        new = max_t(s64, 0, old - should_not_have_added);
                } while ((v = atomic64_cmpxchg(&c->sectors_available,
                                               old, new)) != old);

                added -= should_not_have_added;
                warn = true;
        }

        if (added > 0) {
                trans->disk_res->sectors -= added;
                this_cpu_sub(*c->online_reserved, added);
        }

        preempt_enable();
        percpu_up_read(&c->mark_lock);

        if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
                bch2_trans_inconsistent(trans,
                                        "disk usage increased %lli more than %llu sectors reserved",
                                        should_not_have_added, disk_res_sectors);
        return 0;
need_mark:
        /* revert changes: */
        for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
                BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));

        preempt_enable();
        percpu_up_read(&c->mark_lock);
        return -1;
}

/* KEY_TYPE_extent: */

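/*
 * Common helper for both the transactional and GC paths: account @sectors to
 * the bucket's dirty or cached count (after bch2_check_bucket_ref() has
 * validated the reference) and update the bucket's data type to match.
 */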
static int __mark_pointer(struct btree_trans *trans,
                          struct bkey_s_c k,
                          const struct bch_extent_ptr *ptr,
                          s64 sectors, enum bch_data_type ptr_data_type,
                          u8 bucket_gen, u8 *bucket_data_type,
                          u32 *dirty_sectors, u32 *cached_sectors)
{
        u32 *dst_sectors = !ptr->cached
                ? dirty_sectors
                : cached_sectors;
        int ret = bch2_check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
                                   bucket_gen, *bucket_data_type, *dst_sectors);

        if (ret)
                return ret;

        *dst_sectors += sectors;

        if (!*dirty_sectors && !*cached_sectors)
                *bucket_data_type = 0;
        else if (*bucket_data_type != BCH_DATA_stripe)
                *bucket_data_type = ptr_data_type;

        return 0;
}

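/*
 * Trigger for a single extent pointer. Transactionally: update the bucket's
 * alloc key and, for dirty pointers, the backpointer entry. At GC time:
 * update the in-memory GC bucket instead. Returns > 0 if the pointer turned
 * out to be a stale cached pointer.
 */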
static int bch2_trigger_pointer(struct btree_trans *trans,
                        enum btree_id btree_id, unsigned level,
                        struct bkey_s_c k, struct extent_ptr_decoded p,
                        s64 *sectors,
                        unsigned flags)
{
        bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
        struct bpos bucket;
        struct bch_backpointer bp;

        bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
        *sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);

        if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
                struct btree_iter iter;
                struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, &iter, bucket);
                int ret = PTR_ERR_OR_ZERO(a);
                if (ret)
                        return ret;

                ret = __mark_pointer(trans, k, &p.ptr, *sectors, bp.data_type,
                                     a->v.gen, &a->v.data_type,
                                     &a->v.dirty_sectors, &a->v.cached_sectors) ?:
                        bch2_trans_update(trans, &iter, &a->k_i, 0);
                bch2_trans_iter_exit(trans, &iter);

                if (ret)
                        return ret;

                if (!p.ptr.cached) {
                        ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
                        if (ret)
                                return ret;
                }
        }

        if (flags & BTREE_TRIGGER_GC) {
                struct bch_fs *c = trans->c;
                struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
                enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);

                percpu_down_read(&c->mark_lock);
                struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
                bucket_lock(g);
                struct bucket old = *g;

                u8 bucket_data_type = g->data_type;
                int ret = __mark_pointer(trans, k, &p.ptr, *sectors,
                                     data_type, g->gen,
                                     &bucket_data_type,
                                     &g->dirty_sectors,
                                     &g->cached_sectors);
                if (ret) {
                        bucket_unlock(g);
                        percpu_up_read(&c->mark_lock);
                        return ret;
                }

                g->data_type = bucket_data_type;
                struct bucket new = *g;
                bucket_unlock(g);
                bch2_dev_usage_update_m(c, ca, &old, &new);
                percpu_up_read(&c->mark_lock);
        }

        return 0;
}

static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
                                struct bkey_s_c k,
                                struct extent_ptr_decoded p,
                                enum bch_data_type data_type,
                                s64 sectors, unsigned flags)
{
        if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
                struct btree_iter iter;
                struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
                                BTREE_ID_stripes, POS(0, p.ec.idx),
                                BTREE_ITER_WITH_UPDATES, stripe);
                int ret = PTR_ERR_OR_ZERO(s);
                if (unlikely(ret)) {
                        bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
                                "pointer to nonexistent stripe %llu",
                                (u64) p.ec.idx);
                        goto err;
                }

                if (!bch2_ptr_matches_stripe(&s->v, p)) {
                        bch2_trans_inconsistent(trans,
                                "stripe pointer doesn't match stripe %llu",
                                (u64) p.ec.idx);
                        ret = -EIO;
                        goto err;
                }

                stripe_blockcount_set(&s->v, p.ec.block,
                        stripe_blockcount_get(&s->v, p.ec.block) +
                        sectors);

                struct bch_replicas_padded r;
                bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
                r.e.data_type = data_type;
                ret = bch2_update_replicas_list(trans, &r.e, sectors);
err:
                bch2_trans_iter_exit(trans, &iter);
                return ret;
        }

        if (flags & BTREE_TRIGGER_GC) {
                struct bch_fs *c = trans->c;

                BUG_ON(!(flags & BTREE_TRIGGER_GC));

                struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
                if (!m) {
                        bch_err(c, "error allocating memory for gc_stripes, idx %llu",
                                (u64) p.ec.idx);
                        return -BCH_ERR_ENOMEM_mark_stripe_ptr;
                }

                mutex_lock(&c->ec_stripes_heap_lock);

                if (!m || !m->alive) {
                        mutex_unlock(&c->ec_stripes_heap_lock);
                        struct printbuf buf = PRINTBUF;
                        bch2_bkey_val_to_text(&buf, c, k);
                        bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n  while marking %s",
                                            (u64) p.ec.idx, buf.buf);
                        printbuf_exit(&buf);
                        bch2_inconsistent_error(c);
                        return -EIO;
                }

                m->block_sectors[p.ec.block] += sectors;

                struct bch_replicas_padded r = m->r;
                mutex_unlock(&c->ec_stripes_heap_lock);

                r.e.data_type = data_type;
                bch2_update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
        }

        return 0;
}

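/*
 * Shared trigger body for extents and btree node pointers: walks every
 * pointer, accounts cached vs. dirty sectors, and updates the replicas entry
 * describing the extent's durability. An erasure coded pointer drops
 * nr_required to 0, since the stripe provides the redundancy.
 */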
static int __trigger_extent(struct btree_trans *trans,
                            enum btree_id btree_id, unsigned level,
                            struct bkey_s_c k, unsigned flags)
{
        bool gc = flags & BTREE_TRIGGER_GC;
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bch_replicas_padded r;
        enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
                ? BCH_DATA_btree
                : BCH_DATA_user;
        s64 dirty_sectors = 0;
        int ret = 0;

        r.e.data_type   = data_type;
        r.e.nr_devs     = 0;
        r.e.nr_required = 1;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                s64 disk_sectors;
                ret = bch2_trigger_pointer(trans, btree_id, level, k, p, &disk_sectors, flags);
                if (ret < 0)
                        return ret;

                bool stale = ret > 0;

                if (p.ptr.cached) {
                        if (!stale) {
                                ret = !gc
                                        ? bch2_update_cached_sectors_list(trans, p.ptr.dev, disk_sectors)
                                        : update_cached_sectors(c, k, p.ptr.dev, disk_sectors, 0, true);
                                bch2_fs_fatal_err_on(ret && gc, c, "%s(): no replicas entry while updating cached sectors",
                                                     __func__);
                                if (ret)
                                        return ret;
                        }
                } else if (!p.has_ec) {
                        dirty_sectors          += disk_sectors;
                        r.e.devs[r.e.nr_devs++] = p.ptr.dev;
                } else {
                        ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
                        if (ret)
                                return ret;

                        /*
                         * There may be other dirty pointers in this extent, but
                         * if so they're not required for mounting if we have an
                         * erasure coded pointer in this extent:
                         */
                        r.e.nr_required = 0;
                }
        }

        if (r.e.nr_devs) {
                ret = !gc
                        ? bch2_update_replicas_list(trans, &r.e, dirty_sectors)
                        : bch2_update_replicas(c, k, &r.e, dirty_sectors, 0, true);
                if (unlikely(ret && gc)) {
                        struct printbuf buf = PRINTBUF;

                        bch2_bkey_val_to_text(&buf, c, k);
                        bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
                        printbuf_exit(&buf);
                }
                if (ret)
                        return ret;
        }

        return 0;
}

int bch2_trigger_extent(struct btree_trans *trans,
                        enum btree_id btree_id, unsigned level,
                        struct bkey_s_c old, struct bkey_s new,
                        unsigned flags)
{
        struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
        struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
        unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
        unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

        /* if pointers aren't changing - nothing to do: */
        if (new_ptrs_bytes == old_ptrs_bytes &&
            !memcmp(new_ptrs.start,
                    old_ptrs.start,
                    new_ptrs_bytes))
                return 0;

        if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
                struct bch_fs *c = trans->c;
                int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
                          (int) bch2_bkey_needs_rebalance(c, old);

                if (mod) {
                        int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new.k->p, mod > 0);
                        if (ret)
                                return ret;
                }
        }

        if (flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))
                return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree_id, level, old, new, flags);

        return 0;
}

/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
                                 enum btree_id btree_id, unsigned level,
                                 struct bkey_s_c k, unsigned flags)
{
        struct bch_fs *c = trans->c;
        unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
        s64 sectors = (s64) k.k->size * replicas;

        if (flags & BTREE_TRIGGER_OVERWRITE)
                sectors = -sectors;

        if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
                int ret = bch2_replicas_deltas_realloc(trans, 0);
                if (ret)
                        return ret;

                struct replicas_delta_list *d = trans->fs_usage_deltas;
                replicas = min(replicas, ARRAY_SIZE(d->persistent_reserved));

                d->persistent_reserved[replicas - 1] += sectors;
        }

        if (flags & BTREE_TRIGGER_GC) {
                percpu_down_read(&c->mark_lock);
                preempt_disable();

                struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc);

                replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved));
                fs_usage->reserved                              += sectors;
                fs_usage->persistent_reserved[replicas - 1]     += sectors;

                preempt_enable();
                percpu_up_read(&c->mark_lock);
        }

        return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
                          enum btree_id btree_id, unsigned level,
                          struct bkey_s_c old, struct bkey_s new,
                          unsigned flags)
{
        return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

/* Mark superblocks: */

static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
                                    struct bch_dev *ca, size_t b,
                                    enum bch_data_type type,
                                    unsigned sectors)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_i_alloc_v4 *a;
        int ret = 0;

        /*
         * Backup superblock might be past the end of our normal usable space:
         */
        if (b >= ca->mi.nbuckets)
                return 0;

        a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
        if (IS_ERR(a))
                return PTR_ERR(a);

        if (a->v.data_type && type && a->v.data_type != type) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
                              BCH_FSCK_ERR_bucket_metadata_type_mismatch,
                        "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
                        iter.pos.inode, iter.pos.offset, a->v.gen,
                        bch2_data_types[a->v.data_type],
                        bch2_data_types[type],
                        bch2_data_types[type]);
                ret = -EIO;
                goto err;
        }

        if (a->v.data_type      != type ||
            a->v.dirty_sectors  != sectors) {
                a->v.data_type          = type;
                a->v.dirty_sectors      = sectors;
                ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
        }
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
                                    struct bch_dev *ca, size_t b,
                                    enum bch_data_type type,
                                    unsigned sectors)
{
        return commit_do(trans, NULL, NULL, 0,
                        __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
}

static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
                                            struct bch_dev *ca,
                                            u64 start, u64 end,
                                            enum bch_data_type type,
                                            u64 *bucket, unsigned *bucket_sectors)
{
        do {
                u64 b = sector_to_bucket(ca, start);
                unsigned sectors =
                        min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

                if (b != *bucket && *bucket_sectors) {
                        int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
                                                                  type, *bucket_sectors);
                        if (ret)
                                return ret;

                        *bucket_sectors = 0;
                }

                *bucket         = b;
                *bucket_sectors += sectors;
                start += sectors;
        } while (start < end);

        return 0;
}

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
                                    struct bch_dev *ca)
{
        struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
        u64 bucket = 0;
        unsigned i, bucket_sectors = 0;
        int ret;

        for (i = 0; i < layout->nr_superblocks; i++) {
                u64 offset = le64_to_cpu(layout->sb_offset[i]);

                if (offset == BCH_SB_SECTOR) {
                        ret = bch2_trans_mark_metadata_sectors(trans, ca,
                                                0, BCH_SB_SECTOR,
                                                BCH_DATA_sb, &bucket, &bucket_sectors);
                        if (ret)
                                return ret;
                }

                ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
                                      offset + (1 << layout->sb_max_size_bits),
                                      BCH_DATA_sb, &bucket, &bucket_sectors);
                if (ret)
                        return ret;
        }

        if (bucket_sectors) {
                ret = bch2_trans_mark_metadata_bucket(trans, ca,
                                bucket, BCH_DATA_sb, bucket_sectors);
                if (ret)
                        return ret;
        }

        for (i = 0; i < ca->journal.nr; i++) {
                ret = bch2_trans_mark_metadata_bucket(trans, ca,
                                ca->journal.buckets[i],
                                BCH_DATA_journal, ca->mi.bucket_size);
                if (ret)
                        return ret;
        }

        return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
        int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));

        bch_err_fn(c, ret);
        return ret;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
        for_each_online_member(c, ca) {
                int ret = bch2_trans_mark_dev_sb(c, ca);
                if (ret) {
                        percpu_ref_put(&ca->ref);
                        return ret;
                }
        }

        return 0;
}

/* Disk reservations: */

#define SECTORS_CACHE   1024

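/*
 * Take a disk reservation. The fast path works out of a percpu cache
 * refilled from sectors_available in batches of SECTORS_CACHE; only when
 * that refill fails do we take sectors_available_lock and recompute free
 * space from the usage counters.
 */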
int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
                              u64 sectors, int flags)
{
        struct bch_fs_pcpu *pcpu;
        u64 old, v, get;
        s64 sectors_available;
        int ret;

        percpu_down_read(&c->mark_lock);
        preempt_disable();
        pcpu = this_cpu_ptr(c->pcpu);

        if (sectors <= pcpu->sectors_available)
                goto out;

        v = atomic64_read(&c->sectors_available);
        do {
                old = v;
                get = min((u64) sectors + SECTORS_CACHE, old);

                if (get < sectors) {
                        preempt_enable();
                        goto recalculate;
                }
        } while ((v = atomic64_cmpxchg(&c->sectors_available,
                                       old, old - get)) != old);

        pcpu->sectors_available         += get;

out:
        pcpu->sectors_available         -= sectors;
        this_cpu_add(*c->online_reserved, sectors);
        res->sectors                    += sectors;

        preempt_enable();
        percpu_up_read(&c->mark_lock);
        return 0;

recalculate:
        mutex_lock(&c->sectors_available_lock);

        percpu_u64_set(&c->pcpu->sectors_available, 0);
        sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

        if (sectors <= sectors_available ||
            (flags & BCH_DISK_RESERVATION_NOFAIL)) {
                atomic64_set(&c->sectors_available,
                             max_t(s64, 0, sectors_available - sectors));
                this_cpu_add(*c->online_reserved, sectors);
                res->sectors                    += sectors;
                ret = 0;
        } else {
                atomic64_set(&c->sectors_available, sectors_available);
                ret = -BCH_ERR_ENOSPC_disk_reservation;
        }

        mutex_unlock(&c->sectors_available_lock);
        percpu_up_read(&c->mark_lock);

        return ret;
}

/* Startup/shutdown: */

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
        struct bucket_gens *buckets =
                container_of(rcu, struct bucket_gens, rcu);

        kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
}

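/*
 * (Re)allocate the in-memory bucket gens array (and the optional nouse
 * bitmap) for @nbuckets. On resize the old contents are copied across under
 * mark_lock, and the old array is freed via RCU since readers access
 * bucket_gens under rcu_read_lock().
 */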
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
        struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
        unsigned long *buckets_nouse = NULL;
        bool resize = ca->bucket_gens != NULL;
        int ret;

        if (!(bucket_gens       = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
                                            GFP_KERNEL|__GFP_ZERO))) {
                ret = -BCH_ERR_ENOMEM_bucket_gens;
                goto err;
        }

        if ((c->opts.buckets_nouse &&
             !(buckets_nouse    = kvpmalloc(BITS_TO_LONGS(nbuckets) *
                                            sizeof(unsigned long),
                                            GFP_KERNEL|__GFP_ZERO)))) {
                ret = -BCH_ERR_ENOMEM_buckets_nouse;
                goto err;
        }

        bucket_gens->first_bucket = ca->mi.first_bucket;
        bucket_gens->nbuckets   = nbuckets;

        if (resize) {
                down_write(&c->gc_lock);
                down_write(&ca->bucket_lock);
                percpu_down_write(&c->mark_lock);
        }

        old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

        if (resize) {
                size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);

                memcpy(bucket_gens->b,
                       old_bucket_gens->b,
                       n);
                if (buckets_nouse)
                        memcpy(buckets_nouse,
                               ca->buckets_nouse,
                               BITS_TO_LONGS(n) * sizeof(unsigned long));
        }

        rcu_assign_pointer(ca->bucket_gens, bucket_gens);
        bucket_gens     = old_bucket_gens;

        swap(ca->buckets_nouse, buckets_nouse);

        nbuckets = ca->mi.nbuckets;

        if (resize) {
                percpu_up_write(&c->mark_lock);
                up_write(&ca->bucket_lock);
                up_write(&c->gc_lock);
        }

        ret = 0;
err:
        kvpfree(buckets_nouse,
                BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
        if (bucket_gens)
                call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

        return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
        unsigned i;

        kvpfree(ca->buckets_nouse,
                BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
        kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
                sizeof(struct bucket_gens) + ca->mi.nbuckets);

        for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
                free_percpu(ca->usage[i]);
        kfree(ca->usage_base);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
        unsigned i;

        ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
        if (!ca->usage_base)
                return -BCH_ERR_ENOMEM_usage_init;

        for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
                ca->usage[i] = alloc_percpu(struct bch_dev_usage);
                if (!ca->usage[i])
                        return -BCH_ERR_ENOMEM_usage_init;
        }

        return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}