1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This is currently just used
12  * as an LRU, but in the future it should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted. Rather,
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection runs, we re-sort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22  */
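
/*
 * Illustrative sketch (not part of bcache itself): the 8 bit generation
 * counter comparison has to be wraparound safe, conceptually something like
 *
 *	static inline bool gen_newer(uint8_t a, uint8_t b)	// hypothetical helper
 *	{
 *		return (int8_t) (a - b) > 0;	// valid across 8 bit wraparound
 *	}
 *
 * A pointer whose stored generation is older than its bucket's current
 * generation is stale; gen_after()/ptr_stale(), used in
 * __bch_btree_mark_key() below, express the same idea for struct bucket.
 */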
23
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38
39 #include <trace/events/bcache.h>
40
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If a data write is smaller than the hard sector size of the ssd, round up the
77  * offset in the open bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90
91 #define MAX_NEED_GC             64
92 #define MAX_SAVE_PRIO           72
93 #define MAX_GC_TIMES            100
94 #define MIN_GC_NODES            100
95 #define GC_SLEEP_MS             100
96
97 #define PTR_DIRTY_BIT           (((uint64_t) 1 << 36))
98
99 #define PTR_HASH(c, k)                                                  \
100         (((k)->ptr[0] >> (c)->bucket_bits) | PTR_GEN(k, 0))
101
102 #define insert_lock(s, b)       ((b)->level <= (s)->lock)
103
104 /*
105  * These macros are for recursing down the btree - they handle the details of
106  * locking and looking up nodes in the cache for you. They're best treated as
107  * mere syntax when reading code that uses them.
108  *
109  * op->lock determines whether we take a read or a write lock at a given depth.
110  * If you've got a read lock and find that you need a write lock (i.e. you're
111  * going to have to split), set op->lock and return -EINTR; btree_root() will
112  * call you again and you'll have the correct lock.
113  */
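
/*
 * Illustrative sketch (hypothetical callee, not code from this file): a
 * function invoked via the btree()/btree_root() macros below that finds it
 * needs a write lock might do something like
 *
 *	if (!write_lock_held && needs_split) {
 *		op->lock = b->level + 1;  // want write locks from the parent down
 *		return -EINTR;            // btree_root() retries holding them
 *	}
 *
 * since insert_lock() above makes the retry take a write lock at every
 * level <= op->lock.
 */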
114
115 /**
116  * btree - recurse down the btree on a specified key
117  * @fn:         function to call, which will be passed the child node
118  * @key:        key to recurse on
119  * @b:          parent btree node
120  * @op:         pointer to struct btree_op
121  */
122 #define btree(fn, key, b, op, ...)                                      \
123 ({                                                                      \
124         int _r, l = (b)->level - 1;                                     \
125         bool _w = l <= (op)->lock;                                      \
126         struct btree *_child = bch_btree_node_get((b)->c, op, key, l,   \
127                                                   _w, b);               \
128         if (!IS_ERR(_child)) {                                          \
129                 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);       \
130                 rw_unlock(_w, _child);                                  \
131         } else                                                          \
132                 _r = PTR_ERR(_child);                                   \
133         _r;                                                             \
134 })
135
136 /**
137  * btree_root - call a function on the root of the btree
138  * @fn:         function to call, which will be passed the child node
139  * @c:          cache set
140  * @op:         pointer to struct btree_op
141  */
142 #define btree_root(fn, c, op, ...)                                      \
143 ({                                                                      \
144         int _r = -EINTR;                                                \
145         do {                                                            \
146                 struct btree *_b = (c)->root;                           \
147                 bool _w = insert_lock(op, _b);                          \
148                 rw_lock(_w, _b, _b->level);                             \
149                 if (_b == (c)->root &&                                  \
150                     _w == insert_lock(op, _b)) {                        \
151                         _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
152                 }                                                       \
153                 rw_unlock(_w, _b);                                      \
154                 bch_cannibalize_unlock(c);                              \
155                 if (_r == -EINTR)                                       \
156                         schedule();                                     \
157         } while (_r == -EINTR);                                         \
158                                                                         \
159         finish_wait(&(c)->btree_cache_wait, &(op)->wait);               \
160         _r;                                                             \
161 })
162
163 static inline struct bset *write_block(struct btree *b)
164 {
165         return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
166 }
167
168 static void bch_btree_init_next(struct btree *b)
169 {
170         /* If not a leaf node, always sort */
171         if (b->level && b->keys.nsets)
172                 bch_btree_sort(&b->keys, &b->c->sort);
173         else
174                 bch_btree_sort_lazy(&b->keys, &b->c->sort);
175
176         if (b->written < btree_blocks(b))
177                 bch_bset_init_next(&b->keys, write_block(b),
178                                    bset_magic(&b->c->sb));
179
180 }
181
182 /* Btree key manipulation */
183
184 void bkey_put(struct cache_set *c, struct bkey *k)
185 {
186         unsigned int i;
187
188         for (i = 0; i < KEY_PTRS(k); i++)
189                 if (ptr_available(c, k, i))
190                         atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
191 }
192
193 /* Btree IO */
194
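/*
 * Checksum a bset: the first 8 bytes (the bset's csum field itself) are
 * skipped, and the crc is seeded with the node's first bucket pointer, so
 * the same bytes sitting in a different bucket should fail verification.
 */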
195 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
196 {
197         uint64_t crc = b->key.ptr[0];
198         void *data = (void *) i + 8, *end = bset_bkey_last(i);
199
200         crc = bch_crc64_update(crc, data, end - data);
201         return crc ^ 0xffffffffffffffffULL;
202 }
203
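/*
 * Parse and validate the bsets just read from disk: walk the consecutive
 * bsets sharing the first set's sequence number, checking version, size,
 * magic and checksum, sort them together (fixing up overlapping extents),
 * and, if there is room left in the node, set up an empty bset for future
 * appends.
 */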
204 void bch_btree_node_read_done(struct btree *b)
205 {
206         const char *err = "bad btree header";
207         struct bset *i = btree_bset_first(b);
208         struct btree_iter *iter;
209
210         iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
211         iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
212         iter->used = 0;
213
214 #ifdef CONFIG_BCACHE_DEBUG
215         iter->b = &b->keys;
216 #endif
217
218         if (!i->seq)
219                 goto err;
220
221         for (;
222              b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
223              i = write_block(b)) {
224                 err = "unsupported bset version";
225                 if (i->version > BCACHE_BSET_VERSION)
226                         goto err;
227
228                 err = "bad btree header";
229                 if (b->written + set_blocks(i, block_bytes(b->c)) >
230                     btree_blocks(b))
231                         goto err;
232
233                 err = "bad magic";
234                 if (i->magic != bset_magic(&b->c->sb))
235                         goto err;
236
237                 err = "bad checksum";
238                 switch (i->version) {
239                 case 0:
240                         if (i->csum != csum_set(i))
241                                 goto err;
242                         break;
243                 case BCACHE_BSET_VERSION:
244                         if (i->csum != btree_csum_set(b, i))
245                                 goto err;
246                         break;
247                 }
248
249                 err = "empty set";
250                 if (i != b->keys.set[0].data && !i->keys)
251                         goto err;
252
253                 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
254
255                 b->written += set_blocks(i, block_bytes(b->c));
256         }
257
258         err = "corrupted btree";
259         for (i = write_block(b);
260              bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
261              i = ((void *) i) + block_bytes(b->c))
262                 if (i->seq == b->keys.set[0].data->seq)
263                         goto err;
264
265         bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
266
267         i = b->keys.set[0].data;
268         err = "short btree key";
269         if (b->keys.set[0].size &&
270             bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
271                 goto err;
272
273         if (b->written < btree_blocks(b))
274                 bch_bset_init_next(&b->keys, write_block(b),
275                                    bset_magic(&b->c->sb));
276 out:
277         mempool_free(iter, &b->c->fill_iter);
278         return;
279 err:
280         set_btree_node_io_error(b);
281         bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
282                             err, PTR_BUCKET_NR(b->c, &b->key, 0),
283                             bset_block_offset(b, i), i->keys);
284         goto out;
285 }
286
287 static void btree_node_read_endio(struct bio *bio)
288 {
289         struct closure *cl = bio->bi_private;
290
291         closure_put(cl);
292 }
293
294 static void bch_btree_node_read(struct btree *b)
295 {
296         uint64_t start_time = local_clock();
297         struct closure cl;
298         struct bio *bio;
299
300         trace_bcache_btree_read(b);
301
302         closure_init_stack(&cl);
303
304         bio = bch_bbio_alloc(b->c);
305         bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
306         bio->bi_end_io  = btree_node_read_endio;
307         bio->bi_private = &cl;
308         bio->bi_opf = REQ_OP_READ | REQ_META;
309
310         bch_bio_map(bio, b->keys.set[0].data);
311
312         bch_submit_bbio(bio, b->c, &b->key, 0);
313         closure_sync(&cl);
314
315         if (bio->bi_status)
316                 set_btree_node_io_error(b);
317
318         bch_bbio_free(bio, b->c);
319
320         if (btree_node_io_error(b))
321                 goto err;
322
323         bch_btree_node_read_done(b);
324         bch_time_stats_update(&b->c->btree_read_time, start_time);
325
326         return;
327 err:
328         bch_cache_set_error(b->c, "io error reading bucket %zu",
329                             PTR_BUCKET_NR(b->c, &b->key, 0));
330 }
331
332 static void btree_complete_write(struct btree *b, struct btree_write *w)
333 {
334         if (w->prio_blocked &&
335             !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
336                 wake_up_allocators(b->c);
337
338         if (w->journal) {
339                 atomic_dec_bug(w->journal);
340                 __closure_wake_up(&b->c->journal.wait);
341         }
342
343         w->prio_blocked = 0;
344         w->journal      = NULL;
345 }
346
347 static void btree_node_write_unlock(struct closure *cl)
348 {
349         struct btree *b = container_of(cl, struct btree, io);
350
351         up(&b->io_mutex);
352 }
353
354 static void __btree_node_write_done(struct closure *cl)
355 {
356         struct btree *b = container_of(cl, struct btree, io);
357         struct btree_write *w = btree_prev_write(b);
358
359         bch_bbio_free(b->bio, b->c);
360         b->bio = NULL;
361         btree_complete_write(b, w);
362
363         if (btree_node_dirty(b))
364                 schedule_delayed_work(&b->work, 30 * HZ);
365
366         closure_return_with_destructor(cl, btree_node_write_unlock);
367 }
368
369 static void btree_node_write_done(struct closure *cl)
370 {
371         struct btree *b = container_of(cl, struct btree, io);
372
373         bio_free_pages(b->bio);
374         __btree_node_write_done(cl);
375 }
376
377 static void btree_node_write_endio(struct bio *bio)
378 {
379         struct closure *cl = bio->bi_private;
380         struct btree *b = container_of(cl, struct btree, io);
381
382         if (bio->bi_status)
383                 set_btree_node_io_error(b);
384
385         bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
386         closure_put(cl);
387 }
388
389 static void do_btree_node_write(struct btree *b)
390 {
391         struct closure *cl = &b->io;
392         struct bset *i = btree_bset_last(b);
393         BKEY_PADDED(key) k;
394
395         i->version      = BCACHE_BSET_VERSION;
396         i->csum         = btree_csum_set(b, i);
397
398         BUG_ON(b->bio);
399         b->bio = bch_bbio_alloc(b->c);
400
401         b->bio->bi_end_io       = btree_node_write_endio;
402         b->bio->bi_private      = cl;
403         b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
404         b->bio->bi_opf          = REQ_OP_WRITE | REQ_META | REQ_FUA;
405         bch_bio_map(b->bio, i);
406
407         /*
408          * If we're appending to a leaf node, we don't technically need FUA -
409          * this write just needs to be persisted before the next journal write,
410          * which will be marked FLUSH|FUA.
411          *
412          * Similarly if we're writing a new btree root - the pointer is going to
413          * be in the next journal entry.
414          *
415          * But if we're writing a new btree node (that isn't a root) or
416          * appending to a non leaf btree node, we need either FUA or a flush
417          * when we write the parent with the new pointer. FUA is cheaper than a
418          * flush, and writes appending to leaf nodes aren't blocking anything so
419          * just make all btree node writes FUA to keep things sane.
420          */
421
422         bkey_copy(&k.key, &b->key);
423         SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
424                        bset_sector_offset(&b->keys, i));
425
426         if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
427                 int j;
428                 struct bio_vec *bv;
429                 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
430
431                 bio_for_each_segment_all(bv, b->bio, j)
432                         memcpy(page_address(bv->bv_page),
433                                base + j * PAGE_SIZE, PAGE_SIZE);
434
435                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
436
437                 continue_at(cl, btree_node_write_done, NULL);
438         } else {
439                 /*
440                  * No problem for multipage bvec since the bio is
441                  * just allocated
442                  */
443                 b->bio->bi_vcnt = 0;
444                 bch_bio_map(b->bio, i);
445
446                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
447
448                 closure_sync(cl);
449                 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
450         }
451 }
452
453 void __bch_btree_node_write(struct btree *b, struct closure *parent)
454 {
455         struct bset *i = btree_bset_last(b);
456
457         lockdep_assert_held(&b->write_lock);
458
459         trace_bcache_btree_write(b);
460
461         BUG_ON(current->bio_list);
462         BUG_ON(b->written >= btree_blocks(b));
463         BUG_ON(b->written && !i->keys);
464         BUG_ON(btree_bset_first(b)->seq != i->seq);
465         bch_check_keys(&b->keys, "writing");
466
467         cancel_delayed_work(&b->work);
468
469         /* If caller isn't waiting for write, parent refcount is cache set */
470         down(&b->io_mutex);
471         closure_init(&b->io, parent ?: &b->c->cl);
472
473         clear_bit(BTREE_NODE_dirty,      &b->flags);
474         change_bit(BTREE_NODE_write_idx, &b->flags);
475
476         do_btree_node_write(b);
477
478         atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
479                         &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
480
481         b->written += set_blocks(i, block_bytes(b->c));
482 }
483
484 void bch_btree_node_write(struct btree *b, struct closure *parent)
485 {
486         unsigned int nsets = b->keys.nsets;
487
488         lockdep_assert_held(&b->lock);
489
490         __bch_btree_node_write(b, parent);
491
492         /*
493          * do verify if there was more than one set initially (i.e. we did a
494          * sort) and we sorted down to a single set:
495          */
496         if (nsets && !b->keys.nsets)
497                 bch_btree_verify(b);
498
499         bch_btree_init_next(b);
500 }
501
502 static void bch_btree_node_write_sync(struct btree *b)
503 {
504         struct closure cl;
505
506         closure_init_stack(&cl);
507
508         mutex_lock(&b->write_lock);
509         bch_btree_node_write(b, &cl);
510         mutex_unlock(&b->write_lock);
511
512         closure_sync(&cl);
513 }
514
515 static void btree_node_write_work(struct work_struct *w)
516 {
517         struct btree *b = container_of(to_delayed_work(w), struct btree, work);
518
519         mutex_lock(&b->write_lock);
520         if (btree_node_dirty(b))
521                 __bch_btree_node_write(b, NULL);
522         mutex_unlock(&b->write_lock);
523 }
524
525 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
526 {
527         struct bset *i = btree_bset_last(b);
528         struct btree_write *w = btree_current_write(b);
529
530         lockdep_assert_held(&b->write_lock);
531
532         BUG_ON(!b->written);
533         BUG_ON(!i->keys);
534
535         if (!btree_node_dirty(b))
536                 schedule_delayed_work(&b->work, 30 * HZ);
537
538         set_btree_node_dirty(b);
539
540         if (journal_ref) {
541                 if (w->journal &&
542                     journal_pin_cmp(b->c, w->journal, journal_ref)) {
543                         atomic_dec_bug(w->journal);
544                         w->journal = NULL;
545                 }
546
547                 if (!w->journal) {
548                         w->journal = journal_ref;
549                         atomic_inc(w->journal);
550                 }
551         }
552
553         /* Force write if set is too big */
554         if (set_bytes(i) > PAGE_SIZE - 48 &&
555             !current->bio_list)
556                 bch_btree_node_write(b, NULL);
557 }
558
559 /*
560  * Btree in memory cache - allocation/freeing
561  * mca -> memory cache
562  */
563
564 #define mca_reserve(c)  (((c->root && c->root->level)           \
565                           ? c->root->level : 1) * 8 + 16)
566 #define mca_can_free(c)                                         \
567         max_t(int, 0, c->btree_cache_used - mca_reserve(c))
568
569 static void mca_data_free(struct btree *b)
570 {
571         BUG_ON(b->io_mutex.count != 1);
572
573         bch_btree_keys_free(&b->keys);
574
575         b->c->btree_cache_used--;
576         list_move(&b->list, &b->c->btree_cache_freed);
577 }
578
579 static void mca_bucket_free(struct btree *b)
580 {
581         BUG_ON(btree_node_dirty(b));
582
583         b->key.ptr[0] = 0;
584         hlist_del_init_rcu(&b->hash);
585         list_move(&b->list, &b->c->btree_cache_freeable);
586 }
587
588 static unsigned int btree_order(struct bkey *k)
589 {
590         return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
591 }
592
593 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
594 {
595         if (!bch_btree_keys_alloc(&b->keys,
596                                   max_t(unsigned int,
597                                         ilog2(b->c->btree_pages),
598                                         btree_order(k)),
599                                   gfp)) {
600                 b->c->btree_cache_used++;
601                 list_move(&b->list, &b->c->btree_cache);
602         } else {
603                 list_move(&b->list, &b->c->btree_cache_freed);
604         }
605 }
606
607 static struct btree *mca_bucket_alloc(struct cache_set *c,
608                                       struct bkey *k, gfp_t gfp)
609 {
610         struct btree *b = kzalloc(sizeof(struct btree), gfp);
611
612         if (!b)
613                 return NULL;
614
615         init_rwsem(&b->lock);
616         lockdep_set_novalidate_class(&b->lock);
617         mutex_init(&b->write_lock);
618         lockdep_set_novalidate_class(&b->write_lock);
619         INIT_LIST_HEAD(&b->list);
620         INIT_DELAYED_WORK(&b->work, btree_node_write_work);
621         b->c = c;
622         sema_init(&b->io_mutex, 1);
623
624         mca_data_alloc(b, k, gfp);
625         return b;
626 }
627
628 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
629 {
630         struct closure cl;
631
632         closure_init_stack(&cl);
633         lockdep_assert_held(&b->c->bucket_lock);
634
635         if (!down_write_trylock(&b->lock))
636                 return -ENOMEM;
637
638         BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
639
640         if (b->keys.page_order < min_order)
641                 goto out_unlock;
642
643         if (!flush) {
644                 if (btree_node_dirty(b))
645                         goto out_unlock;
646
647                 if (down_trylock(&b->io_mutex))
648                         goto out_unlock;
649                 up(&b->io_mutex);
650         }
651
652         mutex_lock(&b->write_lock);
653         if (btree_node_dirty(b))
654                 __bch_btree_node_write(b, &cl);
655         mutex_unlock(&b->write_lock);
656
657         closure_sync(&cl);
658
659         /* wait for any in flight btree write */
660         down(&b->io_mutex);
661         up(&b->io_mutex);
662
663         return 0;
664 out_unlock:
665         rw_unlock(true, b);
666         return -ENOMEM;
667 }
668
669 static unsigned long bch_mca_scan(struct shrinker *shrink,
670                                   struct shrink_control *sc)
671 {
672         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
673         struct btree *b, *t;
674         unsigned long i, nr = sc->nr_to_scan;
675         unsigned long freed = 0;
676         unsigned int btree_cache_used;
677
678         if (c->shrinker_disabled)
679                 return SHRINK_STOP;
680
681         if (c->btree_cache_alloc_lock)
682                 return SHRINK_STOP;
683
684         /* Return -1 if we can't do anything right now */
685         if (sc->gfp_mask & __GFP_IO)
686                 mutex_lock(&c->bucket_lock);
687         else if (!mutex_trylock(&c->bucket_lock))
688                 return -1;
689
690         /*
691          * It's _really_ critical that we don't free too many btree nodes - we
692          * have to always leave ourselves a reserve. The reserve is how we
693          * guarantee that allocating memory for a new btree node can always
694          * succeed, so that inserting keys into the btree can always succeed and
695          * IO can always make forward progress:
696          */
697         nr /= c->btree_pages;
698         nr = min_t(unsigned long, nr, mca_can_free(c));
699
700         i = 0;
701         btree_cache_used = c->btree_cache_used;
702         list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
703                 if (nr <= 0)
704                         goto out;
705
706                 if (++i > 3 &&
707                     !mca_reap(b, 0, false)) {
708                         mca_data_free(b);
709                         rw_unlock(true, b);
710                         freed++;
711                 }
712                 nr--;
713         }
714
715         for (;  (nr--) && i < btree_cache_used; i++) {
716                 if (list_empty(&c->btree_cache))
717                         goto out;
718
719                 b = list_first_entry(&c->btree_cache, struct btree, list);
720                 list_rotate_left(&c->btree_cache);
721
722                 if (!b->accessed &&
723                     !mca_reap(b, 0, false)) {
724                         mca_bucket_free(b);
725                         mca_data_free(b);
726                         rw_unlock(true, b);
727                         freed++;
728                 } else
729                         b->accessed = 0;
730         }
731 out:
732         mutex_unlock(&c->bucket_lock);
733         return freed * c->btree_pages;
734 }
735
736 static unsigned long bch_mca_count(struct shrinker *shrink,
737                                    struct shrink_control *sc)
738 {
739         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
740
741         if (c->shrinker_disabled)
742                 return 0;
743
744         if (c->btree_cache_alloc_lock)
745                 return 0;
746
747         return mca_can_free(c) * c->btree_pages;
748 }
749
750 void bch_btree_cache_free(struct cache_set *c)
751 {
752         struct btree *b;
753         struct closure cl;
754
755         closure_init_stack(&cl);
756
757         if (c->shrink.list.next)
758                 unregister_shrinker(&c->shrink);
759
760         mutex_lock(&c->bucket_lock);
761
762 #ifdef CONFIG_BCACHE_DEBUG
763         if (c->verify_data)
764                 list_move(&c->verify_data->list, &c->btree_cache);
765
766         free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
767 #endif
768
769         list_splice(&c->btree_cache_freeable,
770                     &c->btree_cache);
771
772         while (!list_empty(&c->btree_cache)) {
773                 b = list_first_entry(&c->btree_cache, struct btree, list);
774
775                 if (btree_node_dirty(b))
776                         btree_complete_write(b, btree_current_write(b));
777                 clear_bit(BTREE_NODE_dirty, &b->flags);
778
779                 mca_data_free(b);
780         }
781
782         while (!list_empty(&c->btree_cache_freed)) {
783                 b = list_first_entry(&c->btree_cache_freed,
784                                      struct btree, list);
785                 list_del(&b->list);
786                 cancel_delayed_work_sync(&b->work);
787                 kfree(b);
788         }
789
790         mutex_unlock(&c->bucket_lock);
791 }
792
793 int bch_btree_cache_alloc(struct cache_set *c)
794 {
795         unsigned int i;
796
797         for (i = 0; i < mca_reserve(c); i++)
798                 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
799                         return -ENOMEM;
800
801         list_splice_init(&c->btree_cache,
802                          &c->btree_cache_freeable);
803
804 #ifdef CONFIG_BCACHE_DEBUG
805         mutex_init(&c->verify_lock);
806
807         c->verify_ondisk = (void *)
808                 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
809
810         c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
811
812         if (c->verify_data &&
813             c->verify_data->keys.set->data)
814                 list_del_init(&c->verify_data->list);
815         else
816                 c->verify_data = NULL;
817 #endif
818
819         c->shrink.count_objects = bch_mca_count;
820         c->shrink.scan_objects = bch_mca_scan;
821         c->shrink.seeks = 4;
822         c->shrink.batch = c->btree_pages * 2;
823
824         if (register_shrinker(&c->shrink))
825                 pr_warn("bcache: %s: could not register shrinker",
826                                 __func__);
827
828         return 0;
829 }
830
831 /* Btree in memory cache - hash table */
832
833 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
834 {
835         return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
836 }
837
838 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
839 {
840         struct btree *b;
841
842         rcu_read_lock();
843         hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
844                 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
845                         goto out;
846         b = NULL;
847 out:
848         rcu_read_unlock();
849         return b;
850 }
851
852 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
853 {
854         struct task_struct *old;
855
856         old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
857         if (old && old != current) {
858                 if (op)
859                         prepare_to_wait(&c->btree_cache_wait, &op->wait,
860                                         TASK_UNINTERRUPTIBLE);
861                 return -EINTR;
862         }
863
864         return 0;
865 }
866
867 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
868                                      struct bkey *k)
869 {
870         struct btree *b;
871
872         trace_bcache_btree_cache_cannibalize(c);
873
874         if (mca_cannibalize_lock(c, op))
875                 return ERR_PTR(-EINTR);
876
877         list_for_each_entry_reverse(b, &c->btree_cache, list)
878                 if (!mca_reap(b, btree_order(k), false))
879                         return b;
880
881         list_for_each_entry_reverse(b, &c->btree_cache, list)
882                 if (!mca_reap(b, btree_order(k), true))
883                         return b;
884
885         WARN(1, "btree cache cannibalize failed\n");
886         return ERR_PTR(-ENOMEM);
887 }
888
889 /*
890  * We can only have one thread cannibalizing other cached btree nodes at a time,
891  * or we'll deadlock. We use an open coded mutex to ensure that, which
892  * mca_cannibalize_lock() takes. This means every time we unlock the root of
893  * the btree, we need to release this lock if we have it held.
894  */
895 static void bch_cannibalize_unlock(struct cache_set *c)
896 {
897         if (c->btree_cache_alloc_lock == current) {
898                 c->btree_cache_alloc_lock = NULL;
899                 wake_up(&c->btree_cache_wait);
900         }
901 }
902
903 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
904                                struct bkey *k, int level)
905 {
906         struct btree *b;
907
908         BUG_ON(current->bio_list);
909
910         lockdep_assert_held(&c->bucket_lock);
911
912         if (mca_find(c, k))
913                 return NULL;
914
915         /* btree_node_free() doesn't free memory; it sticks the node on the end
916          * of the freeable list. Check if there are any freed nodes there:
917          */
918         list_for_each_entry(b, &c->btree_cache_freeable, list)
919                 if (!mca_reap(b, btree_order(k), false))
920                         goto out;
921
922         /* We never free struct btree itself, just the memory that holds the on
923          * disk node. Check the freed list before allocating a new one:
924          */
925         list_for_each_entry(b, &c->btree_cache_freed, list)
926                 if (!mca_reap(b, 0, false)) {
927                         mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
928                         if (!b->keys.set[0].data)
929                                 goto err;
930                         else
931                                 goto out;
932                 }
933
934         b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
935         if (!b)
936                 goto err;
937
938         BUG_ON(!down_write_trylock(&b->lock));
939         if (!b->keys.set->data)
940                 goto err;
941 out:
942         BUG_ON(b->io_mutex.count != 1);
943
944         bkey_copy(&b->key, k);
945         list_move(&b->list, &c->btree_cache);
946         hlist_del_init_rcu(&b->hash);
947         hlist_add_head_rcu(&b->hash, mca_hash(c, k));
948
949         lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
950         b->parent       = (void *) ~0UL;
951         b->flags        = 0;
952         b->written      = 0;
953         b->level        = level;
954
955         if (!b->level)
956                 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
957                                     &b->c->expensive_debug_checks);
958         else
959                 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
960                                     &b->c->expensive_debug_checks);
961
962         return b;
963 err:
964         if (b)
965                 rw_unlock(true, b);
966
967         b = mca_cannibalize(c, op, k);
968         if (!IS_ERR(b))
969                 goto out;
970
971         return b;
972 }
973
974 /*
975  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
976  * in from disk if necessary.
977  *
978  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
979  *
980  * The btree node will have either a read or a write lock held, depending on
981  * level and op->lock.
982  */
983 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
984                                  struct bkey *k, int level, bool write,
985                                  struct btree *parent)
986 {
987         int i = 0;
988         struct btree *b;
989
990         BUG_ON(level < 0);
991 retry:
992         b = mca_find(c, k);
993
994         if (!b) {
995                 if (current->bio_list)
996                         return ERR_PTR(-EAGAIN);
997
998                 mutex_lock(&c->bucket_lock);
999                 b = mca_alloc(c, op, k, level);
1000                 mutex_unlock(&c->bucket_lock);
1001
1002                 if (!b)
1003                         goto retry;
1004                 if (IS_ERR(b))
1005                         return b;
1006
1007                 bch_btree_node_read(b);
1008
1009                 if (!write)
1010                         downgrade_write(&b->lock);
1011         } else {
1012                 rw_lock(write, b, level);
1013                 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1014                         rw_unlock(write, b);
1015                         goto retry;
1016                 }
1017                 BUG_ON(b->level != level);
1018         }
1019
1020         if (btree_node_io_error(b)) {
1021                 rw_unlock(write, b);
1022                 return ERR_PTR(-EIO);
1023         }
1024
1025         BUG_ON(!b->written);
1026
1027         b->parent = parent;
1028         b->accessed = 1;
1029
1030         for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1031                 prefetch(b->keys.set[i].tree);
1032                 prefetch(b->keys.set[i].data);
1033         }
1034
1035         for (; i <= b->keys.nsets; i++)
1036                 prefetch(b->keys.set[i].data);
1037
1038         return b;
1039 }
1040
1041 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1042 {
1043         struct btree *b;
1044
1045         mutex_lock(&parent->c->bucket_lock);
1046         b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1047         mutex_unlock(&parent->c->bucket_lock);
1048
1049         if (!IS_ERR_OR_NULL(b)) {
1050                 b->parent = parent;
1051                 bch_btree_node_read(b);
1052                 rw_unlock(true, b);
1053         }
1054 }
1055
1056 /* Btree alloc */
1057
1058 static void btree_node_free(struct btree *b)
1059 {
1060         trace_bcache_btree_node_free(b);
1061
1062         BUG_ON(b == b->c->root);
1063
1064         mutex_lock(&b->write_lock);
1065
1066         if (btree_node_dirty(b))
1067                 btree_complete_write(b, btree_current_write(b));
1068         clear_bit(BTREE_NODE_dirty, &b->flags);
1069
1070         mutex_unlock(&b->write_lock);
1071
1072         cancel_delayed_work(&b->work);
1073
1074         mutex_lock(&b->c->bucket_lock);
1075         bch_bucket_free(b->c, &b->key);
1076         mca_bucket_free(b);
1077         mutex_unlock(&b->c->bucket_lock);
1078 }
1079
1080 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1081                                      int level, bool wait,
1082                                      struct btree *parent)
1083 {
1084         BKEY_PADDED(key) k;
1085         struct btree *b = ERR_PTR(-EAGAIN);
1086
1087         mutex_lock(&c->bucket_lock);
1088 retry:
1089         if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1090                 goto err;
1091
1092         bkey_put(c, &k.key);
1093         SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1094
1095         b = mca_alloc(c, op, &k.key, level);
1096         if (IS_ERR(b))
1097                 goto err_free;
1098
1099         if (!b) {
1100                 cache_bug(c,
1101                         "Tried to allocate bucket that was in btree cache");
1102                 goto retry;
1103         }
1104
1105         b->accessed = 1;
1106         b->parent = parent;
1107         bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1108
1109         mutex_unlock(&c->bucket_lock);
1110
1111         trace_bcache_btree_node_alloc(b);
1112         return b;
1113 err_free:
1114         bch_bucket_free(c, &k.key);
1115 err:
1116         mutex_unlock(&c->bucket_lock);
1117
1118         trace_bcache_btree_node_alloc_fail(c);
1119         return b;
1120 }
1121
1122 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1123                                           struct btree_op *op, int level,
1124                                           struct btree *parent)
1125 {
1126         return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1127 }
1128
1129 static struct btree *btree_node_alloc_replacement(struct btree *b,
1130                                                   struct btree_op *op)
1131 {
1132         struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1133
1134         if (!IS_ERR_OR_NULL(n)) {
1135                 mutex_lock(&n->write_lock);
1136                 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1137                 bkey_copy_key(&n->key, &b->key);
1138                 mutex_unlock(&n->write_lock);
1139         }
1140
1141         return n;
1142 }
1143
1144 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1145 {
1146         unsigned int i;
1147
1148         mutex_lock(&b->c->bucket_lock);
1149
1150         atomic_inc(&b->c->prio_blocked);
1151
1152         bkey_copy(k, &b->key);
1153         bkey_copy_key(k, &ZERO_KEY);
1154
1155         for (i = 0; i < KEY_PTRS(k); i++)
1156                 SET_PTR_GEN(k, i,
1157                             bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1158                                         PTR_BUCKET(b->c, &b->key, i)));
1159
1160         mutex_unlock(&b->c->bucket_lock);
1161 }
1162
1163 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1164 {
1165         struct cache_set *c = b->c;
1166         struct cache *ca;
1167         unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1168
1169         mutex_lock(&c->bucket_lock);
1170
1171         for_each_cache(ca, c, i)
1172                 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1173                         if (op)
1174                                 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1175                                                 TASK_UNINTERRUPTIBLE);
1176                         mutex_unlock(&c->bucket_lock);
1177                         return -EINTR;
1178                 }
1179
1180         mutex_unlock(&c->bucket_lock);
1181
1182         return mca_cannibalize_lock(b->c, op);
1183 }
1184
1185 /* Garbage collection */
1186
1187 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1188                                     struct bkey *k)
1189 {
1190         uint8_t stale = 0;
1191         unsigned int i;
1192         struct bucket *g;
1193
1194         /*
1195          * ptr_invalid() can't return true for the keys that mark btree nodes as
1196          * freed, but since ptr_bad() returns true we'll never actually use them
1197          * for anything and thus we don't want to mark their pointers here
1198          */
1199         if (!bkey_cmp(k, &ZERO_KEY))
1200                 return stale;
1201
1202         for (i = 0; i < KEY_PTRS(k); i++) {
1203                 if (!ptr_available(c, k, i))
1204                         continue;
1205
1206                 g = PTR_BUCKET(c, k, i);
1207
1208                 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1209                         g->last_gc = PTR_GEN(k, i);
1210
1211                 if (ptr_stale(c, k, i)) {
1212                         stale = max(stale, ptr_stale(c, k, i));
1213                         continue;
1214                 }
1215
1216                 cache_bug_on(GC_MARK(g) &&
1217                              (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1218                              c, "inconsistent ptrs: mark = %llu, level = %i",
1219                              GC_MARK(g), level);
1220
1221                 if (level)
1222                         SET_GC_MARK(g, GC_MARK_METADATA);
1223                 else if (KEY_DIRTY(k))
1224                         SET_GC_MARK(g, GC_MARK_DIRTY);
1225                 else if (!GC_MARK(g))
1226                         SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1227
1228                 /* guard against overflow */
1229                 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1230                                              GC_SECTORS_USED(g) + KEY_SIZE(k),
1231                                              MAX_GC_SECTORS_USED));
1232
1233                 BUG_ON(!GC_SECTORS_USED(g));
1234         }
1235
1236         return stale;
1237 }
1238
1239 #define btree_mark_key(b, k)    __bch_btree_mark_key(b->c, b->level, k)
1240
1241 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1242 {
1243         unsigned int i;
1244
1245         for (i = 0; i < KEY_PTRS(k); i++)
1246                 if (ptr_available(c, k, i) &&
1247                     !ptr_stale(c, k, i)) {
1248                         struct bucket *b = PTR_BUCKET(c, k, i);
1249
1250                         b->gen = PTR_GEN(k, i);
1251
1252                         if (level && bkey_cmp(k, &ZERO_KEY))
1253                                 b->prio = BTREE_PRIO;
1254                         else if (!level && b->prio == BTREE_PRIO)
1255                                 b->prio = INITIAL_PRIO;
1256                 }
1257
1258         __bch_btree_mark_key(c, level, k);
1259 }
1260
1261 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1262 {
1263         stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1264 }
1265
1266 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1267 {
1268         uint8_t stale = 0;
1269         unsigned int keys = 0, good_keys = 0;
1270         struct bkey *k;
1271         struct btree_iter iter;
1272         struct bset_tree *t;
1273
1274         gc->nodes++;
1275
1276         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1277                 stale = max(stale, btree_mark_key(b, k));
1278                 keys++;
1279
1280                 if (bch_ptr_bad(&b->keys, k))
1281                         continue;
1282
1283                 gc->key_bytes += bkey_u64s(k);
1284                 gc->nkeys++;
1285                 good_keys++;
1286
1287                 gc->data += KEY_SIZE(k);
1288         }
1289
1290         for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1291                 btree_bug_on(t->size &&
1292                              bset_written(&b->keys, t) &&
1293                              bkey_cmp(&b->key, &t->end) < 0,
1294                              b, "found short btree key in gc");
1295
1296         if (b->c->gc_always_rewrite)
1297                 return true;
1298
1299         if (stale > 10)
1300                 return true;
1301
1302         if ((keys - good_keys) * 2 > keys)
1303                 return true;
1304
1305         return false;
1306 }
1307
1308 #define GC_MERGE_NODES  4U
1309
1310 struct gc_merge_info {
1311         struct btree    *b;
1312         unsigned int    keys;
1313 };
1314
1315 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1316                                  struct keylist *insert_keys,
1317                                  atomic_t *journal_ref,
1318                                  struct bkey *replace_key);
1319
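/*
 * Try to coalesce the (up to GC_MERGE_NODES) consecutive nodes in r into
 * fewer replacement nodes, when the combined keys would fit into nodes - 1
 * nodes at roughly 2/3 capacity. Returns -EINTR if nodes were coalesced
 * (the caller's iterator is now invalid), 0 if nothing was done.
 */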
1320 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1321                              struct gc_stat *gc, struct gc_merge_info *r)
1322 {
1323         unsigned int i, nodes = 0, keys = 0, blocks;
1324         struct btree *new_nodes[GC_MERGE_NODES];
1325         struct keylist keylist;
1326         struct closure cl;
1327         struct bkey *k;
1328
1329         bch_keylist_init(&keylist);
1330
1331         if (btree_check_reserve(b, NULL))
1332                 return 0;
1333
1334         memset(new_nodes, 0, sizeof(new_nodes));
1335         closure_init_stack(&cl);
1336
1337         while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1338                 keys += r[nodes++].keys;
1339
1340         blocks = btree_default_blocks(b->c) * 2 / 3;
1341
1342         if (nodes < 2 ||
1343             __set_blocks(b->keys.set[0].data, keys,
1344                          block_bytes(b->c)) > blocks * (nodes - 1))
1345                 return 0;
1346
1347         for (i = 0; i < nodes; i++) {
1348                 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1349                 if (IS_ERR_OR_NULL(new_nodes[i]))
1350                         goto out_nocoalesce;
1351         }
1352
1353         /*
1354          * We have to check the reserve here, after we've allocated our new
1355          * nodes, to make sure the insert below will succeed - we also check
1356          * before as an optimization to potentially avoid a bunch of expensive
1357          * allocs/sorts
1358          */
1359         if (btree_check_reserve(b, NULL))
1360                 goto out_nocoalesce;
1361
1362         for (i = 0; i < nodes; i++)
1363                 mutex_lock(&new_nodes[i]->write_lock);
1364
1365         for (i = nodes - 1; i > 0; --i) {
1366                 struct bset *n1 = btree_bset_first(new_nodes[i]);
1367                 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1368                 struct bkey *k, *last = NULL;
1369
1370                 keys = 0;
1371
1372                 if (i > 1) {
1373                         for (k = n2->start;
1374                              k < bset_bkey_last(n2);
1375                              k = bkey_next(k)) {
1376                                 if (__set_blocks(n1, n1->keys + keys +
1377                                                  bkey_u64s(k),
1378                                                  block_bytes(b->c)) > blocks)
1379                                         break;
1380
1381                                 last = k;
1382                                 keys += bkey_u64s(k);
1383                         }
1384                 } else {
1385                         /*
1386                          * Last node we're not getting rid of - we're getting
1387                          * rid of the node at r[0]. Have to try and fit all of
1388                          * the remaining keys into this node; we can't ensure
1389                          * they will always fit due to rounding and variable
1390                          * length keys (shouldn't be possible in practice,
1391                          * though)
1392                          */
1393                         if (__set_blocks(n1, n1->keys + n2->keys,
1394                                          block_bytes(b->c)) >
1395                             btree_blocks(new_nodes[i]))
1396                                 goto out_nocoalesce;
1397
1398                         keys = n2->keys;
1399                         /* Take the key of the node we're getting rid of */
1400                         last = &r->b->key;
1401                 }
1402
1403                 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1404                        btree_blocks(new_nodes[i]));
1405
1406                 if (last)
1407                         bkey_copy_key(&new_nodes[i]->key, last);
1408
1409                 memcpy(bset_bkey_last(n1),
1410                        n2->start,
1411                        (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1412
1413                 n1->keys += keys;
1414                 r[i].keys = n1->keys;
1415
1416                 memmove(n2->start,
1417                         bset_bkey_idx(n2, keys),
1418                         (void *) bset_bkey_last(n2) -
1419                         (void *) bset_bkey_idx(n2, keys));
1420
1421                 n2->keys -= keys;
1422
1423                 if (__bch_keylist_realloc(&keylist,
1424                                           bkey_u64s(&new_nodes[i]->key)))
1425                         goto out_nocoalesce;
1426
1427                 bch_btree_node_write(new_nodes[i], &cl);
1428                 bch_keylist_add(&keylist, &new_nodes[i]->key);
1429         }
1430
1431         for (i = 0; i < nodes; i++)
1432                 mutex_unlock(&new_nodes[i]->write_lock);
1433
1434         closure_sync(&cl);
1435
1436         /* We emptied out this node */
1437         BUG_ON(btree_bset_first(new_nodes[0])->keys);
1438         btree_node_free(new_nodes[0]);
1439         rw_unlock(true, new_nodes[0]);
1440         new_nodes[0] = NULL;
1441
1442         for (i = 0; i < nodes; i++) {
1443                 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1444                         goto out_nocoalesce;
1445
1446                 make_btree_freeing_key(r[i].b, keylist.top);
1447                 bch_keylist_push(&keylist);
1448         }
1449
1450         bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1451         BUG_ON(!bch_keylist_empty(&keylist));
1452
1453         for (i = 0; i < nodes; i++) {
1454                 btree_node_free(r[i].b);
1455                 rw_unlock(true, r[i].b);
1456
1457                 r[i].b = new_nodes[i];
1458         }
1459
1460         memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1461         r[nodes - 1].b = ERR_PTR(-EINTR);
1462
1463         trace_bcache_btree_gc_coalesce(nodes);
1464         gc->nodes--;
1465
1466         bch_keylist_free(&keylist);
1467
1468         /* Invalidated our iterator */
1469         return -EINTR;
1470
1471 out_nocoalesce:
1472         closure_sync(&cl);
1473         while ((k = bch_keylist_pop(&keylist)))
1474                 if (!bkey_cmp(k, &ZERO_KEY))
1475                         atomic_dec(&b->c->prio_blocked);
1476
1477         bch_keylist_free(&keylist);
1478
1479         for (i = 0; i < nodes; i++)
1480                 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1481                         btree_node_free(new_nodes[i]);
1482                         rw_unlock(true, new_nodes[i]);
1483                 }
1484         return 0;
1485 }
1486
1487 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1488                                  struct btree *replace)
1489 {
1490         struct keylist keys;
1491         struct btree *n;
1492
1493         if (btree_check_reserve(b, NULL))
1494                 return 0;
1495
1496         n = btree_node_alloc_replacement(replace, NULL);
1497
1498         /* recheck reserve after allocating replacement node */
1499         if (btree_check_reserve(b, NULL)) {
1500                 btree_node_free(n);
1501                 rw_unlock(true, n);
1502                 return 0;
1503         }
1504
1505         bch_btree_node_write_sync(n);
1506
1507         bch_keylist_init(&keys);
1508         bch_keylist_add(&keys, &n->key);
1509
1510         make_btree_freeing_key(replace, keys.top);
1511         bch_keylist_push(&keys);
1512
1513         bch_btree_insert_node(b, op, &keys, NULL, NULL);
1514         BUG_ON(!bch_keylist_empty(&keys));
1515
1516         btree_node_free(replace);
1517         rw_unlock(true, n);
1518
1519         /* Invalidated our iterator */
1520         return -EINTR;
1521 }
1522
1523 static unsigned int btree_gc_count_keys(struct btree *b)
1524 {
1525         struct bkey *k;
1526         struct btree_iter iter;
1527         unsigned int ret = 0;
1528
1529         for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1530                 ret += bkey_u64s(k);
1531
1532         return ret;
1533 }
1534
1535 static size_t btree_gc_min_nodes(struct cache_set *c)
1536 {
1537         size_t min_nodes;
1538
1539         /*
1540          * Incremental GC pauses for 100ms whenever front side I/O
1541          * arrives, so if GC only ever processed a constant number
1542          * (100) of nodes per pass, then with many btree nodes GC
1543          * would last a very long time, the front side I/Os would
1544          * run out of buckets (since no new bucket can be allocated
1545          * during GC) and would be blocked again. So GC should not
1546          * process a constant number of nodes, but a number that
1547          * varies with the total number of btree nodes: the work is
1548          * divided into a constant number (MAX_GC_TIMES, i.e. 100) of
1549          * passes, so when there are many btree nodes GC processes
1550          * more nodes each pass, and otherwise fewer nodes each pass
1551          * (but never fewer than MIN_GC_NODES).
1552          */
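        /*
         * For example (illustrative numbers only): with 1,000,000 btree
         * nodes this works out to 10,000 nodes per GC pass, while with
         * 5,000 nodes the MIN_GC_NODES floor of 100 applies instead.
         */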
1553         min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1554         if (min_nodes < MIN_GC_NODES)
1555                 min_nodes = MIN_GC_NODES;
1556
1557         return min_nodes;
1558 }
1559
1560
1561 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1562                             struct closure *writes, struct gc_stat *gc)
1563 {
1564         int ret = 0;
1565         bool should_rewrite;
1566         struct bkey *k;
1567         struct btree_iter iter;
1568         struct gc_merge_info r[GC_MERGE_NODES];
1569         struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1570
1571         bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1572
1573         for (i = r; i < r + ARRAY_SIZE(r); i++)
1574                 i->b = ERR_PTR(-EINTR);
1575
1576         while (1) {
1577                 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1578                 if (k) {
1579                         r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1580                                                   true, b);
1581                         if (IS_ERR(r->b)) {
1582                                 ret = PTR_ERR(r->b);
1583                                 break;
1584                         }
1585
1586                         r->keys = btree_gc_count_keys(r->b);
1587
1588                         ret = btree_gc_coalesce(b, op, gc, r);
1589                         if (ret)
1590                                 break;
1591                 }
1592
1593                 if (!last->b)
1594                         break;
1595
1596                 if (!IS_ERR(last->b)) {
1597                         should_rewrite = btree_gc_mark_node(last->b, gc);
1598                         if (should_rewrite) {
1599                                 ret = btree_gc_rewrite_node(b, op, last->b);
1600                                 if (ret)
1601                                         break;
1602                         }
1603
1604                         if (last->b->level) {
1605                                 ret = btree_gc_recurse(last->b, op, writes, gc);
1606                                 if (ret)
1607                                         break;
1608                         }
1609
1610                         bkey_copy_key(&b->c->gc_done, &last->b->key);
1611
1612                         /*
1613                          * Must flush leaf nodes before gc ends, since replace
1614                          * operations aren't journalled
1615                          */
1616                         mutex_lock(&last->b->write_lock);
1617                         if (btree_node_dirty(last->b))
1618                                 bch_btree_node_write(last->b, writes);
1619                         mutex_unlock(&last->b->write_lock);
1620                         rw_unlock(true, last->b);
1621                 }
1622
1623                 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1624                 r->b = NULL;
1625
1626                 if (atomic_read(&b->c->search_inflight) &&
1627                     gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1628                         gc->nodes_pre = gc->nodes;
1629                         ret = -EAGAIN;
1630                         break;
1631                 }
1632
1633                 if (need_resched()) {
1634                         ret = -EAGAIN;
1635                         break;
1636                 }
1637         }
1638
1639         for (i = r; i < r + ARRAY_SIZE(r); i++)
1640                 if (!IS_ERR_OR_NULL(i->b)) {
1641                         mutex_lock(&i->b->write_lock);
1642                         if (btree_node_dirty(i->b))
1643                                 bch_btree_node_write(i->b, writes);
1644                         mutex_unlock(&i->b->write_lock);
1645                         rw_unlock(true, i->b);
1646                 }
1647
1648         return ret;
1649 }
1650
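/*
 * gc for the root node: optionally rewrite the root itself, mark its key,
 * then recurse into the rest of the tree.  Returns -EINTR after replacing
 * the root, since the traversal has to be restarted from the new root.
 */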
1651 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1652                              struct closure *writes, struct gc_stat *gc)
1653 {
1654         struct btree *n = NULL;
1655         int ret = 0;
1656         bool should_rewrite;
1657
1658         should_rewrite = btree_gc_mark_node(b, gc);
1659         if (should_rewrite) {
1660                 n = btree_node_alloc_replacement(b, NULL);
1661
1662                 if (!IS_ERR_OR_NULL(n)) {
1663                         bch_btree_node_write_sync(n);
1664
1665                         bch_btree_set_root(n);
1666                         btree_node_free(b);
1667                         rw_unlock(true, n);
1668
1669                         return -EINTR;
1670                 }
1671         }
1672
1673         __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1674
1675         if (b->level) {
1676                 ret = btree_gc_recurse(b, op, writes, gc);
1677                 if (ret)
1678                         return ret;
1679         }
1680
1681         bkey_copy_key(&b->c->gc_done, &b->key);
1682
1683         return ret;
1684 }
1685
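/*
 * Prepare a gc pass: under bucket_lock, reset gc_done, snapshot each
 * bucket's generation in last_gc, and clear the gc mark and sector count of
 * every unpinned bucket so the marking pass can rebuild them from scratch.
 */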
1686 static void btree_gc_start(struct cache_set *c)
1687 {
1688         struct cache *ca;
1689         struct bucket *b;
1690         unsigned int i;
1691
1692         if (!c->gc_mark_valid)
1693                 return;
1694
1695         mutex_lock(&c->bucket_lock);
1696
1697         c->gc_mark_valid = 0;
1698         c->gc_done = ZERO_KEY;
1699
1700         for_each_cache(ca, c, i)
1701                 for_each_bucket(b, ca) {
1702                         b->last_gc = b->gen;
1703                         if (!atomic_read(&b->pin)) {
1704                                 SET_GC_MARK(b, 0);
1705                                 SET_GC_SECTORS_USED(b, 0);
1706                         }
1707                 }
1708
1709         mutex_unlock(&c->bucket_lock);
1710 }
1711
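/*
 * Finish a gc pass: re-mark the metadata buckets (uuids, journal and prio
 * buckets), mark buckets referenced by in-flight writeback keys as dirty so
 * they aren't reclaimed, then recompute need_gc and the number of available
 * buckets.
 */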
1712 static void bch_btree_gc_finish(struct cache_set *c)
1713 {
1714         struct bucket *b;
1715         struct cache *ca;
1716         unsigned int i;
1717
1718         mutex_lock(&c->bucket_lock);
1719
1720         set_gc_sectors(c);
1721         c->gc_mark_valid = 1;
1722         c->need_gc      = 0;
1723
1724         for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1725                 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1726                             GC_MARK_METADATA);
1727
1728         /* don't reclaim buckets to which writeback keys point */
1729         rcu_read_lock();
1730         for (i = 0; i < c->devices_max_used; i++) {
1731                 struct bcache_device *d = c->devices[i];
1732                 struct cached_dev *dc;
1733                 struct keybuf_key *w, *n;
1734                 unsigned int j;
1735
1736                 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1737                         continue;
1738                 dc = container_of(d, struct cached_dev, disk);
1739
1740                 spin_lock(&dc->writeback_keys.lock);
1741                 rbtree_postorder_for_each_entry_safe(w, n,
1742                                         &dc->writeback_keys.keys, node)
1743                         for (j = 0; j < KEY_PTRS(&w->key); j++)
1744                                 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1745                                             GC_MARK_DIRTY);
1746                 spin_unlock(&dc->writeback_keys.lock);
1747         }
1748         rcu_read_unlock();
1749
1750         c->avail_nbuckets = 0;
1751         for_each_cache(ca, c, i) {
1752                 uint64_t *k;
1753
1754                 ca->invalidate_needs_gc = 0;
1755
1756                 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1757                         SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1758
1759                 for (k = ca->prio_buckets;
1760                      k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1761                         SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1762
1763                 for_each_bucket(b, ca) {
1764                         c->need_gc      = max(c->need_gc, bucket_gc_gen(b));
1765
1766                         if (atomic_read(&b->pin))
1767                                 continue;
1768
1769                         BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1770
1771                         if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1772                                 c->avail_nbuckets++;
1773                 }
1774         }
1775
1776         mutex_unlock(&c->bucket_lock);
1777 }
1778
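/*
 * Run one full garbage collection pass: mark the whole tree starting at the
 * root, retrying whenever incremental gc yields with -EAGAIN, until the
 * pass completes or the cache set is shutting down.  Afterwards update the
 * gc statistics, wake the allocator threads and kick off moving gc.
 */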
1779 static void bch_btree_gc(struct cache_set *c)
1780 {
1781         int ret;
1782         struct gc_stat stats;
1783         struct closure writes;
1784         struct btree_op op;
1785         uint64_t start_time = local_clock();
1786
1787         trace_bcache_gc_start(c);
1788
1789         memset(&stats, 0, sizeof(struct gc_stat));
1790         closure_init_stack(&writes);
1791         bch_btree_op_init(&op, SHRT_MAX);
1792
1793         btree_gc_start(c);
1794
1795         /* if CACHE_SET_IO_DISABLE is set, the gc thread should stop too */
1796         do {
1797                 ret = btree_root(gc_root, c, &op, &writes, &stats);
1798                 closure_sync(&writes);
1799                 cond_resched();
1800
1801                 if (ret == -EAGAIN)
1802                         schedule_timeout_interruptible(msecs_to_jiffies
1803                                                        (GC_SLEEP_MS));
1804                 else if (ret)
1805                         pr_warn("gc failed!");
1806         } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1807
1808         bch_btree_gc_finish(c);
1809         wake_up_allocators(c);
1810
1811         bch_time_stats_update(&c->btree_gc_time, start_time);
1812
1813         stats.key_bytes *= sizeof(uint64_t);
1814         stats.data      <<= 9;
1815         bch_update_bucket_in_use(c, &stats);
1816         memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1817
1818         trace_bcache_gc_end(c);
1819
1820         bch_moving_gc(c);
1821 }
1822
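/*
 * gc needs to run if any cache can't invalidate buckets without it, or once
 * enough sectors have been written since the last pass (sectors_to_gc has
 * gone negative).
 */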
1823 static bool gc_should_run(struct cache_set *c)
1824 {
1825         struct cache *ca;
1826         unsigned int i;
1827
1828         for_each_cache(ca, c, i)
1829                 if (ca->invalidate_needs_gc)
1830                         return true;
1831
1832         if (atomic_read(&c->sectors_to_gc) < 0)
1833                 return true;
1834
1835         return false;
1836 }
1837
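/*
 * Per cache set gc kthread: sleeps until gc_should_run() says so (it is
 * woken via c->gc_wait), then runs a gc pass; exits when asked to stop or
 * when I/O on the cache set has been disabled.
 */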
1838 static int bch_gc_thread(void *arg)
1839 {
1840         struct cache_set *c = arg;
1841
1842         while (1) {
1843                 wait_event_interruptible(c->gc_wait,
1844                            kthread_should_stop() ||
1845                            test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1846                            gc_should_run(c));
1847
1848                 if (kthread_should_stop() ||
1849                     test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1850                         break;
1851
1852                 set_gc_sectors(c);
1853                 bch_btree_gc(c);
1854         }
1855
1856         wait_for_kthread_stop();
1857         return 0;
1858 }
1859
1860 int bch_gc_thread_start(struct cache_set *c)
1861 {
1862         c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1863         return PTR_ERR_OR_ZERO(c->gc_thread);
1864 }
1865
1866 /* Initial partial gc: mark buckets in use by the existing btree at registration */
1867
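/*
 * Walk the existing btree at registration time, marking the buckets that
 * every key points into as in use.  For interior nodes the next child is
 * prefetched while the previous one is recursed into, and nodes are counted
 * so incremental gc knows roughly how big the tree is.
 */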
1868 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1869 {
1870         int ret = 0;
1871         struct bkey *k, *p = NULL;
1872         struct btree_iter iter;
1873
1874         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1875                 bch_initial_mark_key(b->c, b->level, k);
1876
1877         bch_initial_mark_key(b->c, b->level + 1, &b->key);
1878
1879         if (b->level) {
1880                 bch_btree_iter_init(&b->keys, &iter, NULL);
1881
1882                 do {
1883                         k = bch_btree_iter_next_filter(&iter, &b->keys,
1884                                                        bch_ptr_bad);
1885                         if (k) {
1886                                 btree_node_prefetch(b, k);
1887                                 /*
1888                                  * initialize c->gc_stats.nodes
1889                                  * for incremental GC
1890                                  */
1891                                 b->c->gc_stats.nodes++;
1892                         }
1893
1894                         if (p)
1895                                 ret = btree(check_recurse, p, b, op);
1896
1897                         p = k;
1898                 } while (p && !ret);
1899         }
1900
1901         return ret;
1902 }
1903
1904 int bch_btree_check(struct cache_set *c)
1905 {
1906         struct btree_op op;
1907
1908         bch_btree_op_init(&op, SHRT_MAX);
1909
1910         return btree_root(check_recurse, c, &op);
1911 }
1912
1913 void bch_initial_gc_finish(struct cache_set *c)
1914 {
1915         struct cache *ca;
1916         struct bucket *b;
1917         unsigned int i;
1918
1919         bch_btree_gc_finish(c);
1920
1921         mutex_lock(&c->bucket_lock);
1922
1923         /*
1924          * We need to put some unused buckets directly on the prio freelist in
1925          * order to get the allocator thread started - it needs freed buckets in
1926          * order to rewrite the prios and gens, and it needs to rewrite prios
1927          * and gens in order to free buckets.
1928          *
1929          * This is only safe for buckets that have no live data in them, which
1930          * there should always be some of.
1931          */
1932         for_each_cache(ca, c, i) {
1933                 for_each_bucket(b, ca) {
1934                         if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1935                             fifo_full(&ca->free[RESERVE_BTREE]))
1936                                 break;
1937
1938                         if (bch_can_invalidate_bucket(ca, b) &&
1939                             !GC_MARK(b)) {
1940                                 __bch_invalidate_one_bucket(ca, b);
1941                                 if (!fifo_push(&ca->free[RESERVE_PRIO],
1942                                    b - ca->buckets))
1943                                         fifo_push(&ca->free[RESERVE_BTREE],
1944                                                   b - ca->buckets);
1945                         }
1946                 }
1947         }
1948
1949         mutex_unlock(&c->bucket_lock);
1950 }
1951
1952 /* Btree insertion */
1953
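/*
 * Insert a single key into a node's in-memory keyset.  Returns false if the
 * insert turned out to be a no-op (e.g. a replace whose old key was no
 * longer present).
 */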
1954 static bool btree_insert_key(struct btree *b, struct bkey *k,
1955                              struct bkey *replace_key)
1956 {
1957         unsigned int status;
1958
1959         BUG_ON(bkey_cmp(k, &b->key) > 0);
1960
1961         status = bch_btree_insert_key(&b->keys, k, replace_key);
1962         if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1963                 bch_check_keys(&b->keys, "%u for %s", status,
1964                                replace_key ? "replace" : "insert");
1965
1966                 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1967                                               status);
1968                 return true;
1969         } else
1970                 return false;
1971 }
1972
1973 static size_t insert_u64s_remaining(struct btree *b)
1974 {
1975         long ret = bch_btree_keys_u64s_remaining(&b->keys);
1976
1977         /*
1978          * Might land in the middle of an existing extent and have to split it
1979          */
1980         if (b->keys.ops->is_extents)
1981                 ret -= KEY_MAX_U64S;
1982
1983         return max(ret, 0L);
1984 }
1985
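/*
 * Insert as many keys from @insert_keys as fit into @b: keys that lie
 * entirely within the node's range are inserted whole; a key straddling the
 * end of the range is split, with only its front portion inserted and the
 * remainder left on the keylist for the next node.  Sets
 * op->insert_collision if nothing was inserted.
 */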
1986 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1987                                   struct keylist *insert_keys,
1988                                   struct bkey *replace_key)
1989 {
1990         bool ret = false;
1991         int oldsize = bch_count_data(&b->keys);
1992
1993         while (!bch_keylist_empty(insert_keys)) {
1994                 struct bkey *k = insert_keys->keys;
1995
1996                 if (bkey_u64s(k) > insert_u64s_remaining(b))
1997                         break;
1998
1999                 if (bkey_cmp(k, &b->key) <= 0) {
2000                         if (!b->level)
2001                                 bkey_put(b->c, k);
2002
2003                         ret |= btree_insert_key(b, k, replace_key);
2004                         bch_keylist_pop_front(insert_keys);
2005                 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2006                         BKEY_PADDED(key) temp;
2007                         bkey_copy(&temp.key, insert_keys->keys);
2008
2009                         bch_cut_back(&b->key, &temp.key);
2010                         bch_cut_front(&b->key, insert_keys->keys);
2011
2012                         ret |= btree_insert_key(b, &temp.key, replace_key);
2013                         break;
2014                 } else {
2015                         break;
2016                 }
2017         }
2018
2019         if (!ret)
2020                 op->insert_collision = true;
2021
2022         BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2023
2024         BUG_ON(bch_count_data(&b->keys) < oldsize);
2025         return ret;
2026 }
2027
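/*
 * Rewrite node @b while inserting @insert_keys.  If the result would be
 * more than roughly 4/5 full, split it: the keys are divided about 3/5-2/5
 * between two new nodes, a new root is allocated when @b was the root, and
 * the new nodes' keys are inserted into the parent (or new root) before @b
 * is freed.  Otherwise the keys simply go into a single replacement node.
 */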
2028 static int btree_split(struct btree *b, struct btree_op *op,
2029                        struct keylist *insert_keys,
2030                        struct bkey *replace_key)
2031 {
2032         bool split;
2033         struct btree *n1, *n2 = NULL, *n3 = NULL;
2034         uint64_t start_time = local_clock();
2035         struct closure cl;
2036         struct keylist parent_keys;
2037
2038         closure_init_stack(&cl);
2039         bch_keylist_init(&parent_keys);
2040
2041         if (btree_check_reserve(b, op)) {
2042                 if (!b->level)
2043                         return -EINTR;
2044                 else
2045                         WARN(1, "insufficient reserve for split\n");
2046         }
2047
2048         n1 = btree_node_alloc_replacement(b, op);
2049         if (IS_ERR(n1))
2050                 goto err;
2051
2052         split = set_blocks(btree_bset_first(n1),
2053                            block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2054
2055         if (split) {
2056                 unsigned int keys = 0;
2057
2058                 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2059
2060                 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2061                 if (IS_ERR(n2))
2062                         goto err_free1;
2063
2064                 if (!b->parent) {
2065                         n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2066                         if (IS_ERR(n3))
2067                                 goto err_free2;
2068                 }
2069
2070                 mutex_lock(&n1->write_lock);
2071                 mutex_lock(&n2->write_lock);
2072
2073                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2074
2075                 /*
2076                  * Has to be a linear search because we don't have an auxiliary
2077                  * search tree yet
2078                  */
2079
2080                 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2081                         keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2082                                                         keys));
2083
2084                 bkey_copy_key(&n1->key,
2085                               bset_bkey_idx(btree_bset_first(n1), keys));
2086                 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2087
2088                 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2089                 btree_bset_first(n1)->keys = keys;
2090
2091                 memcpy(btree_bset_first(n2)->start,
2092                        bset_bkey_last(btree_bset_first(n1)),
2093                        btree_bset_first(n2)->keys * sizeof(uint64_t));
2094
2095                 bkey_copy_key(&n2->key, &b->key);
2096
2097                 bch_keylist_add(&parent_keys, &n2->key);
2098                 bch_btree_node_write(n2, &cl);
2099                 mutex_unlock(&n2->write_lock);
2100                 rw_unlock(true, n2);
2101         } else {
2102                 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2103
2104                 mutex_lock(&n1->write_lock);
2105                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2106         }
2107
2108         bch_keylist_add(&parent_keys, &n1->key);
2109         bch_btree_node_write(n1, &cl);
2110         mutex_unlock(&n1->write_lock);
2111
2112         if (n3) {
2113                 /* Depth increases, make a new root */
2114                 mutex_lock(&n3->write_lock);
2115                 bkey_copy_key(&n3->key, &MAX_KEY);
2116                 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2117                 bch_btree_node_write(n3, &cl);
2118                 mutex_unlock(&n3->write_lock);
2119
2120                 closure_sync(&cl);
2121                 bch_btree_set_root(n3);
2122                 rw_unlock(true, n3);
2123         } else if (!b->parent) {
2124                 /* Root filled up but didn't need to be split */
2125                 closure_sync(&cl);
2126                 bch_btree_set_root(n1);
2127         } else {
2128                 /* Split a non root node */
2129                 closure_sync(&cl);
2130                 make_btree_freeing_key(b, parent_keys.top);
2131                 bch_keylist_push(&parent_keys);
2132
2133                 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2134                 BUG_ON(!bch_keylist_empty(&parent_keys));
2135         }
2136
2137         btree_node_free(b);
2138         rw_unlock(true, n1);
2139
2140         bch_time_stats_update(&b->c->btree_split_time, start_time);
2141
2142         return 0;
2143 err_free2:
2144         bkey_put(b->c, &n2->key);
2145         btree_node_free(n2);
2146         rw_unlock(true, n2);
2147 err_free1:
2148         bkey_put(b->c, &n1->key);
2149         btree_node_free(n1);
2150         rw_unlock(true, n1);
2151 err:
2152         WARN(1, "bcache: btree split failed (level %u)", b->level);
2153
2154         if (n3 == ERR_PTR(-EAGAIN) ||
2155             n2 == ERR_PTR(-EAGAIN) ||
2156             n1 == ERR_PTR(-EAGAIN))
2157                 return -EAGAIN;
2158
2159         return -ENOMEM;
2160 }
2161
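/*
 * Insert keys into a single node, splitting it if they don't fit.  When a
 * split is needed but can't be done here (we're inside the block layer's
 * make_request recursion, or we don't hold enough locks), -EAGAIN or -EINTR
 * tells the caller to back off and retry the traversal.
 */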
2162 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2163                                  struct keylist *insert_keys,
2164                                  atomic_t *journal_ref,
2165                                  struct bkey *replace_key)
2166 {
2167         struct closure cl;
2168
2169         BUG_ON(b->level && replace_key);
2170
2171         closure_init_stack(&cl);
2172
2173         mutex_lock(&b->write_lock);
2174
2175         if (write_block(b) != btree_bset_last(b) &&
2176             b->keys.last_set_unwritten)
2177                 bch_btree_init_next(b); /* just wrote a set */
2178
2179         if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2180                 mutex_unlock(&b->write_lock);
2181                 goto split;
2182         }
2183
2184         BUG_ON(write_block(b) != btree_bset_last(b));
2185
2186         if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2187                 if (!b->level)
2188                         bch_btree_leaf_dirty(b, journal_ref);
2189                 else
2190                         bch_btree_node_write(b, &cl);
2191         }
2192
2193         mutex_unlock(&b->write_lock);
2194
2195         /* wait for btree node write if necessary, after unlock */
2196         closure_sync(&cl);
2197
2198         return 0;
2199 split:
2200         if (current->bio_list) {
2201                 op->lock = b->c->root->level + 1;
2202                 return -EAGAIN;
2203         } else if (op->lock <= b->c->root->level) {
2204                 op->lock = b->c->root->level + 1;
2205                 return -EINTR;
2206         } else {
2207                 /* Invalidated all iterators */
2208                 int ret = btree_split(b, op, insert_keys, replace_key);
2209
2210                 if (bch_keylist_empty(insert_keys))
2211                         return 0;
2212                 else if (!ret)
2213                         return -EINTR;
2214                 return ret;
2215         }
2216 }
2217
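/*
 * Insert a "check key" - a key whose single pointer targets PTR_CHECK_DEV
 * with a random value - so that a later replace operation can detect
 * whether anything else touched this range in the meantime.  The node's
 * read lock is upgraded to a write lock if needed, bailing out if the node
 * changed underneath us.
 */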
2218 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2219                                struct bkey *check_key)
2220 {
2221         int ret = -EINTR;
2222         uint64_t btree_ptr = b->key.ptr[0];
2223         unsigned long seq = b->seq;
2224         struct keylist insert;
2225         bool upgrade = op->lock == -1;
2226
2227         bch_keylist_init(&insert);
2228
2229         if (upgrade) {
2230                 rw_unlock(false, b);
2231                 rw_lock(true, b, b->level);
2232
2233                 if (b->key.ptr[0] != btree_ptr ||
2234                     b->seq != seq + 1) {
2235                         op->lock = b->level;
2236                         goto out;
2237                 }
2238         }
2239
2240         SET_KEY_PTRS(check_key, 1);
2241         get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2242
2243         SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2244
2245         bch_keylist_add(&insert, check_key);
2246
2247         ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2248
2249         BUG_ON(!ret && !bch_keylist_empty(&insert));
2250 out:
2251         if (upgrade)
2252                 downgrade_write(&b->lock);
2253         return ret;
2254 }
2255
2256 struct btree_insert_op {
2257         struct btree_op op;
2258         struct keylist  *keys;
2259         atomic_t        *journal_ref;
2260         struct bkey     *replace_key;
2261 };
2262
2263 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2264 {
2265         struct btree_insert_op *op = container_of(b_op,
2266                                         struct btree_insert_op, op);
2267
2268         int ret = bch_btree_insert_node(b, &op->op, op->keys,
2269                                         op->journal_ref, op->replace_key);
2270         if (ret && !bch_keylist_empty(op->keys))
2271                 return ret;
2272         else
2273                 return MAP_DONE;
2274 }
2275
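/*
 * Top level insert: repeatedly map the leaf nodes covered by @keys and
 * insert into each in turn until the keylist is drained.  On error the
 * remaining keys are dropped; -ESRCH means a replace style insert found a
 * collision.  A (hypothetical) caller typically does something like:
 *
 *        bch_keylist_init(&keys);
 *        bch_keylist_add(&keys, k);
 *        ret = bch_btree_insert(c, &keys, journal_ref, NULL);
 */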
2276 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2277                      atomic_t *journal_ref, struct bkey *replace_key)
2278 {
2279         struct btree_insert_op op;
2280         int ret = 0;
2281
2282         BUG_ON(current->bio_list);
2283         BUG_ON(bch_keylist_empty(keys));
2284
2285         bch_btree_op_init(&op.op, 0);
2286         op.keys         = keys;
2287         op.journal_ref  = journal_ref;
2288         op.replace_key  = replace_key;
2289
2290         while (!ret && !bch_keylist_empty(keys)) {
2291                 op.op.lock = 0;
2292                 ret = bch_btree_map_leaf_nodes(&op.op, c,
2293                                                &START_KEY(keys->keys),
2294                                                btree_insert_fn);
2295         }
2296
2297         if (ret) {
2298                 struct bkey *k;
2299
2300                 pr_err("error %i", ret);
2301
2302                 while ((k = bch_keylist_pop(keys)))
2303                         bkey_put(c, k);
2304         } else if (op.op.insert_collision)
2305                 ret = -ESRCH;
2306
2307         return ret;
2308 }
2309
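/*
 * Make @b the new root: it must already have been written, and its buckets
 * must have btree priority.  The node is removed from the btree cache's
 * list, c->root is updated, and a journal meta entry is written so the new
 * root location is persistent.
 */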
2310 void bch_btree_set_root(struct btree *b)
2311 {
2312         unsigned int i;
2313         struct closure cl;
2314
2315         closure_init_stack(&cl);
2316
2317         trace_bcache_btree_set_root(b);
2318
2319         BUG_ON(!b->written);
2320
2321         for (i = 0; i < KEY_PTRS(&b->key); i++)
2322                 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2323
2324         mutex_lock(&b->c->bucket_lock);
2325         list_del_init(&b->list);
2326         mutex_unlock(&b->c->bucket_lock);
2327
2328         b->c->root = b;
2329
2330         bch_journal_meta(b->c, &cl);
2331         closure_sync(&cl);
2332 }
2333
2334 /* Map across nodes or keys */
2335
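/*
 * Call @fn on every node at or after @from (leaves only, unless
 * MAP_ALL_NODES is given); the traversal stops as soon as @fn returns
 * anything other than MAP_CONTINUE.
 */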
2336 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2337                                        struct bkey *from,
2338                                        btree_map_nodes_fn *fn, int flags)
2339 {
2340         int ret = MAP_CONTINUE;
2341
2342         if (b->level) {
2343                 struct bkey *k;
2344                 struct btree_iter iter;
2345
2346                 bch_btree_iter_init(&b->keys, &iter, from);
2347
2348                 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2349                                                        bch_ptr_bad))) {
2350                         ret = btree(map_nodes_recurse, k, b,
2351                                     op, from, fn, flags);
2352                         from = NULL;
2353
2354                         if (ret != MAP_CONTINUE)
2355                                 return ret;
2356                 }
2357         }
2358
2359         if (!b->level || flags == MAP_ALL_NODES)
2360                 ret = fn(op, b);
2361
2362         return ret;
2363 }
2364
2365 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2366                           struct bkey *from, btree_map_nodes_fn *fn, int flags)
2367 {
2368         return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2369 }
2370
2371 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2372                                       struct bkey *from, btree_map_keys_fn *fn,
2373                                       int flags)
2374 {
2375         int ret = MAP_CONTINUE;
2376         struct bkey *k;
2377         struct btree_iter iter;
2378
2379         bch_btree_iter_init(&b->keys, &iter, from);
2380
2381         while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2382                 ret = !b->level
2383                         ? fn(op, b, k)
2384                         : btree(map_keys_recurse, k, b, op, from, fn, flags);
2385                 from = NULL;
2386
2387                 if (ret != MAP_CONTINUE)
2388                         return ret;
2389         }
2390
2391         if (!b->level && (flags & MAP_END_KEY))
2392                 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2393                                      KEY_OFFSET(&b->key), 0));
2394
2395         return ret;
2396 }
2397
2398 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2399                        struct bkey *from, btree_map_keys_fn *fn, int flags)
2400 {
2401         return btree_root(map_keys_recurse, c, op, from, fn, flags);
2402 }
2403
2404 /* Keybuf code */
2405
2406 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2407 {
2408         /* Overlapping keys compare equal */
2409         if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2410                 return -1;
2411         if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2412                 return 1;
2413         return 0;
2414 }
2415
2416 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2417                                             struct keybuf_key *r)
2418 {
2419         return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2420 }
2421
2422 struct refill {
2423         struct btree_op op;
2424         unsigned int    nr_found;
2425         struct keybuf   *buf;
2426         struct bkey     *end;
2427         keybuf_pred_fn  *pred;
2428 };
2429
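/*
 * Map callback for bch_refill_keybuf(): add each key matching @pred to the
 * keybuf's rbtree, stopping once we pass refill->end or the keybuf's
 * freelist is exhausted; buf->last_scanned remembers where to resume next
 * time.
 */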
2430 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2431                             struct bkey *k)
2432 {
2433         struct refill *refill = container_of(op, struct refill, op);
2434         struct keybuf *buf = refill->buf;
2435         int ret = MAP_CONTINUE;
2436
2437         if (bkey_cmp(k, refill->end) >= 0) {
2438                 ret = MAP_DONE;
2439                 goto out;
2440         }
2441
2442         if (!KEY_SIZE(k)) /* end key */
2443                 goto out;
2444
2445         if (refill->pred(buf, k)) {
2446                 struct keybuf_key *w;
2447
2448                 spin_lock(&buf->lock);
2449
2450                 w = array_alloc(&buf->freelist);
2451                 if (!w) {
2452                         spin_unlock(&buf->lock);
2453                         return MAP_DONE;
2454                 }
2455
2456                 w->private = NULL;
2457                 bkey_copy(&w->key, k);
2458
2459                 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2460                         array_free(&buf->freelist, w);
2461                 else
2462                         refill->nr_found++;
2463
2464                 if (array_freelist_empty(&buf->freelist))
2465                         ret = MAP_DONE;
2466
2467                 spin_unlock(&buf->lock);
2468         }
2469 out:
2470         buf->last_scanned = *k;
2471         return ret;
2472 }
2473
2474 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2475                        struct bkey *end, keybuf_pred_fn *pred)
2476 {
2477         struct bkey start = buf->last_scanned;
2478         struct refill refill;
2479
2480         cond_resched();
2481
2482         bch_btree_op_init(&refill.op, -1);
2483         refill.nr_found = 0;
2484         refill.buf      = buf;
2485         refill.end      = end;
2486         refill.pred     = pred;
2487
2488         bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2489                            refill_keybuf_fn, MAP_END_KEY);
2490
2491         trace_bcache_keyscan(refill.nr_found,
2492                              KEY_INODE(&start), KEY_OFFSET(&start),
2493                              KEY_INODE(&buf->last_scanned),
2494                              KEY_OFFSET(&buf->last_scanned));
2495
2496         spin_lock(&buf->lock);
2497
2498         if (!RB_EMPTY_ROOT(&buf->keys)) {
2499                 struct keybuf_key *w;
2500
2501                 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2502                 buf->start      = START_KEY(&w->key);
2503
2504                 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2505                 buf->end        = w->key;
2506         } else {
2507                 buf->start      = MAX_KEY;
2508                 buf->end        = MAX_KEY;
2509         }
2510
2511         spin_unlock(&buf->lock);
2512 }
2513
2514 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2515 {
2516         rb_erase(&w->node, &buf->keys);
2517         array_free(&buf->freelist, w);
2518 }
2519
2520 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2521 {
2522         spin_lock(&buf->lock);
2523         __bch_keybuf_del(buf, w);
2524         spin_unlock(&buf->lock);
2525 }
2526
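/*
 * Check whether any buffered key overlaps [start, end).  Overlapping keys
 * that aren't currently in use (no private data) are simply dropped from
 * the buffer; returns true if an in-use key overlaps.
 */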
2527 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2528                                   struct bkey *end)
2529 {
2530         bool ret = false;
2531         struct keybuf_key *p, *w, s;
2532
2533         s.key = *start;
2534
2535         if (bkey_cmp(end, &buf->start) <= 0 ||
2536             bkey_cmp(start, &buf->end) >= 0)
2537                 return false;
2538
2539         spin_lock(&buf->lock);
2540         w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2541
2542         while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2543                 p = w;
2544                 w = RB_NEXT(w, node);
2545
2546                 if (p->private)
2547                         ret = true;
2548                 else
2549                         __bch_keybuf_del(buf, p);
2550         }
2551
2552         spin_unlock(&buf->lock);
2553         return ret;
2554 }
2555
2556 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2557 {
2558         struct keybuf_key *w;
2559
2560         spin_lock(&buf->lock);
2561
2562         w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2563
2564         while (w && w->private)
2565                 w = RB_NEXT(w, node);
2566
2567         if (w)
2568                 w->private = ERR_PTR(-EINTR);
2569
2570         spin_unlock(&buf->lock);
2571         return w;
2572 }
2573
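/*
 * Like bch_keybuf_next(), but refills the keybuf from the btree whenever it
 * runs dry; returns NULL once the scan has reached @end.
 */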
2574 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2575                                           struct keybuf *buf,
2576                                           struct bkey *end,
2577                                           keybuf_pred_fn *pred)
2578 {
2579         struct keybuf_key *ret;
2580
2581         while (1) {
2582                 ret = bch_keybuf_next(buf);
2583                 if (ret)
2584                         break;
2585
2586                 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2587                         pr_debug("scan finished");
2588                         break;
2589                 }
2590
2591                 bch_refill_keybuf(c, buf, end, pred);
2592         }
2593
2594         return ret;
2595 }
2596
2597 void bch_keybuf_init(struct keybuf *buf)
2598 {
2599         buf->last_scanned       = MAX_KEY;
2600         buf->keys               = RB_ROOT;
2601
2602         spin_lock_init(&buf->lock);
2603         array_allocator_init(&buf->freelist);
2604 }