1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2009 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/pagemap.h>
8 #include <linux/writeback.h>
9 #include <linux/blkdev.h>
10 #include <linux/rbtree.h>
11 #include <linux/slab.h>
14 #include "transaction.h"
17 #include "btrfs_inode.h"
18 #include "async-thread.h"
19 #include "free-space-cache.h"
20 #include "inode-map.h"
22 #include "print-tree.h"
25 * backref_node, mapping_node and tree_block start with this
28 struct rb_node rb_node;
33 * present a tree block in the backref cache
36 struct rb_node rb_node;
40 /* objectid of tree block owner, can be not uptodate */
42 /* link to pending, changed or detached list */
43 struct list_head list;
44 /* list of upper level blocks reference this block */
45 struct list_head upper;
46 /* list of child blocks in the cache */
47 struct list_head lower;
48 /* NULL if this node is not tree root */
49 struct btrfs_root *root;
50 /* extent buffer got by COW the block */
51 struct extent_buffer *eb;
52 /* level of tree block */
54 /* is the block in non-reference counted tree */
55 unsigned int cowonly:1;
56 /* 1 if no child node in the cache */
57 unsigned int lowest:1;
58 /* is the extent buffer locked */
59 unsigned int locked:1;
60 /* has the block been processed */
61 unsigned int processed:1;
62 /* have backrefs of this block been checked */
63 unsigned int checked:1;
65 * 1 if corresponding block has been cowed but some upper
66 * level block pointers may not point to the new location
68 unsigned int pending:1;
70 * 1 if the backref node isn't connected to any other
73 unsigned int detached:1;
77 * present a block pointer in the backref cache
80 struct list_head list[2];
81 struct backref_node *node[2];
86 #define RELOCATION_RESERVED_NODES 256
88 struct backref_cache {
89 /* red black tree of all backref nodes in the cache */
90 struct rb_root rb_root;
91 /* for passing backref nodes to btrfs_reloc_cow_block */
92 struct backref_node *path[BTRFS_MAX_LEVEL];
94 * list of blocks that have been cowed but some block
95 * pointers in upper level blocks may not reflect the
98 struct list_head pending[BTRFS_MAX_LEVEL];
99 /* list of backref nodes with no child node */
100 struct list_head leaves;
101 /* list of blocks that have been cowed in current transaction */
102 struct list_head changed;
103 /* list of detached backref node. */
104 struct list_head detached;
113 * map address of tree root to tree
115 struct mapping_node {
116 struct rb_node rb_node;
121 struct mapping_tree {
122 struct rb_root rb_root;
127 * present a tree block to process
130 struct rb_node rb_node;
132 struct btrfs_key key;
133 unsigned int level:8;
134 unsigned int key_ready:1;
137 #define MAX_EXTENTS 128
139 struct file_extent_cluster {
142 u64 boundary[MAX_EXTENTS];
146 struct reloc_control {
147 /* block group to relocate */
148 struct btrfs_block_group_cache *block_group;
150 struct btrfs_root *extent_root;
151 /* inode for moving data */
152 struct inode *data_inode;
154 struct btrfs_block_rsv *block_rsv;
156 struct backref_cache backref_cache;
158 struct file_extent_cluster cluster;
159 /* tree blocks have been processed */
160 struct extent_io_tree processed_blocks;
161 /* map start of tree root to corresponding reloc tree */
162 struct mapping_tree reloc_root_tree;
163 /* list of reloc trees */
164 struct list_head reloc_roots;
165 /* list of subvolume trees that get relocated */
166 struct list_head dirty_subvol_roots;
167 /* size of metadata reservation for merging reloc trees */
168 u64 merging_rsv_size;
169 /* size of relocated tree nodes */
171 /* reserved size for block group relocation*/
177 unsigned int stage:8;
178 unsigned int create_reloc_tree:1;
179 unsigned int merge_reloc_tree:1;
180 unsigned int found_file_extent:1;
183 /* stages of data relocation */
184 #define MOVE_DATA_EXTENTS 0
185 #define UPDATE_DATA_PTRS 1
187 static void remove_backref_node(struct backref_cache *cache,
188 struct backref_node *node);
189 static void __mark_block_processed(struct reloc_control *rc,
190 struct backref_node *node);
192 static void mapping_tree_init(struct mapping_tree *tree)
194 tree->rb_root = RB_ROOT;
195 spin_lock_init(&tree->lock);
198 static void backref_cache_init(struct backref_cache *cache)
201 cache->rb_root = RB_ROOT;
202 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
203 INIT_LIST_HEAD(&cache->pending[i]);
204 INIT_LIST_HEAD(&cache->changed);
205 INIT_LIST_HEAD(&cache->detached);
206 INIT_LIST_HEAD(&cache->leaves);
209 static void backref_cache_cleanup(struct backref_cache *cache)
211 struct backref_node *node;
214 while (!list_empty(&cache->detached)) {
215 node = list_entry(cache->detached.next,
216 struct backref_node, list);
217 remove_backref_node(cache, node);
220 while (!list_empty(&cache->leaves)) {
221 node = list_entry(cache->leaves.next,
222 struct backref_node, lower);
223 remove_backref_node(cache, node);
226 cache->last_trans = 0;
228 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
229 ASSERT(list_empty(&cache->pending[i]));
230 ASSERT(list_empty(&cache->changed));
231 ASSERT(list_empty(&cache->detached));
232 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
233 ASSERT(!cache->nr_nodes);
234 ASSERT(!cache->nr_edges);
237 static struct backref_node *alloc_backref_node(struct backref_cache *cache)
239 struct backref_node *node;
241 node = kzalloc(sizeof(*node), GFP_NOFS);
243 INIT_LIST_HEAD(&node->list);
244 INIT_LIST_HEAD(&node->upper);
245 INIT_LIST_HEAD(&node->lower);
246 RB_CLEAR_NODE(&node->rb_node);
252 static void free_backref_node(struct backref_cache *cache,
253 struct backref_node *node)
261 static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
263 struct backref_edge *edge;
265 edge = kzalloc(sizeof(*edge), GFP_NOFS);
271 static void free_backref_edge(struct backref_cache *cache,
272 struct backref_edge *edge)
280 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
281 struct rb_node *node)
283 struct rb_node **p = &root->rb_node;
284 struct rb_node *parent = NULL;
285 struct tree_entry *entry;
289 entry = rb_entry(parent, struct tree_entry, rb_node);
291 if (bytenr < entry->bytenr)
293 else if (bytenr > entry->bytenr)
299 rb_link_node(node, parent, p);
300 rb_insert_color(node, root);
304 static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
306 struct rb_node *n = root->rb_node;
307 struct tree_entry *entry;
310 entry = rb_entry(n, struct tree_entry, rb_node);
312 if (bytenr < entry->bytenr)
314 else if (bytenr > entry->bytenr)
322 static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
325 struct btrfs_fs_info *fs_info = NULL;
326 struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
329 fs_info = bnode->root->fs_info;
330 btrfs_panic(fs_info, errno,
331 "Inconsistency in backref cache found at offset %llu",
336 * walk up backref nodes until reach node presents tree root
338 static struct backref_node *walk_up_backref(struct backref_node *node,
339 struct backref_edge *edges[],
342 struct backref_edge *edge;
345 while (!list_empty(&node->upper)) {
346 edge = list_entry(node->upper.next,
347 struct backref_edge, list[LOWER]);
349 node = edge->node[UPPER];
351 BUG_ON(node->detached);
357 * walk down backref nodes to find start of next reference path
359 static struct backref_node *walk_down_backref(struct backref_edge *edges[],
362 struct backref_edge *edge;
363 struct backref_node *lower;
367 edge = edges[idx - 1];
368 lower = edge->node[LOWER];
369 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
373 edge = list_entry(edge->list[LOWER].next,
374 struct backref_edge, list[LOWER]);
375 edges[idx - 1] = edge;
377 return edge->node[UPPER];
383 static void unlock_node_buffer(struct backref_node *node)
386 btrfs_tree_unlock(node->eb);
391 static void drop_node_buffer(struct backref_node *node)
394 unlock_node_buffer(node);
395 free_extent_buffer(node->eb);
400 static void drop_backref_node(struct backref_cache *tree,
401 struct backref_node *node)
403 BUG_ON(!list_empty(&node->upper));
405 drop_node_buffer(node);
406 list_del(&node->list);
407 list_del(&node->lower);
408 if (!RB_EMPTY_NODE(&node->rb_node))
409 rb_erase(&node->rb_node, &tree->rb_root);
410 free_backref_node(tree, node);
414 * remove a backref node from the backref cache
416 static void remove_backref_node(struct backref_cache *cache,
417 struct backref_node *node)
419 struct backref_node *upper;
420 struct backref_edge *edge;
425 BUG_ON(!node->lowest && !node->detached);
426 while (!list_empty(&node->upper)) {
427 edge = list_entry(node->upper.next, struct backref_edge,
429 upper = edge->node[UPPER];
430 list_del(&edge->list[LOWER]);
431 list_del(&edge->list[UPPER]);
432 free_backref_edge(cache, edge);
434 if (RB_EMPTY_NODE(&upper->rb_node)) {
435 BUG_ON(!list_empty(&node->upper));
436 drop_backref_node(cache, node);
442 * add the node to leaf node list if no other
443 * child block cached.
445 if (list_empty(&upper->lower)) {
446 list_add_tail(&upper->lower, &cache->leaves);
451 drop_backref_node(cache, node);
454 static void update_backref_node(struct backref_cache *cache,
455 struct backref_node *node, u64 bytenr)
457 struct rb_node *rb_node;
458 rb_erase(&node->rb_node, &cache->rb_root);
459 node->bytenr = bytenr;
460 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
462 backref_tree_panic(rb_node, -EEXIST, bytenr);
466 * update backref cache after a transaction commit
468 static int update_backref_cache(struct btrfs_trans_handle *trans,
469 struct backref_cache *cache)
471 struct backref_node *node;
474 if (cache->last_trans == 0) {
475 cache->last_trans = trans->transid;
479 if (cache->last_trans == trans->transid)
483 * detached nodes are used to avoid unnecessary backref
484 * lookup. transaction commit changes the extent tree.
485 * so the detached nodes are no longer useful.
487 while (!list_empty(&cache->detached)) {
488 node = list_entry(cache->detached.next,
489 struct backref_node, list);
490 remove_backref_node(cache, node);
493 while (!list_empty(&cache->changed)) {
494 node = list_entry(cache->changed.next,
495 struct backref_node, list);
496 list_del_init(&node->list);
497 BUG_ON(node->pending);
498 update_backref_node(cache, node, node->new_bytenr);
502 * some nodes can be left in the pending list if there were
503 * errors during processing the pending nodes.
505 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
506 list_for_each_entry(node, &cache->pending[level], list) {
507 BUG_ON(!node->pending);
508 if (node->bytenr == node->new_bytenr)
510 update_backref_node(cache, node, node->new_bytenr);
514 cache->last_trans = 0;
519 static int should_ignore_root(struct btrfs_root *root)
521 struct btrfs_root *reloc_root;
523 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
526 reloc_root = root->reloc_root;
530 if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
531 root->fs_info->running_transaction->transid - 1)
534 * if there is reloc tree and it was created in previous
535 * transaction backref lookup can find the reloc tree,
536 * so backref node for the fs tree root is useless for
542 * find reloc tree by address of tree root
544 static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
547 struct rb_node *rb_node;
548 struct mapping_node *node;
549 struct btrfs_root *root = NULL;
551 spin_lock(&rc->reloc_root_tree.lock);
552 rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
554 node = rb_entry(rb_node, struct mapping_node, rb_node);
555 root = (struct btrfs_root *)node->data;
557 spin_unlock(&rc->reloc_root_tree.lock);
561 static int is_cowonly_root(u64 root_objectid)
563 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
564 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
565 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
566 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
567 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
568 root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
569 root_objectid == BTRFS_UUID_TREE_OBJECTID ||
570 root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
571 root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
576 static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
579 struct btrfs_key key;
581 key.objectid = root_objectid;
582 key.type = BTRFS_ROOT_ITEM_KEY;
583 if (is_cowonly_root(root_objectid))
586 key.offset = (u64)-1;
588 return btrfs_get_fs_root(fs_info, &key, false);
591 static noinline_for_stack
592 int find_inline_backref(struct extent_buffer *leaf, int slot,
593 unsigned long *ptr, unsigned long *end)
595 struct btrfs_key key;
596 struct btrfs_extent_item *ei;
597 struct btrfs_tree_block_info *bi;
600 btrfs_item_key_to_cpu(leaf, &key, slot);
602 item_size = btrfs_item_size_nr(leaf, slot);
603 if (item_size < sizeof(*ei)) {
604 btrfs_print_v0_err(leaf->fs_info);
605 btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
608 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
609 WARN_ON(!(btrfs_extent_flags(leaf, ei) &
610 BTRFS_EXTENT_FLAG_TREE_BLOCK));
612 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
613 item_size <= sizeof(*ei) + sizeof(*bi)) {
614 WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
617 if (key.type == BTRFS_METADATA_ITEM_KEY &&
618 item_size <= sizeof(*ei)) {
619 WARN_ON(item_size < sizeof(*ei));
623 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
624 bi = (struct btrfs_tree_block_info *)(ei + 1);
625 *ptr = (unsigned long)(bi + 1);
627 *ptr = (unsigned long)(ei + 1);
629 *end = (unsigned long)ei + item_size;
634 * build backref tree for a given tree block. root of the backref tree
635 * corresponds the tree block, leaves of the backref tree correspond
636 * roots of b-trees that reference the tree block.
638 * the basic idea of this function is check backrefs of a given block
639 * to find upper level blocks that reference the block, and then check
640 * backrefs of these upper level blocks recursively. the recursion stop
641 * when tree root is reached or backrefs for the block is cached.
643 * NOTE: if we find backrefs for a block are cached, we know backrefs
644 * for all upper level blocks that directly/indirectly reference the
645 * block are also cached.
647 static noinline_for_stack
648 struct backref_node *build_backref_tree(struct reloc_control *rc,
649 struct btrfs_key *node_key,
650 int level, u64 bytenr)
652 struct backref_cache *cache = &rc->backref_cache;
653 struct btrfs_path *path1; /* For searching extent root */
654 struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
655 struct extent_buffer *eb;
656 struct btrfs_root *root;
657 struct backref_node *cur;
658 struct backref_node *upper;
659 struct backref_node *lower;
660 struct backref_node *node = NULL;
661 struct backref_node *exist = NULL;
662 struct backref_edge *edge;
663 struct rb_node *rb_node;
664 struct btrfs_key key;
667 LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
672 bool need_check = true;
674 path1 = btrfs_alloc_path();
675 path2 = btrfs_alloc_path();
676 if (!path1 || !path2) {
680 path1->reada = READA_FORWARD;
681 path2->reada = READA_FORWARD;
683 node = alloc_backref_node(cache);
689 node->bytenr = bytenr;
696 key.objectid = cur->bytenr;
697 key.type = BTRFS_METADATA_ITEM_KEY;
698 key.offset = (u64)-1;
700 path1->search_commit_root = 1;
701 path1->skip_locking = 1;
702 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
709 ASSERT(path1->slots[0]);
713 WARN_ON(cur->checked);
714 if (!list_empty(&cur->upper)) {
716 * the backref was added previously when processing
717 * backref of type BTRFS_TREE_BLOCK_REF_KEY
719 ASSERT(list_is_singular(&cur->upper));
720 edge = list_entry(cur->upper.next, struct backref_edge,
722 ASSERT(list_empty(&edge->list[UPPER]));
723 exist = edge->node[UPPER];
725 * add the upper level block to pending list if we need
729 list_add_tail(&edge->list[UPPER], &list);
736 eb = path1->nodes[0];
739 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
740 ret = btrfs_next_leaf(rc->extent_root, path1);
747 eb = path1->nodes[0];
750 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
751 if (key.objectid != cur->bytenr) {
756 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
757 key.type == BTRFS_METADATA_ITEM_KEY) {
758 ret = find_inline_backref(eb, path1->slots[0],
766 /* update key for inline back ref */
767 struct btrfs_extent_inline_ref *iref;
769 iref = (struct btrfs_extent_inline_ref *)ptr;
770 type = btrfs_get_extent_inline_ref_type(eb, iref,
771 BTRFS_REF_TYPE_BLOCK);
772 if (type == BTRFS_REF_TYPE_INVALID) {
777 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
779 WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
780 key.type != BTRFS_SHARED_BLOCK_REF_KEY);
784 * Parent node found and matches current inline ref, no need to
785 * rebuild this node for this inline ref.
788 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
789 exist->owner == key.offset) ||
790 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
791 exist->bytenr == key.offset))) {
796 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
797 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
798 if (key.objectid == key.offset) {
800 * Only root blocks of reloc trees use backref
801 * pointing to itself.
803 root = find_reloc_root(rc, cur->bytenr);
809 edge = alloc_backref_edge(cache);
814 rb_node = tree_search(&cache->rb_root, key.offset);
816 upper = alloc_backref_node(cache);
818 free_backref_edge(cache, edge);
822 upper->bytenr = key.offset;
823 upper->level = cur->level + 1;
825 * backrefs for the upper level block isn't
826 * cached, add the block to pending list
828 list_add_tail(&edge->list[UPPER], &list);
830 upper = rb_entry(rb_node, struct backref_node,
832 ASSERT(upper->checked);
833 INIT_LIST_HEAD(&edge->list[UPPER]);
835 list_add_tail(&edge->list[LOWER], &cur->upper);
836 edge->node[LOWER] = cur;
837 edge->node[UPPER] = upper;
840 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
842 btrfs_print_v0_err(rc->extent_root->fs_info);
843 btrfs_handle_fs_error(rc->extent_root->fs_info, err,
846 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
851 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
852 * means the root objectid. We need to search the tree to get
855 root = read_fs_root(rc->extent_root->fs_info, key.offset);
861 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
864 if (btrfs_root_level(&root->root_item) == cur->level) {
866 ASSERT(btrfs_root_bytenr(&root->root_item) ==
868 if (should_ignore_root(root))
869 list_add(&cur->list, &useless);
875 level = cur->level + 1;
877 /* Search the tree to find parent blocks referring the block. */
878 path2->search_commit_root = 1;
879 path2->skip_locking = 1;
880 path2->lowest_level = level;
881 ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
882 path2->lowest_level = 0;
887 if (ret > 0 && path2->slots[level] > 0)
888 path2->slots[level]--;
890 eb = path2->nodes[level];
891 if (btrfs_node_blockptr(eb, path2->slots[level]) !=
893 btrfs_err(root->fs_info,
894 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
895 cur->bytenr, level - 1,
896 root->root_key.objectid,
897 node_key->objectid, node_key->type,
905 /* Add all nodes and edges in the path */
906 for (; level < BTRFS_MAX_LEVEL; level++) {
907 if (!path2->nodes[level]) {
908 ASSERT(btrfs_root_bytenr(&root->root_item) ==
910 if (should_ignore_root(root))
911 list_add(&lower->list, &useless);
917 edge = alloc_backref_edge(cache);
923 eb = path2->nodes[level];
924 rb_node = tree_search(&cache->rb_root, eb->start);
926 upper = alloc_backref_node(cache);
928 free_backref_edge(cache, edge);
932 upper->bytenr = eb->start;
933 upper->owner = btrfs_header_owner(eb);
934 upper->level = lower->level + 1;
935 if (!test_bit(BTRFS_ROOT_REF_COWS,
940 * if we know the block isn't shared
941 * we can void checking its backrefs.
943 if (btrfs_block_can_be_shared(root, eb))
949 * add the block to pending list if we
950 * need check its backrefs, we only do this once
951 * while walking up a tree as we will catch
952 * anything else later on.
954 if (!upper->checked && need_check) {
956 list_add_tail(&edge->list[UPPER],
961 INIT_LIST_HEAD(&edge->list[UPPER]);
964 upper = rb_entry(rb_node, struct backref_node,
966 ASSERT(upper->checked);
967 INIT_LIST_HEAD(&edge->list[UPPER]);
969 upper->owner = btrfs_header_owner(eb);
971 list_add_tail(&edge->list[LOWER], &lower->upper);
972 edge->node[LOWER] = lower;
973 edge->node[UPPER] = upper;
980 btrfs_release_path(path2);
983 ptr += btrfs_extent_inline_ref_size(key.type);
993 btrfs_release_path(path1);
998 /* the pending list isn't empty, take the first block to process */
999 if (!list_empty(&list)) {
1000 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1001 list_del_init(&edge->list[UPPER]);
1002 cur = edge->node[UPPER];
1007 * everything goes well, connect backref nodes and insert backref nodes
1010 ASSERT(node->checked);
1011 cowonly = node->cowonly;
1013 rb_node = tree_insert(&cache->rb_root, node->bytenr,
1016 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1017 list_add_tail(&node->lower, &cache->leaves);
1020 list_for_each_entry(edge, &node->upper, list[LOWER])
1021 list_add_tail(&edge->list[UPPER], &list);
1023 while (!list_empty(&list)) {
1024 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1025 list_del_init(&edge->list[UPPER]);
1026 upper = edge->node[UPPER];
1027 if (upper->detached) {
1028 list_del(&edge->list[LOWER]);
1029 lower = edge->node[LOWER];
1030 free_backref_edge(cache, edge);
1031 if (list_empty(&lower->upper))
1032 list_add(&lower->list, &useless);
1036 if (!RB_EMPTY_NODE(&upper->rb_node)) {
1037 if (upper->lowest) {
1038 list_del_init(&upper->lower);
1042 list_add_tail(&edge->list[UPPER], &upper->lower);
1046 if (!upper->checked) {
1048 * Still want to blow up for developers since this is a
1055 if (cowonly != upper->cowonly) {
1062 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1065 backref_tree_panic(rb_node, -EEXIST,
1069 list_add_tail(&edge->list[UPPER], &upper->lower);
1071 list_for_each_entry(edge, &upper->upper, list[LOWER])
1072 list_add_tail(&edge->list[UPPER], &list);
1075 * process useless backref nodes. backref nodes for tree leaves
1076 * are deleted from the cache. backref nodes for upper level
1077 * tree blocks are left in the cache to avoid unnecessary backref
1080 while (!list_empty(&useless)) {
1081 upper = list_entry(useless.next, struct backref_node, list);
1082 list_del_init(&upper->list);
1083 ASSERT(list_empty(&upper->upper));
1086 if (upper->lowest) {
1087 list_del_init(&upper->lower);
1090 while (!list_empty(&upper->lower)) {
1091 edge = list_entry(upper->lower.next,
1092 struct backref_edge, list[UPPER]);
1093 list_del(&edge->list[UPPER]);
1094 list_del(&edge->list[LOWER]);
1095 lower = edge->node[LOWER];
1096 free_backref_edge(cache, edge);
1098 if (list_empty(&lower->upper))
1099 list_add(&lower->list, &useless);
1101 __mark_block_processed(rc, upper);
1102 if (upper->level > 0) {
1103 list_add(&upper->list, &cache->detached);
1104 upper->detached = 1;
1106 rb_erase(&upper->rb_node, &cache->rb_root);
1107 free_backref_node(cache, upper);
1111 btrfs_free_path(path1);
1112 btrfs_free_path(path2);
1114 while (!list_empty(&useless)) {
1115 lower = list_entry(useless.next,
1116 struct backref_node, list);
1117 list_del_init(&lower->list);
1119 while (!list_empty(&list)) {
1120 edge = list_first_entry(&list, struct backref_edge,
1122 list_del(&edge->list[UPPER]);
1123 list_del(&edge->list[LOWER]);
1124 lower = edge->node[LOWER];
1125 upper = edge->node[UPPER];
1126 free_backref_edge(cache, edge);
1129 * Lower is no longer linked to any upper backref nodes
1130 * and isn't in the cache, we can free it ourselves.
1132 if (list_empty(&lower->upper) &&
1133 RB_EMPTY_NODE(&lower->rb_node))
1134 list_add(&lower->list, &useless);
1136 if (!RB_EMPTY_NODE(&upper->rb_node))
1139 /* Add this guy's upper edges to the list to process */
1140 list_for_each_entry(edge, &upper->upper, list[LOWER])
1141 list_add_tail(&edge->list[UPPER], &list);
1142 if (list_empty(&upper->upper))
1143 list_add(&upper->list, &useless);
1146 while (!list_empty(&useless)) {
1147 lower = list_entry(useless.next,
1148 struct backref_node, list);
1149 list_del_init(&lower->list);
1152 free_backref_node(cache, lower);
1155 free_backref_node(cache, node);
1156 return ERR_PTR(err);
1158 ASSERT(!node || !node->detached);
1163 * helper to add backref node for the newly created snapshot.
1164 * the backref node is created by cloning backref node that
1165 * corresponds to root of source tree
1167 static int clone_backref_node(struct btrfs_trans_handle *trans,
1168 struct reloc_control *rc,
1169 struct btrfs_root *src,
1170 struct btrfs_root *dest)
1172 struct btrfs_root *reloc_root = src->reloc_root;
1173 struct backref_cache *cache = &rc->backref_cache;
1174 struct backref_node *node = NULL;
1175 struct backref_node *new_node;
1176 struct backref_edge *edge;
1177 struct backref_edge *new_edge;
1178 struct rb_node *rb_node;
1180 if (cache->last_trans > 0)
1181 update_backref_cache(trans, cache);
1183 rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1185 node = rb_entry(rb_node, struct backref_node, rb_node);
1189 BUG_ON(node->new_bytenr != reloc_root->node->start);
1193 rb_node = tree_search(&cache->rb_root,
1194 reloc_root->commit_root->start);
1196 node = rb_entry(rb_node, struct backref_node,
1198 BUG_ON(node->detached);
1205 new_node = alloc_backref_node(cache);
1209 new_node->bytenr = dest->node->start;
1210 new_node->level = node->level;
1211 new_node->lowest = node->lowest;
1212 new_node->checked = 1;
1213 new_node->root = dest;
1215 if (!node->lowest) {
1216 list_for_each_entry(edge, &node->lower, list[UPPER]) {
1217 new_edge = alloc_backref_edge(cache);
1221 new_edge->node[UPPER] = new_node;
1222 new_edge->node[LOWER] = edge->node[LOWER];
1223 list_add_tail(&new_edge->list[UPPER],
1227 list_add_tail(&new_node->lower, &cache->leaves);
1230 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1231 &new_node->rb_node);
1233 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1235 if (!new_node->lowest) {
1236 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1237 list_add_tail(&new_edge->list[LOWER],
1238 &new_edge->node[LOWER]->upper);
1243 while (!list_empty(&new_node->lower)) {
1244 new_edge = list_entry(new_node->lower.next,
1245 struct backref_edge, list[UPPER]);
1246 list_del(&new_edge->list[UPPER]);
1247 free_backref_edge(cache, new_edge);
1249 free_backref_node(cache, new_node);
1254 * helper to add 'address of tree root -> reloc tree' mapping
1256 static int __must_check __add_reloc_root(struct btrfs_root *root)
1258 struct btrfs_fs_info *fs_info = root->fs_info;
1259 struct rb_node *rb_node;
1260 struct mapping_node *node;
1261 struct reloc_control *rc = fs_info->reloc_ctl;
1263 node = kmalloc(sizeof(*node), GFP_NOFS);
1267 node->bytenr = root->node->start;
1270 spin_lock(&rc->reloc_root_tree.lock);
1271 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1272 node->bytenr, &node->rb_node);
1273 spin_unlock(&rc->reloc_root_tree.lock);
1275 btrfs_panic(fs_info, -EEXIST,
1276 "Duplicate root found for start=%llu while inserting into relocation tree",
1280 list_add_tail(&root->root_list, &rc->reloc_roots);
1285 * helper to delete the 'address of tree root -> reloc tree'
1288 static void __del_reloc_root(struct btrfs_root *root)
1290 struct btrfs_fs_info *fs_info = root->fs_info;
1291 struct rb_node *rb_node;
1292 struct mapping_node *node = NULL;
1293 struct reloc_control *rc = fs_info->reloc_ctl;
1295 if (rc && root->node) {
1296 spin_lock(&rc->reloc_root_tree.lock);
1297 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1300 node = rb_entry(rb_node, struct mapping_node, rb_node);
1301 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1303 spin_unlock(&rc->reloc_root_tree.lock);
1306 BUG_ON((struct btrfs_root *)node->data != root);
1309 spin_lock(&fs_info->trans_lock);
1310 list_del_init(&root->root_list);
1311 spin_unlock(&fs_info->trans_lock);
1316 * helper to update the 'address of tree root -> reloc tree'
1319 static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
1321 struct btrfs_fs_info *fs_info = root->fs_info;
1322 struct rb_node *rb_node;
1323 struct mapping_node *node = NULL;
1324 struct reloc_control *rc = fs_info->reloc_ctl;
1326 spin_lock(&rc->reloc_root_tree.lock);
1327 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1330 node = rb_entry(rb_node, struct mapping_node, rb_node);
1331 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1333 spin_unlock(&rc->reloc_root_tree.lock);
1337 BUG_ON((struct btrfs_root *)node->data != root);
1339 spin_lock(&rc->reloc_root_tree.lock);
1340 node->bytenr = new_bytenr;
1341 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1342 node->bytenr, &node->rb_node);
1343 spin_unlock(&rc->reloc_root_tree.lock);
1345 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1349 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1350 struct btrfs_root *root, u64 objectid)
1352 struct btrfs_fs_info *fs_info = root->fs_info;
1353 struct btrfs_root *reloc_root;
1354 struct extent_buffer *eb;
1355 struct btrfs_root_item *root_item;
1356 struct btrfs_key root_key;
1359 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
1362 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
1363 root_key.type = BTRFS_ROOT_ITEM_KEY;
1364 root_key.offset = objectid;
1366 if (root->root_key.objectid == objectid) {
1367 u64 commit_root_gen;
1369 /* called by btrfs_init_reloc_root */
1370 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1371 BTRFS_TREE_RELOC_OBJECTID);
1374 * Set the last_snapshot field to the generation of the commit
1375 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
1376 * correctly (returns true) when the relocation root is created
1377 * either inside the critical section of a transaction commit
1378 * (through transaction.c:qgroup_account_snapshot()) and when
1379 * it's created before the transaction commit is started.
1381 commit_root_gen = btrfs_header_generation(root->commit_root);
1382 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
1385 * called by btrfs_reloc_post_snapshot_hook.
1386 * the source tree is a reloc tree, all tree blocks
1387 * modified after it was created have RELOC flag
1388 * set in their headers. so it's OK to not update
1389 * the 'last_snapshot'.
1391 ret = btrfs_copy_root(trans, root, root->node, &eb,
1392 BTRFS_TREE_RELOC_OBJECTID);
1396 memcpy(root_item, &root->root_item, sizeof(*root_item));
1397 btrfs_set_root_bytenr(root_item, eb->start);
1398 btrfs_set_root_level(root_item, btrfs_header_level(eb));
1399 btrfs_set_root_generation(root_item, trans->transid);
1401 if (root->root_key.objectid == objectid) {
1402 btrfs_set_root_refs(root_item, 0);
1403 memset(&root_item->drop_progress, 0,
1404 sizeof(struct btrfs_disk_key));
1405 root_item->drop_level = 0;
1408 btrfs_tree_unlock(eb);
1409 free_extent_buffer(eb);
1411 ret = btrfs_insert_root(trans, fs_info->tree_root,
1412 &root_key, root_item);
1416 reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
1417 BUG_ON(IS_ERR(reloc_root));
1418 reloc_root->last_trans = trans->transid;
1423 * create reloc tree for a given fs tree. reloc tree is just a
1424 * snapshot of the fs tree with special root objectid.
1426 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1427 struct btrfs_root *root)
1429 struct btrfs_fs_info *fs_info = root->fs_info;
1430 struct btrfs_root *reloc_root;
1431 struct reloc_control *rc = fs_info->reloc_ctl;
1432 struct btrfs_block_rsv *rsv;
1436 if (root->reloc_root) {
1437 reloc_root = root->reloc_root;
1438 reloc_root->last_trans = trans->transid;
1442 if (!rc || !rc->create_reloc_tree ||
1443 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1446 if (!trans->reloc_reserved) {
1447 rsv = trans->block_rsv;
1448 trans->block_rsv = rc->block_rsv;
1451 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1453 trans->block_rsv = rsv;
1455 ret = __add_reloc_root(reloc_root);
1457 root->reloc_root = reloc_root;
1462 * update root item of reloc tree
1464 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1465 struct btrfs_root *root)
1467 struct btrfs_fs_info *fs_info = root->fs_info;
1468 struct btrfs_root *reloc_root;
1469 struct btrfs_root_item *root_item;
1472 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
1476 reloc_root = root->reloc_root;
1477 root_item = &reloc_root->root_item;
1479 /* root->reloc_root will stay until current relocation finished */
1480 if (fs_info->reloc_ctl->merge_reloc_tree &&
1481 btrfs_root_refs(root_item) == 0) {
1482 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1483 __del_reloc_root(reloc_root);
1486 if (reloc_root->commit_root != reloc_root->node) {
1487 btrfs_set_root_node(root_item, reloc_root->node);
1488 free_extent_buffer(reloc_root->commit_root);
1489 reloc_root->commit_root = btrfs_root_node(reloc_root);
1492 ret = btrfs_update_root(trans, fs_info->tree_root,
1493 &reloc_root->root_key, root_item);
1501 * helper to find first cached inode with inode number >= objectid
1504 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
1506 struct rb_node *node;
1507 struct rb_node *prev;
1508 struct btrfs_inode *entry;
1509 struct inode *inode;
1511 spin_lock(&root->inode_lock);
1513 node = root->inode_tree.rb_node;
1517 entry = rb_entry(node, struct btrfs_inode, rb_node);
1519 if (objectid < btrfs_ino(entry))
1520 node = node->rb_left;
1521 else if (objectid > btrfs_ino(entry))
1522 node = node->rb_right;
1528 entry = rb_entry(prev, struct btrfs_inode, rb_node);
1529 if (objectid <= btrfs_ino(entry)) {
1533 prev = rb_next(prev);
1537 entry = rb_entry(node, struct btrfs_inode, rb_node);
1538 inode = igrab(&entry->vfs_inode);
1540 spin_unlock(&root->inode_lock);
1544 objectid = btrfs_ino(entry) + 1;
1545 if (cond_resched_lock(&root->inode_lock))
1548 node = rb_next(node);
1550 spin_unlock(&root->inode_lock);
1554 static int in_block_group(u64 bytenr,
1555 struct btrfs_block_group_cache *block_group)
1557 if (bytenr >= block_group->key.objectid &&
1558 bytenr < block_group->key.objectid + block_group->key.offset)
1564 * get new location of data
1566 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1567 u64 bytenr, u64 num_bytes)
1569 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1570 struct btrfs_path *path;
1571 struct btrfs_file_extent_item *fi;
1572 struct extent_buffer *leaf;
1575 path = btrfs_alloc_path();
1579 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1580 ret = btrfs_lookup_file_extent(NULL, root, path,
1581 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1589 leaf = path->nodes[0];
1590 fi = btrfs_item_ptr(leaf, path->slots[0],
1591 struct btrfs_file_extent_item);
1593 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1594 btrfs_file_extent_compression(leaf, fi) ||
1595 btrfs_file_extent_encryption(leaf, fi) ||
1596 btrfs_file_extent_other_encoding(leaf, fi));
1598 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1603 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1606 btrfs_free_path(path);
1611 * update file extent items in the tree leaf to point to
1612 * the new locations.
1614 static noinline_for_stack
1615 int replace_file_extents(struct btrfs_trans_handle *trans,
1616 struct reloc_control *rc,
1617 struct btrfs_root *root,
1618 struct extent_buffer *leaf)
1620 struct btrfs_fs_info *fs_info = root->fs_info;
1621 struct btrfs_key key;
1622 struct btrfs_file_extent_item *fi;
1623 struct inode *inode = NULL;
1635 if (rc->stage != UPDATE_DATA_PTRS)
1638 /* reloc trees always use full backref */
1639 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1640 parent = leaf->start;
1644 nritems = btrfs_header_nritems(leaf);
1645 for (i = 0; i < nritems; i++) {
1647 btrfs_item_key_to_cpu(leaf, &key, i);
1648 if (key.type != BTRFS_EXTENT_DATA_KEY)
1650 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1651 if (btrfs_file_extent_type(leaf, fi) ==
1652 BTRFS_FILE_EXTENT_INLINE)
1654 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1655 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1658 if (!in_block_group(bytenr, rc->block_group))
1662 * if we are modifying block in fs tree, wait for readpage
1663 * to complete and drop the extent cache
1665 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1667 inode = find_next_inode(root, key.objectid);
1669 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1670 btrfs_add_delayed_iput(inode);
1671 inode = find_next_inode(root, key.objectid);
1673 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1675 btrfs_file_extent_num_bytes(leaf, fi);
1676 WARN_ON(!IS_ALIGNED(key.offset,
1677 fs_info->sectorsize));
1678 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1680 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1685 btrfs_drop_extent_cache(BTRFS_I(inode),
1686 key.offset, end, 1);
1687 unlock_extent(&BTRFS_I(inode)->io_tree,
1692 ret = get_new_location(rc->data_inode, &new_bytenr,
1696 * Don't have to abort since we've not changed anything
1697 * in the file extent yet.
1702 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1705 key.offset -= btrfs_file_extent_offset(leaf, fi);
1706 ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
1708 btrfs_header_owner(leaf),
1709 key.objectid, key.offset);
1711 btrfs_abort_transaction(trans, ret);
1715 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1716 parent, btrfs_header_owner(leaf),
1717 key.objectid, key.offset);
1719 btrfs_abort_transaction(trans, ret);
1724 btrfs_mark_buffer_dirty(leaf);
1726 btrfs_add_delayed_iput(inode);
1730 static noinline_for_stack
1731 int memcmp_node_keys(struct extent_buffer *eb, int slot,
1732 struct btrfs_path *path, int level)
1734 struct btrfs_disk_key key1;
1735 struct btrfs_disk_key key2;
1736 btrfs_node_key(eb, &key1, slot);
1737 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1738 return memcmp(&key1, &key2, sizeof(key1));
1742 * try to replace tree blocks in fs tree with the new blocks
1743 * in reloc tree. tree blocks haven't been modified since the
1744 * reloc tree was create can be replaced.
1746 * if a block was replaced, level of the block + 1 is returned.
1747 * if no block got replaced, 0 is returned. if there are other
1748 * errors, a negative error number is returned.
1750 static noinline_for_stack
1751 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1752 struct btrfs_root *dest, struct btrfs_root *src,
1753 struct btrfs_path *path, struct btrfs_key *next_key,
1754 int lowest_level, int max_level)
1756 struct btrfs_fs_info *fs_info = dest->fs_info;
1757 struct extent_buffer *eb;
1758 struct extent_buffer *parent;
1759 struct btrfs_key key;
1771 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1772 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1774 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1776 slot = path->slots[lowest_level];
1777 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1779 eb = btrfs_lock_root_node(dest);
1780 btrfs_set_lock_blocking_write(eb);
1781 level = btrfs_header_level(eb);
1783 if (level < lowest_level) {
1784 btrfs_tree_unlock(eb);
1785 free_extent_buffer(eb);
1790 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1793 btrfs_set_lock_blocking_write(eb);
1796 next_key->objectid = (u64)-1;
1797 next_key->type = (u8)-1;
1798 next_key->offset = (u64)-1;
1803 struct btrfs_key first_key;
1805 level = btrfs_header_level(parent);
1806 BUG_ON(level < lowest_level);
1808 ret = btrfs_bin_search(parent, &key, level, &slot);
1809 if (ret && slot > 0)
1812 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1813 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1815 old_bytenr = btrfs_node_blockptr(parent, slot);
1816 blocksize = fs_info->nodesize;
1817 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1818 btrfs_node_key_to_cpu(parent, &first_key, slot);
1820 if (level <= max_level) {
1821 eb = path->nodes[level];
1822 new_bytenr = btrfs_node_blockptr(eb,
1823 path->slots[level]);
1824 new_ptr_gen = btrfs_node_ptr_generation(eb,
1825 path->slots[level]);
1831 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1836 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1837 memcmp_node_keys(parent, slot, path, level)) {
1838 if (level <= lowest_level) {
1843 eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
1844 level - 1, &first_key);
1848 } else if (!extent_buffer_uptodate(eb)) {
1850 free_extent_buffer(eb);
1853 btrfs_tree_lock(eb);
1855 ret = btrfs_cow_block(trans, dest, eb, parent,
1859 btrfs_set_lock_blocking_write(eb);
1861 btrfs_tree_unlock(parent);
1862 free_extent_buffer(parent);
1869 btrfs_tree_unlock(parent);
1870 free_extent_buffer(parent);
1875 btrfs_node_key_to_cpu(path->nodes[level], &key,
1876 path->slots[level]);
1877 btrfs_release_path(path);
1879 path->lowest_level = level;
1880 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1881 path->lowest_level = 0;
1885 * Info qgroup to trace both subtrees.
1887 * We must trace both trees.
1888 * 1) Tree reloc subtree
1889 * If not traced, we will leak data numbers
1891 * If not traced, we will double count old data
1893 * We don't scan the subtree right now, but only record
1894 * the swapped tree blocks.
1895 * The real subtree rescan is delayed until we have new
1896 * CoW on the subtree root node before transaction commit.
1898 ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1899 rc->block_group, parent, slot,
1900 path->nodes[level], path->slots[level],
1905 * swap blocks in fs tree and reloc tree.
1907 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1908 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1909 btrfs_mark_buffer_dirty(parent);
1911 btrfs_set_node_blockptr(path->nodes[level],
1912 path->slots[level], old_bytenr);
1913 btrfs_set_node_ptr_generation(path->nodes[level],
1914 path->slots[level], old_ptr_gen);
1915 btrfs_mark_buffer_dirty(path->nodes[level]);
1917 ret = btrfs_inc_extent_ref(trans, src, old_bytenr,
1918 blocksize, path->nodes[level]->start,
1919 src->root_key.objectid, level - 1, 0);
1921 ret = btrfs_inc_extent_ref(trans, dest, new_bytenr,
1922 blocksize, 0, dest->root_key.objectid,
1926 ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
1927 path->nodes[level]->start,
1928 src->root_key.objectid, level - 1, 0);
1931 ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
1932 0, dest->root_key.objectid, level - 1,
1936 btrfs_unlock_up_safe(path, 0);
1941 btrfs_tree_unlock(parent);
1942 free_extent_buffer(parent);
1947 * helper to find next relocated block in reloc tree
1949 static noinline_for_stack
1950 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1953 struct extent_buffer *eb;
1958 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1960 for (i = 0; i < *level; i++) {
1961 free_extent_buffer(path->nodes[i]);
1962 path->nodes[i] = NULL;
1965 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1966 eb = path->nodes[i];
1967 nritems = btrfs_header_nritems(eb);
1968 while (path->slots[i] + 1 < nritems) {
1970 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1977 free_extent_buffer(path->nodes[i]);
1978 path->nodes[i] = NULL;
1984 * walk down reloc tree to find relocated block of lowest level
1986 static noinline_for_stack
1987 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1990 struct btrfs_fs_info *fs_info = root->fs_info;
1991 struct extent_buffer *eb = NULL;
1998 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2000 for (i = *level; i > 0; i--) {
2001 struct btrfs_key first_key;
2003 eb = path->nodes[i];
2004 nritems = btrfs_header_nritems(eb);
2005 while (path->slots[i] < nritems) {
2006 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
2007 if (ptr_gen > last_snapshot)
2011 if (path->slots[i] >= nritems) {
2022 bytenr = btrfs_node_blockptr(eb, path->slots[i]);
2023 btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
2024 eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
2028 } else if (!extent_buffer_uptodate(eb)) {
2029 free_extent_buffer(eb);
2032 BUG_ON(btrfs_header_level(eb) != i - 1);
2033 path->nodes[i - 1] = eb;
2034 path->slots[i - 1] = 0;
2040 * invalidate extent cache for file extents whose key in range of
2041 * [min_key, max_key)
2043 static int invalidate_extent_cache(struct btrfs_root *root,
2044 struct btrfs_key *min_key,
2045 struct btrfs_key *max_key)
2047 struct btrfs_fs_info *fs_info = root->fs_info;
2048 struct inode *inode = NULL;
2053 objectid = min_key->objectid;
2058 if (objectid > max_key->objectid)
2061 inode = find_next_inode(root, objectid);
2064 ino = btrfs_ino(BTRFS_I(inode));
2066 if (ino > max_key->objectid) {
2072 if (!S_ISREG(inode->i_mode))
2075 if (unlikely(min_key->objectid == ino)) {
2076 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
2078 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
2081 start = min_key->offset;
2082 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
2088 if (unlikely(max_key->objectid == ino)) {
2089 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
2091 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
2094 if (max_key->offset == 0)
2096 end = max_key->offset;
2097 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
2104 /* the lock_extent waits for readpage to complete */
2105 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2106 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
2107 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2112 static int find_next_key(struct btrfs_path *path, int level,
2113 struct btrfs_key *key)
2116 while (level < BTRFS_MAX_LEVEL) {
2117 if (!path->nodes[level])
2119 if (path->slots[level] + 1 <
2120 btrfs_header_nritems(path->nodes[level])) {
2121 btrfs_node_key_to_cpu(path->nodes[level], key,
2122 path->slots[level] + 1);
2131 * Insert current subvolume into reloc_control::dirty_subvol_roots
2133 static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
2134 struct reloc_control *rc,
2135 struct btrfs_root *root)
2137 struct btrfs_root *reloc_root = root->reloc_root;
2138 struct btrfs_root_item *reloc_root_item;
2140 /* @root must be a subvolume tree root with a valid reloc tree */
2141 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
2144 reloc_root_item = &reloc_root->root_item;
2145 memset(&reloc_root_item->drop_progress, 0,
2146 sizeof(reloc_root_item->drop_progress));
2147 reloc_root_item->drop_level = 0;
2148 btrfs_set_root_refs(reloc_root_item, 0);
2149 btrfs_update_reloc_root(trans, root);
2151 if (list_empty(&root->reloc_dirty_list)) {
2152 btrfs_grab_fs_root(root);
2153 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
2157 static int clean_dirty_subvols(struct reloc_control *rc)
2159 struct btrfs_root *root;
2160 struct btrfs_root *next;
2163 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
2165 struct btrfs_root *reloc_root = root->reloc_root;
2167 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
2168 list_del_init(&root->reloc_dirty_list);
2169 root->reloc_root = NULL;
2173 ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1);
2174 if (ret2 < 0 && !ret)
2177 btrfs_put_fs_root(root);
2183 * merge the relocated tree blocks in reloc tree with corresponding
2186 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2187 struct btrfs_root *root)
2189 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2190 struct btrfs_key key;
2191 struct btrfs_key next_key;
2192 struct btrfs_trans_handle *trans = NULL;
2193 struct btrfs_root *reloc_root;
2194 struct btrfs_root_item *root_item;
2195 struct btrfs_path *path;
2196 struct extent_buffer *leaf;
2204 path = btrfs_alloc_path();
2207 path->reada = READA_FORWARD;
2209 reloc_root = root->reloc_root;
2210 root_item = &reloc_root->root_item;
2212 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2213 level = btrfs_root_level(root_item);
2214 extent_buffer_get(reloc_root->node);
2215 path->nodes[level] = reloc_root->node;
2216 path->slots[level] = 0;
2218 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2220 level = root_item->drop_level;
2222 path->lowest_level = level;
2223 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
2224 path->lowest_level = 0;
2226 btrfs_free_path(path);
2230 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
2231 path->slots[level]);
2232 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
2234 btrfs_unlock_up_safe(path, 0);
2237 min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2238 memset(&next_key, 0, sizeof(next_key));
2241 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2242 BTRFS_RESERVE_FLUSH_ALL);
2247 trans = btrfs_start_transaction(root, 0);
2248 if (IS_ERR(trans)) {
2249 err = PTR_ERR(trans);
2253 trans->block_rsv = rc->block_rsv;
2258 ret = walk_down_reloc_tree(reloc_root, path, &level);
2266 if (!find_next_key(path, level, &key) &&
2267 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
2270 ret = replace_path(trans, rc, root, reloc_root, path,
2271 &next_key, level, max_level);
2280 btrfs_node_key_to_cpu(path->nodes[level], &key,
2281 path->slots[level]);
2285 ret = walk_up_reloc_tree(reloc_root, path, &level);
2291 * save the merging progress in the drop_progress.
2292 * this is OK since root refs == 1 in this case.
2294 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
2295 path->slots[level]);
2296 root_item->drop_level = level;
2298 btrfs_end_transaction_throttle(trans);
2301 btrfs_btree_balance_dirty(fs_info);
2303 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2304 invalidate_extent_cache(root, &key, &next_key);
2308 * handle the case only one block in the fs tree need to be
2309 * relocated and the block is tree root.
2311 leaf = btrfs_lock_root_node(root);
2312 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
2313 btrfs_tree_unlock(leaf);
2314 free_extent_buffer(leaf);
2318 btrfs_free_path(path);
2321 insert_dirty_subvol(trans, rc, root);
2324 btrfs_end_transaction_throttle(trans);
2326 btrfs_btree_balance_dirty(fs_info);
2328 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2329 invalidate_extent_cache(root, &key, &next_key);
2334 static noinline_for_stack
2335 int prepare_to_merge(struct reloc_control *rc, int err)
2337 struct btrfs_root *root = rc->extent_root;
2338 struct btrfs_fs_info *fs_info = root->fs_info;
2339 struct btrfs_root *reloc_root;
2340 struct btrfs_trans_handle *trans;
2341 LIST_HEAD(reloc_roots);
2345 mutex_lock(&fs_info->reloc_mutex);
2346 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2347 rc->merging_rsv_size += rc->nodes_relocated * 2;
2348 mutex_unlock(&fs_info->reloc_mutex);
2352 num_bytes = rc->merging_rsv_size;
2353 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2354 BTRFS_RESERVE_FLUSH_ALL);
2359 trans = btrfs_join_transaction(rc->extent_root);
2360 if (IS_ERR(trans)) {
2362 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2364 return PTR_ERR(trans);
2368 if (num_bytes != rc->merging_rsv_size) {
2369 btrfs_end_transaction(trans);
2370 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2376 rc->merge_reloc_tree = 1;
2378 while (!list_empty(&rc->reloc_roots)) {
2379 reloc_root = list_entry(rc->reloc_roots.next,
2380 struct btrfs_root, root_list);
2381 list_del_init(&reloc_root->root_list);
2383 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2384 BUG_ON(IS_ERR(root));
2385 BUG_ON(root->reloc_root != reloc_root);
2388 * set reference count to 1, so btrfs_recover_relocation
2389 * knows it should resumes merging
2392 btrfs_set_root_refs(&reloc_root->root_item, 1);
2393 btrfs_update_reloc_root(trans, root);
2395 list_add(&reloc_root->root_list, &reloc_roots);
2398 list_splice(&reloc_roots, &rc->reloc_roots);
2401 btrfs_commit_transaction(trans);
2403 btrfs_end_transaction(trans);
2407 static noinline_for_stack
2408 void free_reloc_roots(struct list_head *list)
2410 struct btrfs_root *reloc_root;
2412 while (!list_empty(list)) {
2413 reloc_root = list_entry(list->next, struct btrfs_root,
2415 __del_reloc_root(reloc_root);
2416 free_extent_buffer(reloc_root->node);
2417 free_extent_buffer(reloc_root->commit_root);
2418 reloc_root->node = NULL;
2419 reloc_root->commit_root = NULL;
2423 static noinline_for_stack
2424 void merge_reloc_roots(struct reloc_control *rc)
2426 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2427 struct btrfs_root *root;
2428 struct btrfs_root *reloc_root;
2429 LIST_HEAD(reloc_roots);
2433 root = rc->extent_root;
2436 * this serializes us with btrfs_record_root_in_transaction,
2437 * we have to make sure nobody is in the middle of
2438 * adding their roots to the list while we are
2441 mutex_lock(&fs_info->reloc_mutex);
2442 list_splice_init(&rc->reloc_roots, &reloc_roots);
2443 mutex_unlock(&fs_info->reloc_mutex);
2445 while (!list_empty(&reloc_roots)) {
2447 reloc_root = list_entry(reloc_roots.next,
2448 struct btrfs_root, root_list);
2450 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2451 root = read_fs_root(fs_info,
2452 reloc_root->root_key.offset);
2453 BUG_ON(IS_ERR(root));
2454 BUG_ON(root->reloc_root != reloc_root);
2456 ret = merge_reloc_root(rc, root);
2458 if (list_empty(&reloc_root->root_list))
2459 list_add_tail(&reloc_root->root_list,
2464 list_del_init(&reloc_root->root_list);
2474 btrfs_handle_fs_error(fs_info, ret, NULL);
2475 if (!list_empty(&reloc_roots))
2476 free_reloc_roots(&reloc_roots);
2478 /* new reloc root may be added */
2479 mutex_lock(&fs_info->reloc_mutex);
2480 list_splice_init(&rc->reloc_roots, &reloc_roots);
2481 mutex_unlock(&fs_info->reloc_mutex);
2482 if (!list_empty(&reloc_roots))
2483 free_reloc_roots(&reloc_roots);
2486 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
2489 static void free_block_list(struct rb_root *blocks)
2491 struct tree_block *block;
2492 struct rb_node *rb_node;
2493 while ((rb_node = rb_first(blocks))) {
2494 block = rb_entry(rb_node, struct tree_block, rb_node);
2495 rb_erase(rb_node, blocks);
2500 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2501 struct btrfs_root *reloc_root)
2503 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2504 struct btrfs_root *root;
2506 if (reloc_root->last_trans == trans->transid)
2509 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2510 BUG_ON(IS_ERR(root));
2511 BUG_ON(root->reloc_root != reloc_root);
2513 return btrfs_record_root_in_trans(trans, root);
2516 static noinline_for_stack
2517 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2518 struct reloc_control *rc,
2519 struct backref_node *node,
2520 struct backref_edge *edges[])
2522 struct backref_node *next;
2523 struct btrfs_root *root;
2529 next = walk_up_backref(next, edges, &index);
2532 BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
2534 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2535 record_reloc_root_in_trans(trans, root);
2539 btrfs_record_root_in_trans(trans, root);
2540 root = root->reloc_root;
2542 if (next->new_bytenr != root->node->start) {
2543 BUG_ON(next->new_bytenr);
2544 BUG_ON(!list_empty(&next->list));
2545 next->new_bytenr = root->node->start;
2547 list_add_tail(&next->list,
2548 &rc->backref_cache.changed);
2549 __mark_block_processed(rc, next);
2555 next = walk_down_backref(edges, &index);
2556 if (!next || next->level <= node->level)
2563 /* setup backref node path for btrfs_reloc_cow_block */
2565 rc->backref_cache.path[next->level] = next;
2568 next = edges[index]->node[UPPER];
2574 * select a tree root for relocation. return NULL if the block
2575 * is reference counted. we should use do_relocation() in this
2576 * case. return a tree root pointer if the block isn't reference
2577 * counted. return -ENOENT if the block is root of reloc tree.
2579 static noinline_for_stack
2580 struct btrfs_root *select_one_root(struct backref_node *node)
2582 struct backref_node *next;
2583 struct btrfs_root *root;
2584 struct btrfs_root *fs_root = NULL;
2585 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2591 next = walk_up_backref(next, edges, &index);
2595 /* no other choice for non-references counted tree */
2596 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
2599 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2605 next = walk_down_backref(edges, &index);
2606 if (!next || next->level <= node->level)
2611 return ERR_PTR(-ENOENT);

static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}

static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;
	u64 tmp;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;

	/*
	 * We are under a transaction here so we can only do limited flushing.
	 * If we get an enospc just kick back -EAGAIN so we know to drop the
	 * transaction and try to refill when we can flush all the things.
	 */
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_LIMIT);
	if (ret) {
		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
		while (tmp <= rc->reserved_bytes)
			tmp <<= 1;
		/*
		 * only one thread can access block_rsv at this point,
		 * so we don't need hold lock to protect block_rsv.
		 * we expand more reservation size here to allow enough
		 * space for relocation and we will return earlier in
		 * enospc case.
		 */
		rc->block_rsv->size = tmp + fs_info->nodesize *
				      RELOCATION_RESERVED_NODES;
		return -EAGAIN;
	}

	return 0;
}
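
/*
 * Illustrative sketch (annotation, not part of the original file): the
 * caller-side pattern the -EAGAIN above asks for.  The caller must drop
 * its transaction so that a full flush becomes legal, refill the rsv and
 * retry, roughly as relocate_block_group() does on each loop iteration:
 *
 *	ret = relocate_tree_blocks(trans, rc, &blocks);
 *	if (ret == -EAGAIN) {
 *		btrfs_end_transaction(trans);
 *		btrfs_block_rsv_refill(rc->extent_root, rc->block_rsv,
 *				       rc->block_rsv->size,
 *				       BTRFS_RESERVE_FLUSH_ALL);
 *		// start a new transaction and retry from the same extent
 *	}
 */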

/*
 * relocate a block tree, and then update pointers in upper level
 * blocks that reference the block to point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		struct btrfs_key first_key;

		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret < 0)
					err = ret;
				else
					err = -ENOENT;

				btrfs_release_path(path);
				break;
			}

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				err = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->fs_info->nodesize;
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
		eb = read_tree_block(fs_info, bytenr, generation,
				     upper->level - 1, &first_key);
		if (IS_ERR(eb)) {
			err = PTR_ERR(eb);
			goto next;
		} else if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking_write(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			ret = btrfs_inc_extent_ref(trans, root,
						node->eb->start, blocksize,
						upper->eb->start,
						btrfs_header_owner(upper->eb),
						node->level, 0);
			BUG_ON(ret);

			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}

	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;
	BUG_ON(err == -ENOSPC);
	return err;
}

static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}

static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}

static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
			EXTENT_DIRTY);
}

static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		blocksize = rc->extent_root->fs_info->nodesize;
		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}

/*
 * mark a block and all blocks that directly/indirectly reference the block
 * as processed.
 */
static void update_processed_blocks(struct reloc_control *rc,
				    struct backref_node *node)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			__mark_block_processed(rc, next);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}

static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
{
	u32 blocksize = rc->extent_root->fs_info->nodesize;

	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
		return 1;
	return 0;
}

static int get_tree_block_key(struct btrfs_fs_info *fs_info,
			      struct tree_block *block)
{
	struct extent_buffer *eb;

	BUG_ON(block->key_ready);
	eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
			     block->level, NULL);
	if (IS_ERR(eb)) {
		return PTR_ERR(eb);
	} else if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	if (block->level == 0)
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = 1;
	return 0;
}

/*
 * helper function to relocate a tree block
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct backref_node *node,
				struct btrfs_key *key,
				struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	BUG_ON(node->processed);
	root = select_one_root(node);
	if (root == ERR_PTR(-ENOENT)) {
		update_processed_blocks(rc, node);
		goto out;
	}

	if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret)
			goto out;
	}

	if (root) {
		if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
			BUG_ON(node->new_bytenr);
			BUG_ON(!list_empty(&node->list));
			btrfs_record_root_in_trans(trans, root);
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			node->root = root;
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			path->lowest_level = node->level;
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		remove_backref_node(&rc->backref_cache, node);
	return ret;
}

/*
 * relocate a list of blocks
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct tree_block *next;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}

	/* Kick in readahead for tree blocks with missing keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			readahead_tree_block(fs_info, block->bytenr);
	}

	/* Get first keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready) {
			err = get_tree_block_key(fs_info, block);
			if (err)
				goto out_free_path;
		}
	}

	/* Do tree relocation */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
				err = ret;
			goto out;
		}
	}
out:
	err = finish_pending_nodes(trans, rc, path, err);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}
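
/*
 * Note on the three-pass structure above (annotation, not original code):
 * pass 1 issues readahead for every block whose key is still unknown,
 * pass 2 reads the keys (hitting a warm cache thanks to pass 1), and only
 * pass 3 mutates trees.  A naive single-pass version would serialize each
 * cold read behind the previous block's relocation:
 *
 *	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
 *		if (!block->key_ready)
 *			get_tree_block_key(fs_info, block);	// cold read
 *		// ... relocate ...
 *	}
 */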

static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
				 struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	u64 num_bytes;
	int nr = 0;
	int ret = 0;
	u64 prealloc_start = cluster->start - offset;
	u64 prealloc_end = cluster->end - offset;
	u64 cur_offset;
	struct extent_changeset *data_reserved = NULL;

	BUG_ON(cluster->start != cluster->boundary[0]);
	inode_lock(inode);

	ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
					  prealloc_end + 1 - prealloc_start);
	if (ret)
		goto out;

	cur_offset = prealloc_start;
	while (nr < cluster->nr) {
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		num_bytes = end + 1 - start;
		if (cur_offset < start)
			btrfs_free_reserved_data_space(inode, data_reserved,
					cur_offset, start - cur_offset);
		ret = btrfs_prealloc_file_range(inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		if (ret)
			break;
		nr++;
	}
	if (cur_offset < prealloc_end)
		btrfs_free_reserved_data_space(inode, data_reserved,
				cur_offset, prealloc_end + 1 - cur_offset);
out:
	inode_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
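
/*
 * Worked example of the offset arithmetic above (annotation, numbers are
 * made up): with index_cnt, the block group start, at 1G and a cluster
 * covering disk bytenrs [1G+8M, 1G+12M) with boundaries at 1G+8M and
 * 1G+10M, the loop preallocates file ranges [8M, 10M) and [10M, 12M) in
 * the data reloc inode, so file offset == disk bytenr - offset throughout
 * and every extent boundary starts its own preallocated range.
 */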

static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
			 u64 block_start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret = 0;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = end + 1 - start;
	em->block_len = em->len;
	em->block_start = block_start;
	em->bdev = fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 0);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	return ret;
}

static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 page_start;
	u64 page_end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int ret = 0;

	if (!cluster->nr)
		return 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	ret = prealloc_file_extent_cluster(inode, cluster);
	if (ret)
		goto out;

	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_extent_mapping(inode, cluster->start - offset,
				   cluster->end - offset, cluster->start);
	if (ret)
		goto out;

	index = (cluster->start - offset) >> PAGE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	while (index <= last_index) {
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
				PAGE_SIZE);
		if (ret)
			goto out;

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				ret = -ENOMEM;
				goto out;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							       PAGE_SIZE, true);
				ret = -EIO;
				goto out;
			}
		}

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;

		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

		set_page_extent_mapped(page);

		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY);
			nr++;
		}

		ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
						NULL, 0);
		if (ret) {
			unlock_page(page);
			put_page(page);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       PAGE_SIZE, true);

			clear_extent_bits(&BTRFS_I(inode)->io_tree,
					  page_start, page_end,
					  EXTENT_LOCKED | EXTENT_BOUNDARY);
			goto out;
		}
		set_page_dirty(page);

		unlock_extent(&BTRFS_I(inode)->io_tree,
			      page_start, page_end);
		unlock_page(page);
		put_page(page);

		index++;
		btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
					       false);
		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_throttle(fs_info);
	}
	WARN_ON(nr != cluster->nr);
out:
	kfree(ra);
	return ret;
}

static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
			 struct file_extent_cluster *cluster)
{
	int ret;

	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}

	if (!cluster->nr)
		cluster->start = extent_key->objectid;
	else
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;

	if (cluster->nr >= MAX_EXTENTS) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	return 0;
}
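
/*
 * Illustrative trace (annotation, not original code): three extents at
 * bytenrs 100M..101M, 101M..102M and 200M..201M arrive in order.  The
 * first two are contiguous (objectid == cluster->end + 1), so they share
 * a cluster with boundary[] = {100M, 101M} and nr = 2; the third breaks
 * contiguity, which flushes the cluster through
 * relocate_file_extent_cluster() and starts a new one at 200M.  A full
 * cluster (nr == MAX_EXTENTS) is flushed the same way.
 */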

/*
 * helper to add a tree block to the list.
 * the major work is getting the generation and level of the block.
 */
static int add_tree_block(struct reloc_control *rc,
			  struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	struct tree_block *block;
	struct rb_node *rb_node;
	u32 item_size;
	int level = -1;
	u64 generation;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
	    item_size >= sizeof(*ei) + sizeof(*bi)) {
		ei = btrfs_item_ptr(eb, path->slots[0],
				    struct btrfs_extent_item);
		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
			bi = (struct btrfs_tree_block_info *)(ei + 1);
			level = btrfs_tree_block_level(eb, bi);
		} else {
			level = (int)extent_key->offset;
		}
		generation = btrfs_extent_generation(eb, ei);
	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
		btrfs_print_v0_err(eb->fs_info);
		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
		return -EINVAL;
	} else {
		BUG();
	}

	btrfs_release_path(path);

	BUG_ON(level == -1);

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;

	block->bytenr = extent_key->objectid;
	block->key.objectid = rc->extent_root->fs_info->nodesize;
	block->key.offset = generation;
	block->level = level;
	block->key_ready = 0;

	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, block->bytenr);

	return 0;
}
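
/*
 * Annotation (not original code): the two extent item layouts decoded
 * above.  With the skinny metadata incompat flag the level is carried
 * directly in the key:
 *
 *	(bytenr, BTRFS_METADATA_ITEM_KEY, level)
 *
 * while the older fat layout stores it behind the extent item:
 *
 *	(bytenr, BTRFS_EXTENT_ITEM_KEY, nodesize)
 *		struct btrfs_extent_item + struct btrfs_tree_block_info
 *
 * Note also that block->key temporarily abuses objectid/offset to stash
 * nodesize and generation until get_tree_block_key() reads the real key.
 */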

/*
 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
 */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	if (tree_block_processed(bytenr, rc))
		return 0;

	if (tree_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	if (ret) {
		ASSERT(ret == 1);
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
	     "tree block extent item (%llu) is not found in extent tree",
		     bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to check if the block uses full backrefs for pointers in it
 */
static int block_use_full_backref(struct reloc_control *rc,
				  struct extent_buffer *eb)
{
	u64 flags;
	int ret;

	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
		return 1;

	ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
				       eb->start, btrfs_header_level(eb), 1,
				       NULL, &flags);
	BUG_ON(ret);

	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
		ret = 1;
	else
		ret = 0;
	return ret;
}

static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode,
				    u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return -ENOENT;

truncate:
	ret = btrfs_check_trunc_cache_free_space(fs_info,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	iput(inode);
	return ret;
}

/*
 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
 * this function scans fs tree to find blocks referencing the data extent
 */
static int find_data_references(struct reloc_control *rc,
				struct btrfs_key *extent_key,
				struct extent_buffer *leaf,
				struct btrfs_extent_data_ref *ref,
				struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct tree_block *block;
	struct btrfs_root *root;
	struct btrfs_file_extent_item *fi;
	struct rb_node *rb_node;
	struct btrfs_key key;
	u64 ref_root;
	u64 ref_objectid;
	u64 ref_offset;
	u32 ref_count;
	u32 nritems;
	int err = 0;
	int added = 0;
	int counted;
	int ret;

	ref_root = btrfs_extent_data_ref_root(leaf, ref);
	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
	ref_count = btrfs_extent_data_ref_count(leaf, ref);

	/*
	 * This is an extent belonging to the free space cache, lets just delete
	 * it and redo the search.
	 */
	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
		ret = delete_block_group_cache(fs_info, rc->block_group,
					       NULL, ref_objectid);
		if (ret != -ENOENT)
			return ret;
		ret = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	root = read_fs_root(fs_info, ref_root);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}

	key.objectid = ref_objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (ref_offset > ((u64)-1 << 32))
		key.offset = 0;
	else
		key.offset = ref_offset;

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	/*
	 * the references in tree blocks that use full backrefs
	 * are not counted in
	 */
	if (block_use_full_backref(rc, leaf))
		counted = 0;
	else
		counted = 1;
	rb_node = tree_search(blocks, leaf->start);
	if (rb_node) {
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
	}

	while (ref_count > 0) {
		while (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (WARN_ON(ret > 0))
				goto out;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			added = 0;

			if (block_use_full_backref(rc, leaf))
				counted = 0;
			else
				counted = 1;
			rb_node = tree_search(blocks, leaf->start);
			if (rb_node) {
				if (counted)
					added = 1;
				else
					path->slots[0] = nritems;
			}
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (WARN_ON(key.objectid != ref_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY))
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto next;

		if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		    extent_key->objectid)
			goto next;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		if (key.offset != ref_offset)
			goto next;

		if (counted)
			ref_count--;
		if (added)
			goto next;

		if (!tree_block_processed(leaf->start, rc)) {
			block = kmalloc(sizeof(*block), GFP_NOFS);
			if (!block) {
				err = -ENOMEM;
				break;
			}
			block->bytenr = leaf->start;
			btrfs_item_key_to_cpu(leaf, &block->key, 0);
			block->level = 0;
			block->key_ready = 1;
			rb_node = tree_insert(blocks, block->bytenr,
					      &block->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   block->bytenr);
		}
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
next:
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return err;
}

/*
 * helper to find all tree blocks that reference a given data extent
 */
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
			struct btrfs_key *extent_key,
			struct btrfs_path *path,
			struct rb_root *blocks)
{
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_extent_inline_ref *iref;
	unsigned long ptr;
	unsigned long end;
	u32 blocksize = rc->extent_root->fs_info->nodesize;
	int ret = 0;
	int err = 0;

	eb = path->nodes[0];
	ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
	ptr += sizeof(struct btrfs_extent_item);

	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		key.type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_DATA);
		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else {
			ret = -EINVAL;
			btrfs_err(rc->extent_root->fs_info,
		     "extent %llu slot %d has an invalid inline ref type",
			     eb->start, path->slots[0]);
		}
		if (ret) {
			err = ret;
			goto out;
		}
		ptr += btrfs_extent_inline_ref_size(key.type);
	}
	WARN_ON(ptr > end);

	while (1) {
		cond_resched();
		eb = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret < 0) {
				err = ret;
				break;
			}
			if (ret > 0)
				break;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_key->objectid)
			break;

		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
					      struct btrfs_extent_data_ref);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			btrfs_print_v0_err(eb->fs_info);
			btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
			ret = -EINVAL;
		} else {
			ret = 0;
		}
		if (ret) {
			err = ret;
			break;
		}
		path->slots[0]++;
	}
out:
	btrfs_release_path(path);
	if (err)
		free_block_list(blocks);
	return err;
}

/*
 * helper to find the next unprocessed extent
 */
static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	last = rc->block_group->key.objectid + rc->block_group->key.offset;
	while (1) {
		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + fs_info->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					fs_info->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}
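
/*
 * Illustrative caller contract for find_next_extent() (annotation, not
 * original code), as relocate_block_group() uses it below:
 *
 *	ret = find_next_extent(rc, path, &key);
 *	if (ret < 0)
 *		// error; path has been released
 *	else if (ret > 0)
 *		// no unprocessed extent left in the block group
 *	else
 *		// key describes the next extent; path points at its item
 */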

static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = rc;
	mutex_unlock(&fs_info->reloc_mutex);
}

static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = NULL;
	mutex_unlock(&fs_info->reloc_mutex);
}

static int check_extent_flags(u64 flags)
{
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
	    !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
		return 1;
	return 0;
}
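
/*
 * Annotation (not original code): the combinations rejected above.
 * Exactly one of DATA / TREE_BLOCK must be set, and FULL_BACKREF is
 * only meaningful for tree blocks:
 *
 *	DATA + TREE_BLOCK		-> invalid (returns 1)
 *	neither DATA nor TREE_BLOCK	-> invalid (returns 1)
 *	DATA + FULL_BACKREF		-> invalid (returns 1)
 *	anything else			-> valid (returns 0)
 */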

static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;
	int ret;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->key.objectid;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
			      RELOCATION_RESERVED_NODES;
	ret = btrfs_block_rsv_refill(rc->extent_root,
				     rc->block_rsv, rc->block_rsv->size,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret)
		return ret;

	rc->create_reloc_tree = 1;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * extent tree is not a ref_cow tree and has no reloc_root to
		 * cleanup. And callers are responsible to free the above
		 * block rsv.
		 */
		return PTR_ERR(trans);
	}
	btrfs_commit_transaction(trans);
	return 0;
}
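
/*
 * Worked example of the initial reservation above (annotation): with the
 * common 16KiB nodesize, the starting block_rsv size is
 *
 *	16KiB * RELOCATION_RESERVED_NODES (256) = 4MiB
 *
 * reserve_metadata_space() later grows this geometrically when the bytes
 * reserved for pending tree blocks outgrow it.
 */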

static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(rc->extent_root,
					rc->block_rsv, rc->block_rsv->size,
					BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans);
			trans = NULL;
			continue;
		}

		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			flags = btrfs_extent_flags(path->nodes[0], ei);
			ret = check_extent_flags(flags);
			BUG_ON(ret);
		} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
			err = -EINVAL;
			btrfs_print_v0_err(trans->fs_info);
			btrfs_abort_transaction(trans, err);
			break;
		} else {
			BUG();
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				/*
				 * if we fail to relocate tree blocks, force to update
				 * backref cache when committing transaction.
				 */
				rc->backref_cache.last_trans = trans->transid - 1;

				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
		trans = NULL;

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = 1;
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
	}
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
		if (ret == 1) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

	if (trans) {
		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
	}

	if (!err) {
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = 0;
	set_reloc_control(rc);

	backref_cache_cleanup(&rc->backref_cache);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);

	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = 0;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);

	/* get rid of pinned extents */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	btrfs_commit_transaction(trans);
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_free:
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}

static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to create an inode for data relocation.
 * the inode lives in the data relocation tree and its link count is 0.
 */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 objectid;
	int err = 0;

	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	err = btrfs_find_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	BUG_ON(err);

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	BUG_ON(IS_ERR(inode));
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}

static struct reloc_control *alloc_reloc_control(void)
{
	struct reloc_control *rc;

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
	backref_cache_init(&rc->backref_cache);
	mapping_tree_init(&rc->reloc_root_tree);
	extent_io_tree_init(&rc->processed_blocks, NULL);
	return rc;
}

/*
 * Print the block group being relocated
 */
static void describe_relocation(struct btrfs_fs_info *fs_info,
				struct btrfs_block_group_cache *block_group)
{
	char buf[128] = {'\0'};

	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));

	btrfs_info(fs_info,
		   "relocating block group %llu flags %s",
		   block_group->key.objectid, buf);
}

/*
 * function to relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
	struct btrfs_block_group_cache *bg;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	bg = btrfs_lookup_block_group(fs_info, group_start);
	if (!bg)
		return -ENOENT;

	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
		btrfs_put_block_group(bg);
		return -ETXTBSY;
	}

	rc = alloc_reloc_control();
	if (!rc) {
		btrfs_put_block_group(bg);
		return -ENOMEM;
	}

	rc->extent_root = extent_root;
	rc->block_group = bg;

	ret = btrfs_inc_block_group_ro(rc->block_group);
	if (ret) {
		err = ret;
		goto out;
	}
	rw = 1;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(fs_info, rc->block_group, path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	describe_relocation(fs_info, rc->block_group);

	btrfs_wait_block_group_reservations(rc->block_group);
	btrfs_wait_nocow_writers(rc->block_group);
	btrfs_wait_ordered_roots(fs_info, U64_MAX,
				 rc->block_group->key.objectid,
				 rc->block_group->key.offset);

	while (1) {
		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (rc->extents_found == 0)
			break;

		btrfs_info(fs_info, "found %llu extents", rc->extents_found);

		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret) {
				err = ret;
				goto out;
			}
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
	if (err && rw)
		btrfs_dec_block_group_ro(rc->block_group);
	iput(rc->data_inode);
	btrfs_put_block_group(rc->block_group);
	kfree(rc);
	return err;
}

static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
		sizeof(root->root_item.drop_progress));
	root->root_item.drop_level = 0;
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans);
	if (err)
		return err;
	return ret;
}

/*
 * recover relocation interrupted by a system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks.
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_fs_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = read_fs_root(fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control();
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		err = PTR_ERR(trans);
		goto out_free;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			goto out_free;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_free;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	err = btrfs_commit_transaction(trans);

	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_free:
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root))
			err = PTR_ERR(fs_root);
		else
			err = btrfs_orphan_cleanup(fs_root);
	}
	return err;
}

/*
 * helper to add ordered checksums for data relocation.
 *
 * cloning checksums properly handles the nodatasum extents.
 * it also saves CPU time by not recalculating the checksums.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}
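
/*
 * Worked example of the rebase above (annotation, made-up numbers):
 * suppose the extent being moved lives at disk_bytenr = 300M, the new
 * ordered extent starts at ordered->start = 700M, and a csum entry covers
 * bytes starting at sums->bytenr = 300M + 64K.  Then
 *
 *	new_bytenr = 700M + ((300M + 64K) - 300M) = 700M + 64K
 *
 * i.e. each csum keeps its relative offset within the extent at the new
 * location.
 */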

int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}

/*
 * called before creating a snapshot. it calculates the metadata reservation
 * required for relocating tree blocks in the snapshot.
 */
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create snapshot, cowing the fs
	 * tree may use more space than it frees. so we need
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}
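
/*
 * Worked example of the reservation above (annotation): if the reloc tree
 * holds 10MiB of relocated nodes, merging may COW up to 10MiB in the
 * reloc tree plus 10MiB in the fs tree.  The reloc tree half is returned
 * when that tree is dropped, but a snapshot taken mid-merge can keep the
 * fs tree half pinned, hence the extra rc->nodes_relocated reserved here.
 */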

/*
 * called after a snapshot is created. migrate the block reservation
 * and create a reloc root for the newly created snapshot.
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated, true);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);
	return ret;
}