2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
25 #include "transaction.h"
26 #include "print-tree.h"
/*
 * Forward declarations for the node/leaf balancing helpers that are
 * defined further down in this file.
 */
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
32 const struct btrfs_key *ins_key, struct btrfs_path *path,
33 int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 struct btrfs_fs_info *fs_info,
36 struct extent_buffer *dst,
37 struct extent_buffer *src, int empty);
38 static int balance_node_right(struct btrfs_trans_handle *trans,
39 struct btrfs_fs_info *fs_info,
40 struct extent_buffer *dst_buf,
41 struct extent_buffer *src_buf);
42 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
/*
 * Allocate a zero-initialized btrfs_path from its dedicated slab cache.
 * GFP_NOFS is used because callers may already hold filesystem locks.
 * Caller must release it with btrfs_free_path().
 */
45 struct btrfs_path *btrfs_alloc_path(void)
47 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
51 * set all locked nodes in the path to blocking locks. This should
52 * be done before scheduling
54 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
/* Walk every level; slots with no node or no lock taken are skipped. */
57 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
58 if (!p->nodes[i] || !p->locks[i])
60 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
/* Record the new (blocking) lock state in the path itself. */
61 if (p->locks[i] == BTRFS_READ_LOCK)
62 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
63 else if (p->locks[i] == BTRFS_WRITE_LOCK)
64 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
69 * reset all the locked nodes in the path to spinning locks.
71 * held is used to keep lockdep happy, when lockdep is enabled
72 * we set held to a blocking lock before we go around and
73 * retake all the spinlocks in the path. You can safely use NULL
76 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
77 struct extent_buffer *held, int held_rw)
/* First make @held blocking so lockdep sees a consistent ordering. */
82 btrfs_set_lock_blocking_rw(held, held_rw);
83 if (held_rw == BTRFS_WRITE_LOCK)
84 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
85 else if (held_rw == BTRFS_READ_LOCK)
86 held_rw = BTRFS_READ_LOCK_BLOCKING;
88 btrfs_set_path_blocking(p);
/* Retake spinning locks top-down (highest level first). */
90 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
91 if (p->nodes[i] && p->locks[i]) {
92 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
93 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
94 p->locks[i] = BTRFS_WRITE_LOCK;
95 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
96 p->locks[i] = BTRFS_READ_LOCK;
/* Finally convert @held back to a spinning lock as well. */
101 btrfs_clear_lock_blocking_rw(held, held_rw);
104 /* this also releases the path */
105 void btrfs_free_path(struct btrfs_path *p)
/* Drop all node references/locks, then return @p to the slab cache. */
109 btrfs_release_path(p);
110 kmem_cache_free(btrfs_path_cachep, p);
114 * path release drops references on the extent buffers in the path
115 * and it drops any locks held by this path
117 * It is safe to call this on paths that have no locks or extent buffers held.
119 noinline void btrfs_release_path(struct btrfs_path *p)
123 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
/* Unlock (if locked) and drop the reference at each level. */
128 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
131 free_extent_buffer(p->nodes[i]);
137 * safely gets a reference on the root node of a tree. A lock
138 * is not taken, so a concurrent writer may put a different node
139 * at the root of the tree. See btrfs_lock_root_node for the
142 * The extent buffer returned by this has a reference taken, so
143 * it won't disappear. It may stop being the root of the tree
144 * at any time because there are no locks held.
146 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
148 struct extent_buffer *eb;
/* RCU-protected load of the current root node pointer. */
152 eb = rcu_dereference(root->node);
155 * RCU really hurts here, we could free up the root node because
156 * it was COWed but we may not get the new root node yet so do
157 * the inc_not_zero dance and if it doesn't work then
158 * synchronize_rcu and try again.
160 if (atomic_inc_not_zero(&eb->refs)) {
170 /* loop around taking references on and locking the root node of the
171 * tree until you end up with a lock on the root. A locked buffer
172 * is returned, with a reference held.
174 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
176 struct extent_buffer *eb;
179 eb = btrfs_root_node(root);
/* If the node is still the root after locking, we won the race. */
181 if (eb == root->node)
/* Lost the race to a concurrent COW: unlock, drop ref, retry. */
183 btrfs_tree_unlock(eb);
184 free_extent_buffer(eb);
189 /* loop around taking references on and locking the root node of the
190 * tree until you end up with a lock on the root. A locked buffer
191 * is returned, with a reference held.
193 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
195 struct extent_buffer *eb;
198 eb = btrfs_root_node(root);
/* Same retry loop as btrfs_lock_root_node, but with a read lock. */
199 btrfs_tree_read_lock(eb);
200 if (eb == root->node)
202 btrfs_tree_read_unlock(eb);
203 free_extent_buffer(eb);
208 /* cowonly root (everything not a reference counted cow subvolume), just get
209 * put onto a simple dirty list. transaction.c walks this to make sure they
210 * get properly updated on disk.
212 static void add_root_to_dirty_list(struct btrfs_root *root)
214 struct btrfs_fs_info *fs_info = root->fs_info;
/* Nothing to do if already dirty or dirty-tracking is disabled. */
216 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
217 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
220 spin_lock(&fs_info->trans_lock);
221 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
222 /* Want the extent tree to be the last on the list */
223 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
224 list_move_tail(&root->dirty_list,
225 &fs_info->dirty_cowonly_roots);
/* All other roots go to the front of the dirty list. */
227 list_move(&root->dirty_list,
228 &fs_info->dirty_cowonly_roots);
230 spin_unlock(&fs_info->trans_lock);
234 * used by snapshot creation to make a copy of a root for a tree with
235 * a given objectid. The buffer with the new root node is returned in
236 * cow_ret, and this func returns zero on success or a negative error code.
238 int btrfs_copy_root(struct btrfs_trans_handle *trans,
239 struct btrfs_root *root,
240 struct extent_buffer *buf,
241 struct extent_buffer **cow_ret, u64 new_root_objectid)
243 struct btrfs_fs_info *fs_info = root->fs_info;
244 struct extent_buffer *cow;
247 struct btrfs_disk_key disk_key;
/* A ref-counted root must only be copied inside its running transaction. */
249 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
250 trans->transid != fs_info->running_transaction->transid);
251 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
252 trans->transid != root->last_trans);
254 level = btrfs_header_level(buf);
/* Leaves carry item keys, interior nodes carry node keys. */
256 btrfs_item_key(buf, &disk_key, 0);
258 btrfs_node_key(buf, &disk_key, 0);
260 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
261 &disk_key, level, buf->start, 0);
/* Clone the old root into the new block and rewrite its header. */
265 copy_extent_buffer_full(cow, buf);
266 btrfs_set_header_bytenr(cow, cow->start);
267 btrfs_set_header_generation(cow, trans->transid);
268 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
269 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
270 BTRFS_HEADER_FLAG_RELOC);
271 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
272 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
274 btrfs_set_header_owner(cow, new_root_objectid);
276 write_extent_buffer_fsid(cow, fs_info->fsid);
/* Bump refs on everything the copy points to; full backref for reloc. */
278 WARN_ON(btrfs_header_generation(buf) > trans->transid);
279 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
280 ret = btrfs_inc_ref(trans, root, cow, 1);
282 ret = btrfs_inc_ref(trans, root, cow, 0);
287 btrfs_mark_buffer_dirty(cow);
/*
 * Tree modification log operation types and record structures. Each
 * tree_mod_elem records one logged modification of a tree block so that
 * old versions of the tree can be rewound for time-sequenced readers.
 */
296 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
297 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
299 MOD_LOG_ROOT_REPLACE,
302 struct tree_mod_root {
307 struct tree_mod_elem {
313 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
316 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
319 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
320 struct btrfs_disk_key key;
323 /* this is used for op == MOD_LOG_MOVE_KEYS */
329 /* this is used for op == MOD_LOG_ROOT_REPLACE */
330 struct tree_mod_root old_root;
334 * Pull a new tree mod seq number for our operation.
336 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
/* Atomic post-increment; returns the fresh, unique sequence number. */
338 return atomic64_inc_return(&fs_info->tree_mod_seq);
342 * This adds a new blocker to the tree mod log's blocker list if the @elem
343 * passed does not already have a sequence number set. So when a caller expects
344 * to record tree modifications, it should ensure to set elem->seq to zero
345 * before calling btrfs_get_tree_mod_seq.
346 * Returns a fresh, unused tree log modification sequence number, even if no new
349 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
350 struct seq_list *elem)
/* tree_mod_log_lock orders us against loggers; seq_lock guards the list. */
352 write_lock(&fs_info->tree_mod_log_lock);
353 spin_lock(&fs_info->tree_mod_seq_lock);
355 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
356 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
358 spin_unlock(&fs_info->tree_mod_seq_lock);
359 write_unlock(&fs_info->tree_mod_log_lock);
/*
 * Drop a blocker previously registered with btrfs_get_tree_mod_seq and,
 * if it was the oldest one, garbage-collect every tree-mod-log element
 * whose sequence number no remaining blocker can still need.
 */
364 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
365 struct seq_list *elem)
367 struct rb_root *tm_root;
368 struct rb_node *node;
369 struct rb_node *next;
370 struct seq_list *cur_elem;
371 struct tree_mod_elem *tm;
372 u64 min_seq = (u64)-1;
373 u64 seq_putting = elem->seq;
378 spin_lock(&fs_info->tree_mod_seq_lock);
379 list_del(&elem->list);
/* Find the smallest seq still blocked by some other registered elem. */
382 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
383 if (cur_elem->seq < min_seq) {
384 if (seq_putting > cur_elem->seq) {
386 * blocker with lower sequence number exists, we
387 * cannot remove anything from the log
389 spin_unlock(&fs_info->tree_mod_seq_lock);
392 min_seq = cur_elem->seq;
395 spin_unlock(&fs_info->tree_mod_seq_lock);
398 * anything that's lower than the lowest existing (read: blocked)
399 * sequence number can be removed from the tree.
401 write_lock(&fs_info->tree_mod_log_lock);
402 tm_root = &fs_info->tree_mod_log;
/* Walk the rbtree, capturing 'next' before each erase. */
403 for (node = rb_first(tm_root); node; node = next) {
404 next = rb_next(node);
405 tm = rb_entry(node, struct tree_mod_elem, node);
406 if (tm->seq > min_seq)
408 rb_erase(node, tm_root);
411 write_unlock(&fs_info->tree_mod_log_lock);
415 * key order of the log:
416 * node/leaf start address -> sequence
418 * The 'start address' is the logical address of the *new* root node
419 * for root replace operations, or the logical address of the affected
420 * block for all other operations.
422 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
425 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
427 struct rb_root *tm_root;
428 struct rb_node **new;
429 struct rb_node *parent = NULL;
430 struct tree_mod_elem *cur;
/* Assign the element's seq here, under the mod-log write lock. */
432 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
434 tm_root = &fs_info->tree_mod_log;
435 new = &tm_root->rb_node;
437 cur = rb_entry(*new, struct tree_mod_elem, node);
/*
 * NOTE(review): ordering is inverted relative to the usual rbtree
 * idiom (smaller keys descend right); __tree_mod_log_search uses the
 * same inverted comparisons, so the two stay consistent.
 */
439 if (cur->logical < tm->logical)
440 new = &((*new)->rb_left);
441 else if (cur->logical > tm->logical)
442 new = &((*new)->rb_right);
443 else if (cur->seq < tm->seq)
444 new = &((*new)->rb_left);
445 else if (cur->seq > tm->seq)
446 new = &((*new)->rb_right);
451 rb_link_node(&tm->node, parent, new);
452 rb_insert_color(&tm->node, tm_root);
457 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
458 * returns zero with the tree_mod_log_lock acquired. The caller must hold
459 * this until all tree mod log insertions are recorded in the rb tree and then
460 * write unlock fs_info::tree_mod_log_lock.
462 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
463 struct extent_buffer *eb) {
/* No registered blockers, or @eb is a leaf (level 0): nothing to log. */
465 if (list_empty(&(fs_info)->tree_mod_seq_list))
467 if (eb && btrfs_header_level(eb) == 0)
470 write_lock(&fs_info->tree_mod_log_lock);
/* Re-check under the lock: the last blocker may have just gone away. */
471 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
472 write_unlock(&fs_info->tree_mod_log_lock);
479 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
480 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
481 struct extent_buffer *eb)
/* Cheap, lock-free pre-check used before allocating log elements. */
484 if (list_empty(&(fs_info)->tree_mod_seq_list))
486 if (eb && btrfs_header_level(eb) == 0)
/*
 * Allocate and fill one tree-mod-log element for a key operation on
 * slot @slot of @eb. Returns NULL on allocation failure.
 */
492 static struct tree_mod_elem *
493 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
494 enum mod_log_op op, gfp_t flags)
496 struct tree_mod_elem *tm;
498 tm = kzalloc(sizeof(*tm), flags);
502 tm->logical = eb->start;
/* ADD operations have no previous key/blockptr to record. */
503 if (op != MOD_LOG_KEY_ADD) {
504 btrfs_node_key(eb, &tm->key, slot);
505 tm->blockptr = btrfs_node_blockptr(eb, slot);
509 tm->generation = btrfs_node_ptr_generation(eb, slot);
510 RB_CLEAR_NODE(&tm->node);
/*
 * Log a single-key operation (add/remove/replace) on @eb slot @slot.
 * Returns 0 if logging is unnecessary or on success.
 */
515 static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
516 enum mod_log_op op, gfp_t flags)
518 struct tree_mod_elem *tm;
/* Lock-free fast path: bail out early if nobody needs the log. */
521 if (!tree_mod_need_log(eb->fs_info, eb))
524 tm = alloc_tree_mod_elem(eb, slot, op, flags);
/* Re-check with the lock; on "don't log" the element is discarded. */
528 if (tree_mod_dont_log(eb->fs_info, eb)) {
533 ret = __tree_mod_log_insert(eb->fs_info, tm);
534 write_unlock(&eb->fs_info->tree_mod_log_lock);
/*
 * Log a key-range move inside @eb: @nr_items keys moved from @src_slot
 * to @dst_slot. Keys overwritten by a backwards move are logged as
 * individual removals first, then the move itself is logged.
 */
541 static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
542 int dst_slot, int src_slot, int nr_items)
544 struct tree_mod_elem *tm = NULL;
545 struct tree_mod_elem **tm_list = NULL;
550 if (!tree_mod_need_log(eb->fs_info, eb))
553 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
557 tm = kzalloc(sizeof(*tm), GFP_NOFS);
563 tm->logical = eb->start;
565 tm->move.dst_slot = dst_slot;
566 tm->move.nr_items = nr_items;
567 tm->op = MOD_LOG_MOVE_KEYS;
/* Pre-allocate removal records for the slots the move will clobber. */
569 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
570 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
571 MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
578 if (tree_mod_dont_log(eb->fs_info, eb))
583 * When we override something during the move, we log these removals.
584 * This can only happen when we move towards the beginning of the
585 * buffer, i.e. dst_slot < src_slot.
587 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
588 ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
593 ret = __tree_mod_log_insert(eb->fs_info, tm);
596 write_unlock(&eb->fs_info->tree_mod_log_lock);
/* Error path: unhook any already-inserted elements before freeing. */
601 for (i = 0; i < nr_items; i++) {
602 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
603 rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
607 write_unlock(&eb->fs_info->tree_mod_log_lock);
/*
 * Insert a pre-built array of per-slot removal records into the log,
 * highest slot first. On failure, already-inserted records are erased
 * again so the caller can free the whole array.
 */
615 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
616 struct tree_mod_elem **tm_list,
622 for (i = nritems - 1; i >= 0; i--) {
623 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
/* Roll back the insertions done so far (slots above i). */
625 for (j = nritems - 1; j > i; j--)
626 rb_erase(&tm_list[j]->node,
627 &fs_info->tree_mod_log);
/*
 * Log the replacement of @old_root by @new_root. If @log_removal is set
 * and the old root is not a leaf, each of its pointers is additionally
 * logged as MOD_LOG_KEY_REMOVE_WHILE_FREEING so rewinds can restore it.
 */
635 static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
636 struct extent_buffer *new_root, int log_removal)
638 struct btrfs_fs_info *fs_info = old_root->fs_info;
639 struct tree_mod_elem *tm = NULL;
640 struct tree_mod_elem **tm_list = NULL;
645 if (!tree_mod_need_log(fs_info, NULL))
648 if (log_removal && btrfs_header_level(old_root) > 0) {
649 nritems = btrfs_header_nritems(old_root);
650 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
656 for (i = 0; i < nritems; i++) {
657 tm_list[i] = alloc_tree_mod_elem(old_root, i,
658 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
666 tm = kzalloc(sizeof(*tm), GFP_NOFS);
/* The root-replace record is keyed by the *new* root's logical addr. */
672 tm->logical = new_root->start;
673 tm->old_root.logical = old_root->start;
674 tm->old_root.level = btrfs_header_level(old_root);
675 tm->generation = btrfs_header_generation(old_root);
676 tm->op = MOD_LOG_ROOT_REPLACE;
678 if (tree_mod_dont_log(fs_info, NULL))
682 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
684 ret = __tree_mod_log_insert(fs_info, tm);
686 write_unlock(&fs_info->tree_mod_log_lock);
/* Error path: free any per-slot elements that were allocated. */
695 for (i = 0; i < nritems; i++)
/*
 * Look up a logged element for block @start with seq >= @min_seq.
 * @smallest selects whether the oldest (1) or the newest (0) matching
 * element is wanted. Returns NULL if nothing qualifies.
 */
704 static struct tree_mod_elem *
705 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
708 struct rb_root *tm_root;
709 struct rb_node *node;
710 struct tree_mod_elem *cur = NULL;
711 struct tree_mod_elem *found = NULL;
713 read_lock(&fs_info->tree_mod_log_lock);
714 tm_root = &fs_info->tree_mod_log;
715 node = tm_root->rb_node;
717 cur = rb_entry(node, struct tree_mod_elem, node);
/* Comparisons mirror the inverted ordering of __tree_mod_log_insert. */
718 if (cur->logical < start) {
719 node = node->rb_left;
720 } else if (cur->logical > start) {
721 node = node->rb_right;
722 } else if (cur->seq < min_seq) {
723 node = node->rb_left;
724 } else if (!smallest) {
725 /* we want the node with the highest seq */
727 BUG_ON(found->seq > cur->seq);
729 node = node->rb_left;
730 } else if (cur->seq > min_seq) {
731 /* we want the node with the smallest seq */
733 BUG_ON(found->seq < cur->seq);
735 node = node->rb_right;
741 read_unlock(&fs_info->tree_mod_log_lock);
747 * this returns the element from the log with the smallest time sequence
748 * value that's in the log (the oldest log item). any element with a time
749 * sequence lower than min_seq will be ignored.
751 static struct tree_mod_elem *
752 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
/* Thin wrapper: smallest=1 selects the oldest qualifying element. */
755 return __tree_mod_log_search(fs_info, start, min_seq, 1);
759 * this returns the element from the log with the largest time sequence
760 * value that's in the log (the most recent log item). any element with
761 * a time sequence lower than min_seq will be ignored.
763 static struct tree_mod_elem *
764 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
/* Thin wrapper: smallest=0 selects the most recent qualifying element. */
766 return __tree_mod_log_search(fs_info, start, min_seq, 0);
/*
 * Log the copy of @nr_items pointers from @src (starting at @src_offset)
 * into @dst (starting at @dst_offset): one REMOVE record per source slot
 * and one ADD record per destination slot.
 */
770 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
771 struct extent_buffer *src, unsigned long dst_offset,
772 unsigned long src_offset, int nr_items)
775 struct tree_mod_elem **tm_list = NULL;
776 struct tree_mod_elem **tm_list_add, **tm_list_rem;
780 if (!tree_mod_need_log(fs_info, NULL))
/* Leaf-to-leaf copies carry no node pointers; nothing to log. */
783 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
/* One array, split in half: first adds, then removals. */
786 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
791 tm_list_add = tm_list;
792 tm_list_rem = tm_list + nr_items;
793 for (i = 0; i < nr_items; i++) {
794 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
795 MOD_LOG_KEY_REMOVE, GFP_NOFS);
796 if (!tm_list_rem[i]) {
801 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
802 MOD_LOG_KEY_ADD, GFP_NOFS);
803 if (!tm_list_add[i]) {
809 if (tree_mod_dont_log(fs_info, NULL))
/* Insert removal then add for each slot, in order. */
813 for (i = 0; i < nr_items; i++) {
814 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
817 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
822 write_unlock(&fs_info->tree_mod_log_lock);
/* Error path: erase any elements already linked into the rbtree. */
828 for (i = 0; i < nr_items * 2; i++) {
829 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
830 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
834 write_unlock(&fs_info->tree_mod_log_lock);
/*
 * Log the freeing of node @eb by recording a REMOVE_WHILE_FREEING entry
 * for each of its pointers, so rewinds can reconstruct the freed node.
 */
840 static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
842 struct tree_mod_elem **tm_list = NULL;
/* Leaves carry no node pointers: nothing to record. */
847 if (btrfs_header_level(eb) == 0)
850 if (!tree_mod_need_log(eb->fs_info, NULL))
853 nritems = btrfs_header_nritems(eb);
854 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
858 for (i = 0; i < nritems; i++) {
859 tm_list[i] = alloc_tree_mod_elem(eb, i,
860 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
867 if (tree_mod_dont_log(eb->fs_info, eb))
870 ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
871 write_unlock(&eb->fs_info->tree_mod_log_lock);
/* Free the element array (and its entries on the error path). */
879 for (i = 0; i < nritems; i++)
887 * check if the tree block can be shared by multiple trees
889 int btrfs_block_can_be_shared(struct btrfs_root *root,
890 struct extent_buffer *buf)
893 * Tree blocks not in reference counted trees and tree roots
894 * are never shared. If a block was allocated after the last
895 * snapshot and the block was not allocated by tree relocation,
896 * we know the block is not shared.
898 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
899 buf != root->node && buf != root->commit_root &&
900 (btrfs_header_generation(buf) <=
901 btrfs_root_last_snapshot(&root->root_item) ||
902 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
/* Legacy (v0) backref blocks are conservatively treated as shared. */
904 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
905 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
906 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
/*
 * Adjust extent backrefs when @buf is COWed into @cow, following the
 * backref rules documented below. Also decides whether @buf loses its
 * last reference as a result (reported via the caller-supplied flag).
 */
912 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
913 struct btrfs_root *root,
914 struct extent_buffer *buf,
915 struct extent_buffer *cow,
918 struct btrfs_fs_info *fs_info = root->fs_info;
926 * Backrefs update rules:
928 * Always use full backrefs for extent pointers in tree block
929 * allocated by tree relocation.
931 * If a shared tree block is no longer referenced by its owner
932 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
933 * use full backrefs for extent pointers in tree block.
935 * If a tree block is been relocating
936 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
937 * use full backrefs for extent pointers in tree block.
938 * The reason for this is some operations (such as drop tree)
939 * are only allowed for blocks use full backrefs.
942 if (btrfs_block_can_be_shared(root, buf)) {
/* Look up the current refcount and flags for the shared block. */
943 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
944 btrfs_header_level(buf), 1,
950 btrfs_handle_fs_error(fs_info, ret, NULL);
955 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
956 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
957 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
962 owner = btrfs_header_owner(buf);
963 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
964 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
/* Block is shared with other trees: convert refs as dictated above. */
967 if ((owner == root->root_key.objectid ||
968 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
969 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
970 ret = btrfs_inc_ref(trans, root, buf, 1);
974 if (root->root_key.objectid ==
975 BTRFS_TREE_RELOC_OBJECTID) {
976 ret = btrfs_dec_ref(trans, root, buf, 0);
979 ret = btrfs_inc_ref(trans, root, cow, 1);
983 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
986 if (root->root_key.objectid ==
987 BTRFS_TREE_RELOC_OBJECTID)
988 ret = btrfs_inc_ref(trans, root, cow, 1);
990 ret = btrfs_inc_ref(trans, root, cow, 0);
994 if (new_flags != 0) {
995 int level = btrfs_header_level(buf);
997 ret = btrfs_set_disk_extent_flags(trans, fs_info,
1000 new_flags, level, 0);
/* Block was not shared: move refs from @buf to @cow directly. */
1005 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1006 if (root->root_key.objectid ==
1007 BTRFS_TREE_RELOC_OBJECTID)
1008 ret = btrfs_inc_ref(trans, root, cow, 1);
1010 ret = btrfs_inc_ref(trans, root, cow, 0);
1013 ret = btrfs_dec_ref(trans, root, buf, 1);
1017 clean_tree_block(fs_info, buf);
1024 * does the dirty work in cow of a single block. The parent block (if
1025 * supplied) is updated to point to the new cow copy. The new buffer is marked
1026 * dirty and returned locked. If you modify the block it needs to be marked
1029 * search_start -- an allocation hint for the new block
1031 * empty_size -- a hint that you plan on doing more cow. This is the size in
1032 * bytes the allocator should try to find free next to the block it returns.
1033 * This is just a hint and may be ignored by the allocator.
1035 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1036 struct btrfs_root *root,
1037 struct extent_buffer *buf,
1038 struct extent_buffer *parent, int parent_slot,
1039 struct extent_buffer **cow_ret,
1040 u64 search_start, u64 empty_size)
1042 struct btrfs_fs_info *fs_info = root->fs_info;
1043 struct btrfs_disk_key disk_key;
1044 struct extent_buffer *cow;
1047 int unlock_orig = 0;
1048 u64 parent_start = 0;
/* Caller may pass the buffer itself as *cow_ret when already COWed. */
1050 if (*cow_ret == buf)
1053 btrfs_assert_tree_locked(buf);
1055 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1056 trans->transid != fs_info->running_transaction->transid);
1057 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1058 trans->transid != root->last_trans);
1060 level = btrfs_header_level(buf);
1063 btrfs_item_key(buf, &disk_key, 0);
1065 btrfs_node_key(buf, &disk_key, 0);
1067 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1068 parent_start = parent->start;
1070 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1071 root->root_key.objectid, &disk_key, level,
1072 search_start, empty_size);
1074 return PTR_ERR(cow);
1076 /* cow is set to blocking by btrfs_init_new_buffer */
/* Clone old contents into the new block and rewrite its header. */
1078 copy_extent_buffer_full(cow, buf);
1079 btrfs_set_header_bytenr(cow, cow->start);
1080 btrfs_set_header_generation(cow, trans->transid);
1081 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1082 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1083 BTRFS_HEADER_FLAG_RELOC);
1084 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1085 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1087 btrfs_set_header_owner(cow, root->root_key.objectid);
1089 write_extent_buffer_fsid(cow, fs_info->fsid);
1091 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1093 btrfs_abort_transaction(trans, ret);
1097 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1098 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1100 btrfs_abort_transaction(trans, ret);
/* Case 1: COWing the root itself — swap the root pointer. */
1105 if (buf == root->node) {
1106 WARN_ON(parent && parent != buf);
1107 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1108 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1109 parent_start = buf->start;
1111 extent_buffer_get(cow);
1112 ret = tree_mod_log_insert_root(root->node, cow, 1);
1114 rcu_assign_pointer(root->node, cow);
1116 btrfs_free_tree_block(trans, root, buf, parent_start,
1118 free_extent_buffer(buf);
1119 add_root_to_dirty_list(root);
/* Case 2: interior block — repoint the parent's slot at the copy. */
1121 WARN_ON(trans->transid != btrfs_header_generation(parent));
1122 tree_mod_log_insert_key(parent, parent_slot,
1123 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1124 btrfs_set_node_blockptr(parent, parent_slot,
1126 btrfs_set_node_ptr_generation(parent, parent_slot,
1128 btrfs_mark_buffer_dirty(parent);
1130 ret = tree_mod_log_free_eb(buf);
1132 btrfs_abort_transaction(trans, ret);
1136 btrfs_free_tree_block(trans, root, buf, parent_start,
1140 btrfs_tree_unlock(buf);
1141 free_extent_buffer_stale(buf);
1142 btrfs_mark_buffer_dirty(cow);
1148 * returns the logical address of the oldest predecessor of the given root.
1149 * entries older than time_seq are ignored.
1151 static struct tree_mod_elem *__tree_mod_log_oldest_root(
1152 struct extent_buffer *eb_root, u64 time_seq)
1154 struct tree_mod_elem *tm;
1155 struct tree_mod_elem *found = NULL;
1156 u64 root_logical = eb_root->start;
1163 * the very last operation that's logged for a root is the
1164 * replacement operation (if it is replaced at all). this has
1165 * the logical address of the *new* root, making it the very
1166 * first operation that's logged for this root.
1169 tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
1174 * if there are no tree operation for the oldest root, we simply
1175 * return it. this should only happen if that (old) root is at
1182 * if there's an operation that's not a root replacement, we
1183 * found the oldest version of our root. normally, we'll find a
1184 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1186 if (tm->op != MOD_LOG_ROOT_REPLACE)
/* Follow the replacement chain backwards to an ever-older root. */
1190 root_logical = tm->old_root.logical;
1194 /* if there's no old root to return, return what we found instead */
1202 * tm is a pointer to the first operation to rewind within eb. then, all
1203 * previous operations will be rewound (until we reach something older than
1207 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1208 u64 time_seq, struct tree_mod_elem *first_tm)
1211 struct rb_node *next;
1212 struct tree_mod_elem *tm = first_tm;
1213 unsigned long o_dst;
1214 unsigned long o_src;
1215 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1217 n = btrfs_header_nritems(eb);
1218 read_lock(&fs_info->tree_mod_log_lock);
/* Replay logged operations newest-first, applying each one's inverse. */
1219 while (tm && tm->seq >= time_seq) {
1221 * all the operations are recorded with the operator used for
1222 * the modification. as we're going backwards, we do the
1223 * opposite of each operation here.
1226 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1227 BUG_ON(tm->slot < n);
1229 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1230 case MOD_LOG_KEY_REMOVE:
/* Undo a removal by restoring the saved key/blockptr/generation. */
1231 btrfs_set_node_key(eb, &tm->key, tm->slot);
1232 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1233 btrfs_set_node_ptr_generation(eb, tm->slot,
1237 case MOD_LOG_KEY_REPLACE:
1238 BUG_ON(tm->slot >= n);
1239 btrfs_set_node_key(eb, &tm->key, tm->slot);
1240 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1241 btrfs_set_node_ptr_generation(eb, tm->slot,
1244 case MOD_LOG_KEY_ADD:
1245 /* if a move operation is needed it's in the log */
1248 case MOD_LOG_MOVE_KEYS:
/* Undo a move by copying the keys back to their original slots. */
1249 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1250 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1251 memmove_extent_buffer(eb, o_dst, o_src,
1252 tm->move.nr_items * p_size);
1254 case MOD_LOG_ROOT_REPLACE:
1256 * this operation is special. for roots, this must be
1257 * handled explicitly before rewinding.
1258 * for non-roots, this operation may exist if the node
1259 * was a root: root A -> child B; then A gets empty and
1260 * B is promoted to the new root. in the mod log, we'll
1261 * have a root-replace operation for B, a tree block
1262 * that is no root. we simply ignore that operation.
/* Advance to the next log element for the same block. */
1266 next = rb_next(&tm->node);
1269 tm = rb_entry(next, struct tree_mod_elem, node);
1270 if (tm->logical != first_tm->logical)
1273 read_unlock(&fs_info->tree_mod_log_lock);
1274 btrfs_set_header_nritems(eb, n);
1278 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1279 * is returned. If rewind operations happen, a fresh buffer is returned. The
1280 * returned buffer is always read-locked. If the returned buffer is not the
1281 * input buffer, the lock on the input buffer is released and the input buffer
1282 * is freed (its refcount is decremented).
1284 static struct extent_buffer *
1285 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1286 struct extent_buffer *eb, u64 time_seq)
1288 struct extent_buffer *eb_rewin;
1289 struct tree_mod_elem *tm;
/* Leaves are never logged; nothing to rewind. */
1294 if (btrfs_header_level(eb) == 0)
1297 tm = tree_mod_log_search(fs_info, eb->start, time_seq)
1301 btrfs_set_path_blocking(path);
1302 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
/* A freed block is rebuilt from scratch on a dummy buffer. */
1304 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1305 BUG_ON(tm->slot != 0);
1306 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1308 btrfs_tree_read_unlock_blocking(eb);
1309 free_extent_buffer(eb);
1312 btrfs_set_header_bytenr(eb_rewin, eb->start);
1313 btrfs_set_header_backref_rev(eb_rewin,
1314 btrfs_header_backref_rev(eb));
1315 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1316 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
/* Otherwise clone the live buffer and rewind the clone. */
1318 eb_rewin = btrfs_clone_extent_buffer(eb);
1320 btrfs_tree_read_unlock_blocking(eb);
1321 free_extent_buffer(eb);
1326 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1327 btrfs_tree_read_unlock_blocking(eb);
1328 free_extent_buffer(eb);
1330 extent_buffer_get(eb_rewin);
1331 btrfs_tree_read_lock(eb_rewin);
1332 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1333 WARN_ON(btrfs_header_nritems(eb_rewin) >
1334 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1340 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1341 * value. If there are no changes, the current root->root_node is returned. If
1342 * anything changed in between, there's a fresh buffer allocated on which the
1343 * rewind operations are done. In any case, the returned buffer is read locked.
1344 * Returns NULL on error (with no locks held).
1346 static inline struct extent_buffer *
1347 get_old_root(struct btrfs_root *root, u64 time_seq)
1349 struct btrfs_fs_info *fs_info = root->fs_info;
1350 struct tree_mod_elem *tm;
1351 struct extent_buffer *eb = NULL;
1352 struct extent_buffer *eb_root;
1353 struct extent_buffer *old;
1354 struct tree_mod_root *old_root = NULL;
1355 u64 old_generation = 0;
1358 eb_root = btrfs_read_lock_root_node(root);
1359 tm = __tree_mod_log_oldest_root(eb_root, time_seq);
/* If the root was replaced since time_seq, rewind from the old root. */
1363 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1364 old_root = &tm->old_root;
1365 old_generation = tm->generation;
1366 logical = old_root->logical;
1368 logical = eb_root->start;
1371 tm = tree_mod_log_search(fs_info, logical, time_seq);
/* Old root still on disk: read it and work on a private clone. */
1372 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1373 btrfs_tree_read_unlock(eb_root);
1374 free_extent_buffer(eb_root);
1375 old = read_tree_block(fs_info, logical, 0);
1376 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1378 free_extent_buffer(old);
1380 "failed to read tree block %llu from get_old_root",
1383 eb = btrfs_clone_extent_buffer(old);
1384 free_extent_buffer(old);
/* Old root was freed: reconstruct it on a dummy buffer. */
1386 } else if (old_root) {
1387 btrfs_tree_read_unlock(eb_root);
1388 free_extent_buffer(eb_root);
1389 eb = alloc_dummy_extent_buffer(fs_info, logical);
/* No replacement: clone the current root and rewind the clone. */
1391 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1392 eb = btrfs_clone_extent_buffer(eb_root);
1393 btrfs_tree_read_unlock_blocking(eb_root);
1394 free_extent_buffer(eb_root);
1399 extent_buffer_get(eb);
1400 btrfs_tree_read_lock(eb);
1402 btrfs_set_header_bytenr(eb, eb->start);
1403 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1404 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1405 btrfs_set_header_level(eb, old_root->level);
1406 btrfs_set_header_generation(eb, old_generation);
1409 __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1411 WARN_ON(btrfs_header_level(eb) != 0);
1412 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1417 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1419 struct tree_mod_elem *tm;
1421 struct extent_buffer *eb_root = btrfs_root_node(root);
1423 tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1424 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1425 level = tm->old_root.level;
1427 level = btrfs_header_level(eb_root);
1429 free_extent_buffer(eb_root);
1434 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1435 struct btrfs_root *root,
1436 struct extent_buffer *buf)
1438 if (btrfs_is_testing(root->fs_info))
1441 /* ensure we can see the force_cow */
1445 * We do not need to cow a block if
1446 * 1) this block is not created or changed in this transaction;
1447 * 2) this block does not belong to TREE_RELOC tree;
1448 * 3) the root is not forced COW.
1450 * What is forced COW:
1451 * when we create snapshot during committing the transaction,
1452 * after we've finished coping src root, we must COW the shared
1453 * block to ensure the metadata consistency.
1455 if (btrfs_header_generation(buf) == trans->transid &&
1456 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1457 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1458 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1459 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1465 * cows a single block, see __btrfs_cow_block for the real work.
1466 * This version of it has extra checks so that a block isn't COWed more than
1467 * once per transaction, as long as it hasn't been written yet
/*
 * NOTE(review): excerpt is missing lines (braces, "u64 search_start",
 * "int ret", the no-COW fast path's *cow_ret assignment and returns).
 * Surviving lines kept verbatim.
 */
1469 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1470 struct btrfs_root *root, struct extent_buffer *buf,
1471 struct extent_buffer *parent, int parent_slot,
1472 struct extent_buffer **cow_ret)
1474 struct btrfs_fs_info *fs_info = root->fs_info;
/* sanity: COW outside the running transaction/generation is a bug */
1478 if (trans->transaction != fs_info->running_transaction)
1479 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1481 fs_info->running_transaction->transid);
1483 if (trans->transid != fs_info->generation)
1484 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1485 trans->transid, fs_info->generation);
/* fast path: block already COWed in this transaction, nothing to do */
1487 if (!should_cow_block(trans, root, buf)) {
1488 trans->dirty = true;
/* hint the allocator to stay within the same 1GiB chunk */
1493 search_start = buf->start & ~((u64)SZ_1G - 1);
1496 btrfs_set_lock_blocking(parent);
1497 btrfs_set_lock_blocking(buf);
1499 ret = __btrfs_cow_block(trans, root, buf, parent,
1500 parent_slot, cow_ret, search_start, 0);
1502 trace_btrfs_cow_block(root, buf, *cow_ret);
1508 * helper function for defrag to decide if two blocks pointed to by a
1509 * node are actually close by
1511 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1513 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1515 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1521 * compare two keys in a memcmp fashion
1523 static int comp_keys(const struct btrfs_disk_key *disk,
1524 const struct btrfs_key *k2)
1526 struct btrfs_key k1;
1528 btrfs_disk_key_to_cpu(&k1, disk);
1530 return btrfs_comp_cpu_keys(&k1, k2);
1534 * same as comp_keys only with two btrfs_key's
1536 int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
1538 if (k1->objectid > k2->objectid)
1540 if (k1->objectid < k2->objectid)
1542 if (k1->type > k2->type)
1544 if (k1->type < k2->type)
1546 if (k1->offset > k2->offset)
1548 if (k1->offset < k2->offset)
1554 * this is used by the defrag code to go through all the
1555 * leaves pointed to by a node and reallocate them so that
1556 * disk order is close to key order
/*
 * NOTE(review): excerpt is missing lines (several local declarations,
 * braces, returns and the __btrfs_cow_block() hint argument line).
 * Surviving lines kept verbatim.
 */
1558 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1559 struct btrfs_root *root, struct extent_buffer *parent,
1560 int start_slot, u64 *last_ret,
1561 struct btrfs_key *progress)
1563 struct btrfs_fs_info *fs_info = root->fs_info;
1564 struct extent_buffer *cur;
1567 u64 search_start = *last_ret;
1577 int progress_passed = 0;
1578 struct btrfs_disk_key disk_key;
1580 parent_level = btrfs_header_level(parent);
1582 WARN_ON(trans->transaction != fs_info->running_transaction);
1583 WARN_ON(trans->transid != fs_info->generation);
1585 parent_nritems = btrfs_header_nritems(parent);
1586 blocksize = fs_info->nodesize;
1587 end_slot = parent_nritems - 1;
/* nothing to rebalance with zero or one child pointers */
1589 if (parent_nritems <= 1)
1592 btrfs_set_lock_blocking(parent);
1594 for (i = start_slot; i <= end_slot; i++) {
/* skip children whose keys are before the caller's progress cursor */
1597 btrfs_node_key(parent, &disk_key, i);
1598 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1601 progress_passed = 1;
1602 blocknr = btrfs_node_blockptr(parent, i);
1603 gen = btrfs_node_ptr_generation(parent, i);
1604 if (last_block == 0)
1605 last_block = blocknr;
/* leave blocks alone if they are already near a neighbour on disk */
1608 other = btrfs_node_blockptr(parent, i - 1);
1609 close = close_blocks(blocknr, other, blocksize);
1611 if (!close && i < end_slot) {
1612 other = btrfs_node_blockptr(parent, i + 1);
1613 close = close_blocks(blocknr, other, blocksize);
1616 last_block = blocknr;
1620 cur = find_extent_buffer(fs_info, blocknr);
1622 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
/* read from disk if not cached or stale */
1625 if (!cur || !uptodate) {
1627 cur = read_tree_block(fs_info, blocknr, gen);
1629 return PTR_ERR(cur);
1630 } else if (!extent_buffer_uptodate(cur)) {
1631 free_extent_buffer(cur);
1634 } else if (!uptodate) {
1635 err = btrfs_read_buffer(cur, gen);
1637 free_extent_buffer(cur);
1642 if (search_start == 0)
1643 search_start = last_block;
/* COW the child with a search hint so it lands near its siblings */
1645 btrfs_tree_lock(cur);
1646 btrfs_set_lock_blocking(cur);
1647 err = __btrfs_cow_block(trans, root, cur, parent, i,
1650 (end_slot - i) * blocksize));
1652 btrfs_tree_unlock(cur);
1653 free_extent_buffer(cur);
1656 search_start = cur->start;
1657 last_block = cur->start;
/* report the new allocation position back to the caller */
1658 *last_ret = search_start;
1659 btrfs_tree_unlock(cur);
1660 free_extent_buffer(cur);
1666 * search for key in the extent_buffer. The items start at offset p,
1667 * and they are item_size apart. There are 'max' items in p.
1669 * the slot in the array is returned via slot, and it points to
1670 * the place where you would insert key if it is not found in
1673 * slot may point to max if the key is bigger than all of the keys
/*
 * NOTE(review): excerpt is missing lines (low/high/slot parameters,
 * "char *kaddr", braces, the hit/miss returns and the final *slot
 * assignment). Surviving lines kept verbatim.
 */
1675 static noinline int generic_bin_search(struct extent_buffer *eb,
1676 unsigned long p, int item_size,
1677 const struct btrfs_key *key,
1684 struct btrfs_disk_key *tmp = NULL;
1685 struct btrfs_disk_key unaligned;
1686 unsigned long offset;
1688 unsigned long map_start = 0;
1689 unsigned long map_len = 0;
/* defensive check: an inverted search window means a corrupted node */
1693 btrfs_err(eb->fs_info,
1694 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1695 __func__, low, high, eb->start,
1696 btrfs_header_owner(eb), btrfs_header_level(eb));
/* classic binary search over fixed-size items inside the buffer */
1700 while (low < high) {
1701 mid = (low + high) / 2;
1702 offset = p + mid * item_size;
/* remap only when the key straddles out of the currently mapped page */
1704 if (!kaddr || offset < map_start ||
1705 (offset + sizeof(struct btrfs_disk_key)) >
1706 map_start + map_len) {
1708 err = map_private_extent_buffer(eb, offset,
1709 sizeof(struct btrfs_disk_key),
1710 &kaddr, &map_start, &map_len);
1713 tmp = (struct btrfs_disk_key *)(kaddr + offset -
/* key crosses a page boundary: fall back to a bounce copy */
1715 } else if (err == 1) {
1716 read_extent_buffer(eb, &unaligned,
1717 offset, sizeof(unaligned));
1724 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1727 ret = comp_keys(tmp, key);
1743 * simple bin_search frontend that does the right thing for
1746 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1747 int level, int *slot)
1750 return generic_bin_search(eb,
1751 offsetof(struct btrfs_leaf, items),
1752 sizeof(struct btrfs_item),
1753 key, btrfs_header_nritems(eb),
1756 return generic_bin_search(eb,
1757 offsetof(struct btrfs_node, ptrs),
1758 sizeof(struct btrfs_key_ptr),
1759 key, btrfs_header_nritems(eb),
1763 static void root_add_used(struct btrfs_root *root, u32 size)
1765 spin_lock(&root->accounting_lock);
1766 btrfs_set_root_used(&root->root_item,
1767 btrfs_root_used(&root->root_item) + size);
1768 spin_unlock(&root->accounting_lock);
1771 static void root_sub_used(struct btrfs_root *root, u32 size)
1773 spin_lock(&root->accounting_lock);
1774 btrfs_set_root_used(&root->root_item,
1775 btrfs_root_used(&root->root_item) - size);
1776 spin_unlock(&root->accounting_lock);
1779 /* given a node and slot number, this reads the blocks it points to. The
1780 * extent buffer is returned with a reference taken (but unlocked).
1782 static noinline struct extent_buffer *
1783 read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
1786 int level = btrfs_header_level(parent);
1787 struct extent_buffer *eb;
1789 if (slot < 0 || slot >= btrfs_header_nritems(parent))
1790 return ERR_PTR(-ENOENT);
1794 eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
1795 btrfs_node_ptr_generation(parent, slot));
1796 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1797 free_extent_buffer(eb);
1805 * node level balancing, used to make sure nodes are in proper order for
1806 * item deletion. We balance from the top down, so we have to make sure
1807 * that a deletion won't leave an node completely empty later on.
/*
 * NOTE(review): this excerpt is heavily line-sampled (missing locals,
 * braces, error labels, IS_ERR checks and the enospc/out epilogue).
 * Lock/unlock ordering here is delicate; surviving lines kept verbatim.
 */
1809 static noinline int balance_level(struct btrfs_trans_handle *trans,
1810 struct btrfs_root *root,
1811 struct btrfs_path *path, int level)
1813 struct btrfs_fs_info *fs_info = root->fs_info;
1814 struct extent_buffer *right = NULL;
1815 struct extent_buffer *mid;
1816 struct extent_buffer *left = NULL;
1817 struct extent_buffer *parent = NULL;
1821 int orig_slot = path->slots[level];
1827 mid = path->nodes[level];
/* the caller must hold a write lock on the node being balanced */
1829 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1830 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1831 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1833 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1835 if (level < BTRFS_MAX_LEVEL - 1) {
1836 parent = path->nodes[level + 1];
1837 pslot = path->slots[level + 1];
1841 * deal with the case where there is only one pointer in the root
1842 * by promoting the node below to a root
1845 struct extent_buffer *child;
1847 if (btrfs_header_nritems(mid) != 1)
1850 /* promote the child to a root */
1851 child = read_node_slot(fs_info, mid, 0);
1852 if (IS_ERR(child)) {
1853 ret = PTR_ERR(child);
1854 btrfs_handle_fs_error(fs_info, ret, NULL);
1858 btrfs_tree_lock(child);
1859 btrfs_set_lock_blocking(child);
1860 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1862 btrfs_tree_unlock(child);
1863 free_extent_buffer(child);
1867 ret = tree_mod_log_insert_root(root->node, child, 1);
1869 rcu_assign_pointer(root->node, child);
1871 add_root_to_dirty_list(root);
1872 btrfs_tree_unlock(child);
1874 path->locks[level] = 0;
1875 path->nodes[level] = NULL;
1876 clean_tree_block(fs_info, mid);
1877 btrfs_tree_unlock(mid);
1878 /* once for the path */
1879 free_extent_buffer(mid);
1881 root_sub_used(root, mid->len);
1882 btrfs_free_tree_block(trans, root, mid, 0, 1);
1883 /* once for the root ptr */
1884 free_extent_buffer_stale(mid);
/* node is still at least a quarter full: no balancing needed */
1887 if (btrfs_header_nritems(mid) >
1888 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
/* lock and COW both neighbours before moving pointers around */
1891 left = read_node_slot(fs_info, parent, pslot - 1);
1896 btrfs_tree_lock(left);
1897 btrfs_set_lock_blocking(left);
1898 wret = btrfs_cow_block(trans, root, left,
1899 parent, pslot - 1, &left);
1906 right = read_node_slot(fs_info, parent, pslot + 1);
1911 btrfs_tree_lock(right);
1912 btrfs_set_lock_blocking(right);
1913 wret = btrfs_cow_block(trans, root, right,
1914 parent, pslot + 1, &right);
1921 /* first, try to make some room in the middle buffer */
1923 orig_slot += btrfs_header_nritems(left);
1924 wret = push_node_left(trans, fs_info, left, mid, 1);
1930 * then try to empty the right most buffer into the middle
1933 wret = push_node_left(trans, fs_info, mid, right, 1);
1934 if (wret < 0 && wret != -ENOSPC)
/* right node emptied: delete its parent pointer and free the block */
1936 if (btrfs_header_nritems(right) == 0) {
1937 clean_tree_block(fs_info, right);
1938 btrfs_tree_unlock(right);
1939 del_ptr(root, path, level + 1, pslot + 1);
1940 root_sub_used(root, right->len);
1941 btrfs_free_tree_block(trans, root, right, 0, 1);
1942 free_extent_buffer_stale(right);
1945 struct btrfs_disk_key right_key;
1946 btrfs_node_key(right, &right_key, 0);
1947 ret = tree_mod_log_insert_key(parent, pslot + 1,
1948 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1950 btrfs_set_node_key(parent, &right_key, pslot + 1);
1951 btrfs_mark_buffer_dirty(parent);
1954 if (btrfs_header_nritems(mid) == 1) {
1956 * we're not allowed to leave a node with one item in the
1957 * tree during a delete. A deletion from lower in the tree
1958 * could try to delete the only pointer in this node.
1959 * So, pull some keys from the left.
1960 * There has to be a left pointer at this point because
1961 * otherwise we would have pulled some pointers from the
1966 btrfs_handle_fs_error(fs_info, ret, NULL);
1969 wret = balance_node_right(trans, fs_info, mid, left);
1975 wret = push_node_left(trans, fs_info, left, mid, 1);
/* mid emptied: remove it from the parent and free it */
1981 if (btrfs_header_nritems(mid) == 0) {
1982 clean_tree_block(fs_info, mid);
1983 btrfs_tree_unlock(mid);
1984 del_ptr(root, path, level + 1, pslot);
1985 root_sub_used(root, mid->len);
1986 btrfs_free_tree_block(trans, root, mid, 0, 1);
1987 free_extent_buffer_stale(mid);
1990 /* update the parent key to reflect our changes */
1991 struct btrfs_disk_key mid_key;
1992 btrfs_node_key(mid, &mid_key, 0);
1993 ret = tree_mod_log_insert_key(parent, pslot,
1994 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1996 btrfs_set_node_key(parent, &mid_key, pslot);
1997 btrfs_mark_buffer_dirty(parent);
2000 /* update the path */
2002 if (btrfs_header_nritems(left) > orig_slot) {
2003 extent_buffer_get(left);
2004 /* left was locked after cow */
2005 path->nodes[level] = left;
2006 path->slots[level + 1] -= 1;
2007 path->slots[level] = orig_slot;
2009 btrfs_tree_unlock(mid);
2010 free_extent_buffer(mid);
2013 orig_slot -= btrfs_header_nritems(left);
2014 path->slots[level] = orig_slot;
2017 /* double check we haven't messed things up */
2019 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2023 btrfs_tree_unlock(right);
2024 free_extent_buffer(right);
2027 if (path->nodes[level] != left)
2028 btrfs_tree_unlock(left);
2029 free_extent_buffer(left);
2034 /* Node balancing for insertion. Here we only split or push nodes around
2035 * when they are completely full. This is also done top down, so we
2036 * have to be pessimistic.
/*
 * NOTE(review): excerpt is line-sampled (missing locals, IS_ERR checks,
 * braces and return statements). Surviving lines kept verbatim.
 */
2038 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2039 struct btrfs_root *root,
2040 struct btrfs_path *path, int level)
2042 struct btrfs_fs_info *fs_info = root->fs_info;
2043 struct extent_buffer *right = NULL;
2044 struct extent_buffer *mid;
2045 struct extent_buffer *left = NULL;
2046 struct extent_buffer *parent = NULL;
2050 int orig_slot = path->slots[level];
2055 mid = path->nodes[level];
2056 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2058 if (level < BTRFS_MAX_LEVEL - 1) {
2059 parent = path->nodes[level + 1];
2060 pslot = path->slots[level + 1];
2066 left = read_node_slot(fs_info, parent, pslot - 1);
2070 /* first, try to make some room in the middle buffer */
2074 btrfs_tree_lock(left);
2075 btrfs_set_lock_blocking(left);
2077 left_nr = btrfs_header_nritems(left);
/* only push left when the left sibling has room to receive pointers */
2078 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2081 ret = btrfs_cow_block(trans, root, left, parent,
2086 wret = push_node_left(trans, fs_info,
/* push succeeded: fix up mid's key in the parent and adjust the path */
2093 struct btrfs_disk_key disk_key;
2094 orig_slot += left_nr;
2095 btrfs_node_key(mid, &disk_key, 0);
2096 ret = tree_mod_log_insert_key(parent, pslot,
2097 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2099 btrfs_set_node_key(parent, &disk_key, pslot);
2100 btrfs_mark_buffer_dirty(parent);
2101 if (btrfs_header_nritems(left) > orig_slot) {
2102 path->nodes[level] = left;
2103 path->slots[level + 1] -= 1;
2104 path->slots[level] = orig_slot;
2105 btrfs_tree_unlock(mid);
2106 free_extent_buffer(mid);
2109 btrfs_header_nritems(left);
2110 path->slots[level] = orig_slot;
2111 btrfs_tree_unlock(left);
2112 free_extent_buffer(left);
2116 btrfs_tree_unlock(left);
2117 free_extent_buffer(left);
2119 right = read_node_slot(fs_info, parent, pslot + 1);
2124 * then try to empty the right most buffer into the middle
2129 btrfs_tree_lock(right);
2130 btrfs_set_lock_blocking(right);
2132 right_nr = btrfs_header_nritems(right);
2133 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2136 ret = btrfs_cow_block(trans, root, right,
2142 wret = balance_node_right(trans, fs_info,
/* push-right succeeded: update the right sibling's key in the parent */
2149 struct btrfs_disk_key disk_key;
2151 btrfs_node_key(right, &disk_key, 0);
2152 ret = tree_mod_log_insert_key(parent, pslot + 1,
2153 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2155 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2156 btrfs_mark_buffer_dirty(parent);
2158 if (btrfs_header_nritems(mid) <= orig_slot) {
2159 path->nodes[level] = right;
2160 path->slots[level + 1] += 1;
2161 path->slots[level] = orig_slot -
2162 btrfs_header_nritems(mid);
2163 btrfs_tree_unlock(mid);
2164 free_extent_buffer(mid);
2166 btrfs_tree_unlock(right);
2167 free_extent_buffer(right);
2171 btrfs_tree_unlock(right);
2172 free_extent_buffer(right);
2178 * readahead one full node of leaves, finding things that are close
2179 * to the block in 'slot', and triggering ra on them.
/*
 * NOTE(review): excerpt is missing lines (locals such as search/target/
 * nread/nscan, braces, the direction-stepping logic and returns).
 * Surviving lines kept verbatim.
 */
2181 static void reada_for_search(struct btrfs_fs_info *fs_info,
2182 struct btrfs_path *path,
2183 int level, int slot, u64 objectid)
2185 struct extent_buffer *node;
2186 struct btrfs_disk_key disk_key;
2191 struct extent_buffer *eb;
2199 if (!path->nodes[level])
2202 node = path->nodes[level];
2204 search = btrfs_node_blockptr(node, slot);
2205 blocksize = fs_info->nodesize;
/* the target block is already cached: no readahead needed */
2206 eb = find_extent_buffer(fs_info, search);
2208 free_extent_buffer(eb);
2214 nritems = btrfs_header_nritems(node);
2218 if (path->reada == READA_BACK) {
2222 } else if (path->reada == READA_FORWARD) {
/* for backwards readahead, stop once we leave the requested objectid */
2227 if (path->reada == READA_BACK && objectid) {
2228 btrfs_node_key(node, &disk_key, nr);
2229 if (btrfs_disk_key_objectid(&disk_key) != objectid)
/* only readahead blocks within 64KiB of the search target */
2232 search = btrfs_node_blockptr(node, nr);
2233 if ((search <= target && target - search <= 65536) ||
2234 (search > target && search - target <= 65536)) {
2235 readahead_tree_block(fs_info, search);
/* bound the total work: at most 64KiB read-ahead or 32 slots scanned */
2239 if ((nread > 65536 || nscan > 32))
/*
 * NOTE(review): excerpt is missing lines (slot/nritems/block1/block2/gen
 * declarations, braces, and the "block1 = 0" invalidation on cache hit).
 * Surviving lines kept verbatim.
 */
2244 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2245 struct btrfs_path *path, int level)
2249 struct extent_buffer *parent;
2250 struct extent_buffer *eb;
2255 parent = path->nodes[level + 1];
2259 nritems = btrfs_header_nritems(parent);
2260 slot = path->slots[level + 1];
/* candidate 1: the left sibling of the node being balanced */
2263 block1 = btrfs_node_blockptr(parent, slot - 1);
2264 gen = btrfs_node_ptr_generation(parent, slot - 1);
2265 eb = find_extent_buffer(fs_info, block1);
2267 * if we get -eagain from btrfs_buffer_uptodate, we
2268 * don't want to return eagain here. That will loop
2271 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2273 free_extent_buffer(eb);
/* candidate 2: the right sibling */
2275 if (slot + 1 < nritems) {
2276 block2 = btrfs_node_blockptr(parent, slot + 1);
2277 gen = btrfs_node_ptr_generation(parent, slot + 1);
2278 eb = find_extent_buffer(fs_info, block2);
2279 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2281 free_extent_buffer(eb);
/* kick off readahead for whichever siblings were not cached */
2285 readahead_tree_block(fs_info, block1);
2287 readahead_tree_block(fs_info, block2);
2292 * when we walk down the tree, it is usually safe to unlock the higher layers
2293 * in the tree. The exceptions are when our path goes through slot 0, because
2294 * operations on the tree might require changing key pointers higher up in the
2297 * callers might also have set path->keep_locks, which tells this code to keep
2298 * the lock if the path points to the last slot in the block. This is part of
2299 * walking through the tree, and selecting the next slot in the higher block.
2301 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2302 * if lowest_unlock is 1, level 0 won't be unlocked
/*
 * NOTE(review): excerpt is missing lines (the "no_skips" declaration,
 * the t = path->nodes[i] assignments, braces and continues). Surviving
 * lines kept verbatim.
 */
2304 static noinline void unlock_up(struct btrfs_path *path, int level,
2305 int lowest_unlock, int min_write_lock_level,
2306 int *write_lock_level)
2309 int skip_level = level;
2311 struct extent_buffer *t;
2313 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2314 if (!path->nodes[i])
2316 if (!path->locks[i])
/* slot 0 may need a parent key update, so keep ancestors locked */
2318 if (!no_skips && path->slots[i] == 0) {
2322 if (!no_skips && path->keep_locks) {
2325 nritems = btrfs_header_nritems(t);
/* keep the lock when pointing at the last slot (next-leaf walking) */
2326 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2331 if (skip_level < i && i >= lowest_unlock)
2335 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2336 btrfs_tree_unlock_rw(t, path->locks[i]);
/* lower the write-lock watermark as upper levels are released */
2338 if (write_lock_level &&
2339 i > min_write_lock_level &&
2340 i <= *write_lock_level) {
2341 *write_lock_level = i - 1;
2348 * This releases any locks held in the path starting at level and
2349 * going all the way up to the root.
2351 * btrfs_search_slot will keep the lock held on higher nodes in a few
2352 * corner cases, such as COW of the block at slot zero in the node. This
2353 * ignores those rules, and it should only be called when there are no
2354 * more updates to be done higher up in the tree.
2356 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2360 if (path->keep_locks)
2363 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2364 if (!path->nodes[i])
2366 if (!path->locks[i])
2368 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2374 * helper function for btrfs_search_slot. The goal is to find a block
2375 * in cache without setting the path to blocking. If we find the block
2376 * we return zero and the path is unchanged.
2378 * If we can't find the block, we set the path blocking and do some
2379 * reada. -EAGAIN is returned and the search must be repeated.
/*
 * NOTE(review): the "static int" line of this definition and several
 * body lines (locals, braces, *eb_ret stores, -EAGAIN/-EIO returns) are
 * missing from the excerpt. Surviving lines kept verbatim.
 */
2382 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2383 struct extent_buffer **eb_ret, int level, int slot,
2384 const struct btrfs_key *key)
2386 struct btrfs_fs_info *fs_info = root->fs_info;
2389 struct extent_buffer *b = *eb_ret;
2390 struct extent_buffer *tmp;
2393 blocknr = btrfs_node_blockptr(b, slot);
2394 gen = btrfs_node_ptr_generation(b, slot);
2396 tmp = find_extent_buffer(fs_info, blocknr);
2398 /* first we do an atomic uptodate check */
2399 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2404 /* the pages were up to date, but we failed
2405 * the generation number check. Do a full
2406 * read for the generation number that is correct.
2407 * We must do this without dropping locks so
2408 * we can trust our generation number
2410 btrfs_set_path_blocking(p);
2412 /* now we're allowed to do a blocking uptodate check */
2413 ret = btrfs_read_buffer(tmp, gen);
2418 free_extent_buffer(tmp);
2419 btrfs_release_path(p);
2424 * reduce lock contention at high levels
2425 * of the btree by dropping locks before
2426 * we read. Don't release the lock on the current
2427 * level because we need to walk this node to figure
2428 * out which blocks to read.
2430 btrfs_unlock_up_safe(p, level + 1);
2431 btrfs_set_path_blocking(p);
2433 free_extent_buffer(tmp);
2434 if (p->reada != READA_NONE)
2435 reada_for_search(fs_info, p, level, slot, key->objectid);
/* drop the path: caller must restart the search (-EAGAIN contract) */
2437 btrfs_release_path(p);
2440 tmp = read_tree_block(fs_info, blocknr, 0);
2443 * If the read above didn't mark this buffer up to date,
2444 * it will never end up being up to date. Set ret to EIO now
2445 * and give up so that our caller doesn't loop forever
2448 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2450 free_extent_buffer(tmp);
2458 * helper function for btrfs_search_slot. This does all of the checks
2459 * for node-level blocks and does any balancing required based on
2462 * If no extra work was required, zero is returned. If we had to
2463 * drop the path, -EAGAIN is returned and btrfs_search_slot must
/*
 * NOTE(review): the "static int" line of this definition and some body
 * lines (sret declaration, braces, done/out labels and returns) are
 * missing from the excerpt. Surviving lines kept verbatim.
 */
2467 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2468 struct btrfs_root *root, struct btrfs_path *p,
2469 struct extent_buffer *b, int level, int ins_len,
2470 int *write_lock_level)
2472 struct btrfs_fs_info *fs_info = root->fs_info;
/* node nearly full on an insert/split search: split it */
2475 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2476 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
/* splitting needs a write lock one level up; retry with it */
2479 if (*write_lock_level < level + 1) {
2480 *write_lock_level = level + 1;
2481 btrfs_release_path(p);
2485 btrfs_set_path_blocking(p);
2486 reada_for_balance(fs_info, p, level);
2487 sret = split_node(trans, root, p, level);
2488 btrfs_clear_path_blocking(p, NULL, 0);
2495 b = p->nodes[level];
/* node less than half full on a delete search: rebalance it */
2496 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2497 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2500 if (*write_lock_level < level + 1) {
2501 *write_lock_level = level + 1;
2502 btrfs_release_path(p);
2506 btrfs_set_path_blocking(p);
2507 reada_for_balance(fs_info, p, level);
2508 sret = balance_level(trans, root, p, level);
2509 btrfs_clear_path_blocking(p, NULL, 0);
2515 b = p->nodes[level];
2517 btrfs_release_path(p);
2520 BUG_ON(btrfs_header_nritems(b) == 1);
/*
 * NOTE(review): excerpt is missing lines (the level parameter, the
 * level==0 branch selector, sizeof arguments, #endif and braces).
 * Surviving lines kept verbatim. Debug-only: compiled away unless
 * CONFIG_BTRFS_ASSERT is set.
 */
2530 static void key_search_validate(struct extent_buffer *b,
2531 const struct btrfs_key *key,
2534 #ifdef CONFIG_BTRFS_ASSERT
2535 struct btrfs_disk_key disk_key;
2537 btrfs_cpu_key_to_disk(&disk_key, key);
/* assert that slot 0's key in the buffer matches the searched key */
2540 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2541 offsetof(struct btrfs_leaf, items[0].key),
2544 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2545 offsetof(struct btrfs_node, ptrs[0].key),
/*
 * Search @b for @key at @level, skipping the binary search when a
 * previous level already compared equal (*prev_cmp == 0): once a search
 * hits slot 0 exactly, every level below must also match at slot 0.
 *
 * Stores the result slot in *slot and returns the comparison result
 * (0 == exact match).
 *
 * (Reconstructed: the excerpt had dropped the braces, both returns and
 * the *slot = 0 store on the fast path.)
 */
static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
{
	if (*prev_cmp != 0) {
		*prev_cmp = btrfs_bin_search(b, key, level, slot);
		return *prev_cmp;
	}

	key_search_validate(b, key, level);
	*slot = 0;

	return 0;
}
/*
 * NOTE(review): excerpt is missing lines (key.offset setup, braces,
 * error returns and the final return). Surviving lines kept verbatim.
 */
2564 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2565 u64 iobjectid, u64 ioff, u8 key_type,
2566 struct btrfs_key *found_key)
2569 struct btrfs_key key;
2570 struct extent_buffer *eb;
2575 key.type = key_type;
2576 key.objectid = iobjectid;
2579 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2583 eb = path->nodes[0];
/* not found in this leaf: step to the next leaf and re-check */
2584 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2585 ret = btrfs_next_leaf(fs_root, path);
2588 eb = path->nodes[0];
2591 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
/* verify the landed-on item really matches the requested key */
2592 if (found_key->type != key.type ||
2593 found_key->objectid != key.objectid)
2600 * btrfs_search_slot - look for a key in a tree and perform necessary
2601 * modifications to preserve tree invariants.
2603 * @trans: Handle of transaction, used when modifying the tree
2604 * @p: Holds all btree nodes along the search path
2605 * @root: The root node of the tree
2606 * @key: The key we are looking for
2607 * @ins_len: Indicates purpose of search, for inserts it is 1, for
2608 * deletions it's -1. 0 for plain searches
2609 * @cow: boolean should CoW operations be performed. Must always be 1
2610 * when modifying the tree.
2612 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2613 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2615 * If @key is found, 0 is returned and you can find the item in the leaf level
2616 * of the path (level 0)
2618 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2619 * points to the slot where it should be inserted
2621 * If an error is encountered while searching the tree a negative error number
/*
 * NOTE(review): this excerpt is heavily line-sampled (missing locals
 * such as slot/ret/err/level/root_lock/prev_cmp, the again: label,
 * braces, goto targets and the done: epilogue). The retry/lock-upgrade
 * protocol depends on exact ordering; surviving lines kept verbatim.
 */
2624 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2625 const struct btrfs_key *key, struct btrfs_path *p,
2626 int ins_len, int cow)
2628 struct btrfs_fs_info *fs_info = root->fs_info;
2629 struct extent_buffer *b;
2634 int lowest_unlock = 1;
2636 /* everything at write_lock_level or lower must be write locked */
2637 int write_lock_level = 0;
2638 u8 lowest_level = 0;
2639 int min_write_lock_level;
2642 lowest_level = p->lowest_level;
2643 WARN_ON(lowest_level && ins_len > 0);
2644 WARN_ON(p->nodes[0] != NULL);
2645 BUG_ON(!cow && ins_len);
2650 /* when we are removing items, we might have to go up to level
2651 * two as we update tree pointers Make sure we keep write
2652 * for those levels as well
2654 write_lock_level = 2;
2655 } else if (ins_len > 0) {
2657 * for inserting items, make sure we have a write lock on
2658 * level 1 so we can update keys
2660 write_lock_level = 1;
2664 write_lock_level = -1;
2666 if (cow && (p->keep_locks || p->lowest_level))
2667 write_lock_level = BTRFS_MAX_LEVEL;
2669 min_write_lock_level = write_lock_level;
2674 * we try very hard to do read locks on the root
2676 root_lock = BTRFS_READ_LOCK;
2678 if (p->search_commit_root) {
2680 * the commit roots are read only
2681 * so we always do read locks
2683 if (p->need_commit_sem)
2684 down_read(&fs_info->commit_root_sem);
2685 b = root->commit_root;
2686 extent_buffer_get(b);
2687 level = btrfs_header_level(b);
2688 if (p->need_commit_sem)
2689 up_read(&fs_info->commit_root_sem);
2690 if (!p->skip_locking)
2691 btrfs_tree_read_lock(b);
2693 if (p->skip_locking) {
2694 b = btrfs_root_node(root);
2695 level = btrfs_header_level(b);
2697 /* we don't know the level of the root node
2698 * until we actually have it read locked
2700 b = btrfs_read_lock_root_node(root);
2701 level = btrfs_header_level(b);
2702 if (level <= write_lock_level) {
2703 /* whoops, must trade for write lock */
2704 btrfs_tree_read_unlock(b);
2705 free_extent_buffer(b);
2706 b = btrfs_lock_root_node(root);
2707 root_lock = BTRFS_WRITE_LOCK;
2709 /* the level might have changed, check again */
2710 level = btrfs_header_level(b);
2714 p->nodes[level] = b;
2715 if (!p->skip_locking)
2716 p->locks[level] = root_lock;
/* main walk: descend one level per iteration */
2719 level = btrfs_header_level(b);
2722 * setup the path here so we can release it under lock
2723 * contention with the cow code
2726 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2729 * if we don't really need to cow this block
2730 * then we don't want to set the path blocking,
2731 * so we test it here
2733 if (!should_cow_block(trans, root, b)) {
2734 trans->dirty = true;
2739 * must have write locks on this node and the
2742 if (level > write_lock_level ||
2743 (level + 1 > write_lock_level &&
2744 level + 1 < BTRFS_MAX_LEVEL &&
2745 p->nodes[level + 1])) {
2746 write_lock_level = level + 1;
2747 btrfs_release_path(p);
2751 btrfs_set_path_blocking(p);
2753 err = btrfs_cow_block(trans, root, b, NULL, 0,
2756 err = btrfs_cow_block(trans, root, b,
2757 p->nodes[level + 1],
2758 p->slots[level + 1], &b);
2765 p->nodes[level] = b;
2766 btrfs_clear_path_blocking(p, NULL, 0);
2769 * we have a lock on b and as long as we aren't changing
2770 * the tree, there is no way to for the items in b to change.
2771 * It is safe to drop the lock on our parent before we
2772 * go through the expensive btree search on b.
2774 * If we're inserting or deleting (ins_len != 0), then we might
2775 * be changing slot zero, which may require changing the parent.
2776 * So, we can't drop the lock until after we know which slot
2777 * we're operating on.
2779 if (!ins_len && !p->keep_locks) {
2782 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2783 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2788 ret = key_search(b, key, level, &prev_cmp, &slot);
/* internal node: step back one slot when key sorts before slot's key */
2794 if (ret && slot > 0) {
2798 p->slots[level] = slot;
2799 err = setup_nodes_for_search(trans, root, p, b, level,
2800 ins_len, &write_lock_level);
2807 b = p->nodes[level];
2808 slot = p->slots[level];
2811 * slot 0 is special, if we change the key
2812 * we have to update the parent pointer
2813 * which means we must have a write lock
2816 if (slot == 0 && ins_len &&
2817 write_lock_level < level + 1) {
2818 write_lock_level = level + 1;
2819 btrfs_release_path(p);
2823 unlock_up(p, level, lowest_unlock,
2824 min_write_lock_level, &write_lock_level);
2826 if (level == lowest_level) {
2832 err = read_block_for_search(root, p, &b, level,
/* lock the child we just read, write or read per write_lock_level */
2841 if (!p->skip_locking) {
2842 level = btrfs_header_level(b);
2843 if (level <= write_lock_level) {
2844 err = btrfs_try_tree_write_lock(b);
2846 btrfs_set_path_blocking(p);
2848 btrfs_clear_path_blocking(p, b,
2851 p->locks[level] = BTRFS_WRITE_LOCK;
2853 err = btrfs_tree_read_lock_atomic(b);
2855 btrfs_set_path_blocking(p);
2856 btrfs_tree_read_lock(b);
2857 btrfs_clear_path_blocking(p, b,
2860 p->locks[level] = BTRFS_READ_LOCK;
2862 p->nodes[level] = b;
/* reached the leaf: split it when an insert won't fit */
2865 p->slots[level] = slot;
2867 btrfs_leaf_free_space(fs_info, b) < ins_len) {
2868 if (write_lock_level < 1) {
2869 write_lock_level = 1;
2870 btrfs_release_path(p);
2874 btrfs_set_path_blocking(p);
2875 err = split_leaf(trans, root, key,
2876 p, ins_len, ret == 0);
2877 btrfs_clear_path_blocking(p, NULL, 0);
2885 if (!p->search_for_split)
2886 unlock_up(p, level, lowest_unlock,
2887 min_write_lock_level, &write_lock_level);
2894 * we don't really know what they plan on doing with the path
2895 * from here on, so for now just mark it as blocking
2897 if (!p->leave_spinning)
2898 btrfs_set_path_blocking(p);
2899 if (ret < 0 && !p->skip_release_on_error)
2900 btrfs_release_path(p);
2905 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2906 * current state of the tree together with the operations recorded in the tree
2907 * modification log to search for the key in a previous version of this tree, as
2908 * denoted by the time_seq parameter.
2910 * Naturally, there is no support for insert, delete or cow operations.
2912 * The resulting path and return value will be set up as if we called
2913 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
/*
 * Read-only search of a historical version of the tree: get_old_root() and
 * tree_mod_log_rewind() reconstruct the tree state at @time_seq, then the
 * walk proceeds like btrfs_search_slot() with ins_len == 0 and cow == 0.
 * Only read locks are ever taken on this path.
 *
 * NOTE(review): this listing elides some original source lines (see gaps in
 * the inline numbering); the body below is not a complete function.
 */
2915 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2916 struct btrfs_path *p, u64 time_seq)
2918 struct btrfs_fs_info *fs_info = root->fs_info;
2919 struct extent_buffer *b;
2924 int lowest_unlock = 1;
2925 u8 lowest_level = 0;
2928 lowest_level = p->lowest_level;
2929 WARN_ON(p->nodes[0] != NULL);
/* commit roots are immutable, so no mod-log replay is needed there */
2931 if (p->search_commit_root) {
2933 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2937 b = get_old_root(root, time_seq);
2938 level = btrfs_header_level(b);
2939 p->locks[level] = BTRFS_READ_LOCK;
2942 level = btrfs_header_level(b);
2943 p->nodes[level] = b;
2944 btrfs_clear_path_blocking(p, NULL, 0);
2947 * we have a lock on b and as long as we aren't changing
2948 * the tree, there is no way to for the items in b to change.
2949 * It is safe to drop the lock on our parent before we
2950 * go through the expensive btree search on b.
2952 btrfs_unlock_up_safe(p, level + 1);
2955 * Since we can unwind ebs we want to do a real search every
2959 ret = key_search(b, key, level, &prev_cmp, &slot);
/* in a node, a non-exact match must step back to the covering key ptr */
2963 if (ret && slot > 0) {
2967 p->slots[level] = slot;
2968 unlock_up(p, level, lowest_unlock, 0, NULL);
2970 if (level == lowest_level) {
2976 err = read_block_for_search(root, p, &b, level,
/* try the cheap atomic read lock first; fall back to a blocking one */
2985 level = btrfs_header_level(b);
2986 err = btrfs_tree_read_lock_atomic(b);
2988 btrfs_set_path_blocking(p);
2989 btrfs_tree_read_lock(b);
2990 btrfs_clear_path_blocking(p, b,
/* rewind the freshly read block to the @time_seq view before descending */
2993 b = tree_mod_log_rewind(fs_info, p, b, time_seq);
2998 p->locks[level] = BTRFS_READ_LOCK;
2999 p->nodes[level] = b;
3001 p->slots[level] = slot;
3002 unlock_up(p, level, lowest_unlock, 0, NULL);
3008 if (!p->leave_spinning)
3009 btrfs_set_path_blocking(p);
/* on error the path is released so the caller gets a clean path back */
3011 btrfs_release_path(p);
3017 * helper to use instead of search slot if no exact match is needed but
3018 * instead the next or previous item should be returned.
3019 * When find_higher is true, the next higher item is returned, the next lower
3021 * When return_any and find_higher are both true, and no higher item is found,
3022 * return the next lower instead.
3023 * When return_any is true and find_higher is false, and no lower item is found,
3024 * return the next higher instead.
3025 * It returns 0 if any item is found, 1 if none is found (tree empty), and
/*
 * Read-only search that tolerates a missing exact match: depending on
 * @find_higher (and, presumably, a return_any parameter hidden by the
 * elided lines - confirm against the full source) it walks to the next
 * higher or lower item via btrfs_next_leaf()/btrfs_prev_leaf().
 *
 * NOTE(review): several original lines are elided from this listing.
 */
3028 int btrfs_search_slot_for_read(struct btrfs_root *root,
3029 const struct btrfs_key *key,
3030 struct btrfs_path *p, int find_higher,
3034 struct extent_buffer *leaf;
3037 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3041 * a return value of 1 means the path is at the position where the
3042 * item should be inserted. Normally this is the next bigger item,
3043 * but in case the previous item is the last in a leaf, path points
3044 * to the first free slot in the previous leaf, i.e. at an invalid
/* slot past the end of the leaf: advance to the next leaf for "higher" */
3050 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3051 ret = btrfs_next_leaf(root, p);
3057 * no higher item found, return the next
/* release and retry in the other direction (return_any fallback) */
3062 btrfs_release_path(p);
/* slot 0 means the lower item lives in the previous leaf */
3066 if (p->slots[0] == 0) {
3067 ret = btrfs_prev_leaf(root, p);
3072 if (p->slots[0] == btrfs_header_nritems(leaf))
3079 * no lower item found, return the next
3084 btrfs_release_path(p);
3094 * adjust the pointers going up the tree, starting at level
3095 * making sure the right key of each node is points to 'key'.
3096 * This is used after shifting pointers to the left, so it stops
3097 * fixing up pointers when a given leaf/node is not in slot 0 of the
/*
 * Propagate a changed slot-0 key upward: for every ancestor whose slot is 0,
 * rewrite its key pointer to @key, logging each replacement in the tree mod
 * log. Stops (per the header comment above) at the first level whose slot
 * is nonzero - the break lives in lines elided from this listing.
 */
3101 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3102 struct btrfs_path *path,
3103 struct btrfs_disk_key *key, int level)
3106 struct extent_buffer *t;
3109 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3110 int tslot = path->slots[i];
3112 if (!path->nodes[i])
/* log the key replacement so old-version searches can unwind it */
3115 ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3118 btrfs_set_node_key(t, key, tslot);
3119 btrfs_mark_buffer_dirty(path->nodes[i]);
3128 * This function isn't completely safe. It's the caller's responsibility
3129 * that the new key won't break the order
/*
 * Overwrite the key of the item at path->slots[0] with @new_key, asserting
 * (BUG_ON) that the new key still sorts strictly between its neighbours so
 * leaf ordering is preserved. If the item is in slot 0, the parent key
 * pointers are fixed up as well.
 */
3131 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3132 struct btrfs_path *path,
3133 const struct btrfs_key *new_key)
3135 struct btrfs_disk_key disk_key;
3136 struct extent_buffer *eb;
3139 eb = path->nodes[0];
3140 slot = path->slots[0];
/* new key must be strictly greater than the previous item's key */
3142 btrfs_item_key(eb, &disk_key, slot - 1);
3143 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
/* ... and strictly less than the next item's key, if one exists */
3145 if (slot < btrfs_header_nritems(eb) - 1) {
3146 btrfs_item_key(eb, &disk_key, slot + 1);
3147 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3150 btrfs_cpu_key_to_disk(&disk_key, new_key);
3151 btrfs_set_item_key(eb, &disk_key, slot);
3152 btrfs_mark_buffer_dirty(eb);
/* slot 0 changed: parents' key pointers must follow */
3154 fixup_low_keys(fs_info, path, &disk_key, 1);
3158 * try to push data from one node into the next node left in the
3161 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3162 * error, and > 0 if there was no room in the left hand block.
/*
 * Move key pointers from node @src into the tail of node @dst (its left
 * sibling). When @empty is false, at least 8 pointers are kept in @src so
 * it is not drained below the rebalance threshold. Both buffers must belong
 * to the current transaction (WARN_ONs below). All moves are recorded in
 * the tree mod log before the buffers are touched.
 *
 * NOTE(review): early-return statements for the guard conditions are in
 * lines elided from this listing.
 */
3164 static int push_node_left(struct btrfs_trans_handle *trans,
3165 struct btrfs_fs_info *fs_info,
3166 struct extent_buffer *dst,
3167 struct extent_buffer *src, int empty)
3174 src_nritems = btrfs_header_nritems(src);
3175 dst_nritems = btrfs_header_nritems(dst);
/* free pointer slots remaining in dst */
3176 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3177 WARN_ON(btrfs_header_generation(src) != trans->transid);
3178 WARN_ON(btrfs_header_generation(dst) != trans->transid);
/* tiny non-empty sources aren't worth rebalancing */
3180 if (!empty && src_nritems <= 8)
3183 if (push_items <= 0)
3187 push_items = min(src_nritems, push_items);
3188 if (push_items < src_nritems) {
3189 /* leave at least 8 pointers in the node if
3190 * we aren't going to empty it
3192 if (src_nritems - push_items < 8) {
3193 if (push_items <= 8)
3199 push_items = min(src_nritems - 8, push_items);
/* log the copy first so tree_mod_log readers can unwind it */
3201 ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3204 btrfs_abort_transaction(trans, ret);
3207 copy_extent_buffer(dst, src,
3208 btrfs_node_key_ptr_offset(dst_nritems),
3209 btrfs_node_key_ptr_offset(0),
3210 push_items * sizeof(struct btrfs_key_ptr));
3212 if (push_items < src_nritems) {
3214 * Don't call tree_mod_log_insert_move here, key removal was
3215 * already fully logged by tree_mod_log_eb_copy above.
3217 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3218 btrfs_node_key_ptr_offset(push_items),
3219 (src_nritems - push_items) *
3220 sizeof(struct btrfs_key_ptr));
3222 btrfs_set_header_nritems(src, src_nritems - push_items);
3223 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3224 btrfs_mark_buffer_dirty(src);
3225 btrfs_mark_buffer_dirty(dst);
3231 * try to push data from one node into the next node right in the
3234 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3235 * error, and > 0 if there was no room in the right hand block.
3237 * this will only push up to 1/2 the contents of the left node over
/*
 * Move up to half of @src's key pointers into the front of @dst (its right
 * sibling): existing dst pointers are shifted right, then the tail of src
 * is copied into the gap. Mirrors push_node_left() but never empties @src.
 *
 * NOTE(review): the early-return statements for the guard conditions are in
 * lines elided from this listing.
 */
3239 static int balance_node_right(struct btrfs_trans_handle *trans,
3240 struct btrfs_fs_info *fs_info,
3241 struct extent_buffer *dst,
3242 struct extent_buffer *src)
3250 WARN_ON(btrfs_header_generation(src) != trans->transid);
3251 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3253 src_nritems = btrfs_header_nritems(src);
3254 dst_nritems = btrfs_header_nritems(dst);
3255 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3256 if (push_items <= 0)
3259 if (src_nritems < 4)
/* push at most half + 1, and never everything */
3262 max_push = src_nritems / 2 + 1;
3263 /* don't try to empty the node */
3264 if (max_push >= src_nritems)
3267 if (max_push < push_items)
3268 push_items = max_push;
/* log the shift of dst's existing pointers before moving them */
3270 ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3272 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3273 btrfs_node_key_ptr_offset(0),
3275 sizeof(struct btrfs_key_ptr));
3277 ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3278 src_nritems - push_items, push_items);
3280 btrfs_abort_transaction(trans, ret);
3283 copy_extent_buffer(dst, src,
3284 btrfs_node_key_ptr_offset(0),
3285 btrfs_node_key_ptr_offset(src_nritems - push_items),
3286 push_items * sizeof(struct btrfs_key_ptr));
3288 btrfs_set_header_nritems(src, src_nritems - push_items);
3289 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3291 btrfs_mark_buffer_dirty(src);
3292 btrfs_mark_buffer_dirty(dst);
3298 * helper function to insert a new root level in the tree.
3299 * A new node is allocated, and a single item is inserted to
3300 * point to the existing root
3302 * returns zero on success or < 0 on failure.
/*
 * Grow the tree by one level: allocate a new node @c containing a single
 * pointer to the current root, publish it as root->node (logged in the tree
 * mod log), and install it in the path with a blocking write lock.
 * The path must currently end at the old root (BUG_ONs below).
 */
3304 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3305 struct btrfs_root *root,
3306 struct btrfs_path *path, int level)
3308 struct btrfs_fs_info *fs_info = root->fs_info;
3310 struct extent_buffer *lower;
3311 struct extent_buffer *c;
3312 struct extent_buffer *old;
3313 struct btrfs_disk_key lower_key;
3316 BUG_ON(path->nodes[level]);
3317 BUG_ON(path->nodes[level-1] != root->node);
3319 lower = path->nodes[level-1];
/* first key of the old root: item key for a leaf, node key otherwise */
3321 btrfs_item_key(lower, &lower_key, 0);
3323 btrfs_node_key(lower, &lower_key, 0);
3325 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3326 &lower_key, level, root->node->start, 0);
3330 root_add_used(root, fs_info->nodesize);
/* freshly allocated block: initialize the header from scratch */
3332 memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
3333 btrfs_set_header_nritems(c, 1);
3334 btrfs_set_header_level(c, level);
3335 btrfs_set_header_bytenr(c, c->start);
3336 btrfs_set_header_generation(c, trans->transid);
3337 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3338 btrfs_set_header_owner(c, root->root_key.objectid);
3340 write_extent_buffer_fsid(c, fs_info->fsid);
3341 write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
/* single pointer: the old root, stamped with its generation */
3343 btrfs_set_node_key(c, &lower_key, 0);
3344 btrfs_set_node_blockptr(c, 0, lower->start);
3345 lower_gen = btrfs_header_generation(lower);
3346 WARN_ON(lower_gen != trans->transid);
3348 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3350 btrfs_mark_buffer_dirty(c);
/* log the root swap, then publish the new root for RCU readers */
3353 ret = tree_mod_log_insert_root(root->node, c, 0);
3355 rcu_assign_pointer(root->node, c);
3357 /* the super has an extra ref to root->node */
3358 free_extent_buffer(old);
3360 add_root_to_dirty_list(root);
3361 extent_buffer_get(c);
3362 path->nodes[level] = c;
3363 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3364 path->slots[level] = 0;
3369 * worker function to insert a single pointer in a node.
3370 * the node should have enough room for the pointer already
3372 * slot and level indicate where you want the key to go, and
3373 * blocknr is the block the key points to.
/*
 * Insert a single (key, bytenr) pointer at @slot of the node at @level,
 * shifting later pointers right. The node must be write-locked and must
 * have room (BUG_ONs below); both the move and the key insertion are
 * recorded in the tree mod log.
 */
3375 static void insert_ptr(struct btrfs_trans_handle *trans,
3376 struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3377 struct btrfs_disk_key *key, u64 bytenr,
3378 int slot, int level)
3380 struct extent_buffer *lower;
3384 BUG_ON(!path->nodes[level]);
3385 btrfs_assert_tree_locked(path->nodes[level]);
3386 lower = path->nodes[level];
3387 nritems = btrfs_header_nritems(lower);
3388 BUG_ON(slot > nritems);
3389 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
/* not appending: open a gap at @slot by shifting the tail right */
3390 if (slot != nritems) {
3392 ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3396 memmove_extent_buffer(lower,
3397 btrfs_node_key_ptr_offset(slot + 1),
3398 btrfs_node_key_ptr_offset(slot),
3399 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3402 ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3406 btrfs_set_node_key(lower, key, slot);
3407 btrfs_set_node_blockptr(lower, slot, bytenr);
3408 WARN_ON(trans->transid == 0);
3409 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3410 btrfs_set_header_nritems(lower, nritems + 1);
3411 btrfs_mark_buffer_dirty(lower);
3415 * split the node at the specified level in path in two.
3416 * The path is corrected to point to the appropriate node after the split
3418 * Before splitting this tries to make some room in the node by pushing
3419 * left and right, if either one works, it returns right away.
3421 * returns 0 on success and < 0 on failure
/*
 * Split the node at @level in two. If the node is the root, a new root is
 * inserted above it first; otherwise the siblings are asked to absorb items
 * (push_nodes_for_insert) so a split may be avoided entirely. The upper
 * half [mid, nritems) moves to a freshly allocated node, and the path is
 * updated to point at whichever half now holds path->slots[level].
 */
3423 static noinline int split_node(struct btrfs_trans_handle *trans,
3424 struct btrfs_root *root,
3425 struct btrfs_path *path, int level)
3427 struct btrfs_fs_info *fs_info = root->fs_info;
3428 struct extent_buffer *c;
3429 struct extent_buffer *split;
3430 struct btrfs_disk_key disk_key;
3435 c = path->nodes[level];
3436 WARN_ON(btrfs_header_generation(c) != trans->transid);
3437 if (c == root->node) {
3439 * trying to split the root, lets make a new one
3441 * tree mod log: We don't log_removal old root in
3442 * insert_new_root, because that root buffer will be kept as a
3443 * normal node. We are going to log removal of half of the
3444 * elements below with tree_mod_log_eb_copy. We're holding a
3445 * tree lock on the buffer, which is why we cannot race with
3446 * other tree_mod_log users.
3448 ret = insert_new_root(trans, root, path, level + 1);
/* non-root: try to push into siblings first to dodge the split */
3452 ret = push_nodes_for_insert(trans, root, path, level);
3453 c = path->nodes[level];
3454 if (!ret && btrfs_header_nritems(c) <
3455 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3461 c_nritems = btrfs_header_nritems(c);
3462 mid = (c_nritems + 1) / 2;
3463 btrfs_node_key(c, &disk_key, mid);
3465 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3466 &disk_key, level, c->start, 0);
3468 return PTR_ERR(split);
3470 root_add_used(root, fs_info->nodesize);
/* initialize the new node's header */
3472 memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
3473 btrfs_set_header_level(split, btrfs_header_level(c));
3474 btrfs_set_header_bytenr(split, split->start);
3475 btrfs_set_header_generation(split, trans->transid);
3476 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3477 btrfs_set_header_owner(split, root->root_key.objectid);
3478 write_extent_buffer_fsid(split, fs_info->fsid);
3479 write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
/* log then copy the upper half [mid, c_nritems) into the new node */
3481 ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3483 btrfs_abort_transaction(trans, ret);
3486 copy_extent_buffer(split, c,
3487 btrfs_node_key_ptr_offset(0),
3488 btrfs_node_key_ptr_offset(mid),
3489 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3490 btrfs_set_header_nritems(split, c_nritems - mid);
3491 btrfs_set_header_nritems(c, mid);
3494 btrfs_mark_buffer_dirty(c);
3495 btrfs_mark_buffer_dirty(split);
/* link the new node into the parent, right after c */
3497 insert_ptr(trans, fs_info, path, &disk_key, split->start,
3498 path->slots[level + 1] + 1, level + 1);
/* retarget the path to whichever half now holds our slot */
3500 if (path->slots[level] >= mid) {
3501 path->slots[level] -= mid;
3502 btrfs_tree_unlock(c);
3503 free_extent_buffer(c);
3504 path->nodes[level] = split;
3505 path->slots[level + 1] += 1;
3507 btrfs_tree_unlock(split);
3508 free_extent_buffer(split);
3514 * how many bytes are required to store the items in a leaf. start
3515 * and nr indicate which items in the leaf to check. This totals up the
3516 * space used both by the item structs and the item data
/*
 * Bytes consumed in leaf @l by items [start, start+nr): the item-struct
 * space (nr * sizeof(struct btrfs_item)) plus the data span between the
 * first item's end offset and the last item's start offset. Items store
 * data back-to-front, which is why the offsets are subtracted this way.
 */
3518 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3520 struct btrfs_item *start_item;
3521 struct btrfs_item *end_item;
3522 struct btrfs_map_token token;
3524 int nritems = btrfs_header_nritems(l);
/* clamp to the last existing item */
3525 int end = min(nritems, start + nr) - 1;
3529 btrfs_init_map_token(&token);
3530 start_item = btrfs_item_nr(start);
3531 end_item = btrfs_item_nr(end);
3532 data_len = btrfs_token_item_offset(l, start_item, &token) +
3533 btrfs_token_item_size(l, start_item, &token);
3534 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3535 data_len += sizeof(struct btrfs_item) * nr;
3536 WARN_ON(data_len < 0);
3541 * The space between the end of the leaf items and
3542 * the start of the leaf data. IOW, how much room
3543 * the leaf has left for both items and data
/*
 * Free bytes left in @leaf for new items and their data: the leaf data
 * area size minus everything currently used. A negative result indicates
 * on-disk corruption and is reported via the (elided) btrfs_crit call
 * whose format string is visible below.
 */
3545 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3546 struct extent_buffer *leaf)
3548 int nritems = btrfs_header_nritems(leaf);
3551 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3554 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3556 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3557 leaf_space_used(leaf, 0, nritems), nritems);
3563 * min slot controls the lowest index we're willing to push to the
3564 * right. We'll push up to and including min_slot, but no lower
/*
 * Move items from the end of the leaf at path->nodes[0] into the front of
 * its already-locked right sibling @right. First a counting pass (walking
 * backwards from the last item) decides how many items fit; then the data
 * area and item headers are shuffled and both leaves plus the parent key
 * pointer are updated. Items at indices below @min_slot are never moved.
 *
 * NOTE(review): this listing elides some original lines (loop header,
 * early-exit gotos); the visible code is not a complete function.
 */
3566 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3567 struct btrfs_path *path,
3568 int data_size, int empty,
3569 struct extent_buffer *right,
3570 int free_space, u32 left_nritems,
3573 struct extent_buffer *left = path->nodes[0];
3574 struct extent_buffer *upper = path->nodes[1];
3575 struct btrfs_map_token token;
3576 struct btrfs_disk_key disk_key;
3581 struct btrfs_item *item;
3587 btrfs_init_map_token(&token);
3592 nr = max_t(u32, 1, min_slot);
3594 if (path->slots[0] >= left_nritems)
3595 push_space += data_size;
3597 slot = path->slots[1];
/* counting pass: walk items from the end and total what fits */
3598 i = left_nritems - 1;
3600 item = btrfs_item_nr(i);
3602 if (!empty && push_items > 0) {
3603 if (path->slots[0] > i)
3605 if (path->slots[0] == i) {
3606 int space = btrfs_leaf_free_space(fs_info, left);
3607 if (space + push_space * 2 > free_space)
3612 if (path->slots[0] == i)
3613 push_space += data_size;
3615 this_item_size = btrfs_item_size(left, item);
3616 if (this_item_size + sizeof(*item) + push_space > free_space)
3620 push_space += this_item_size + sizeof(*item);
3626 if (push_items == 0)
3629 WARN_ON(!empty && push_items == left_nritems);
3631 /* push left to right */
3632 right_nritems = btrfs_header_nritems(right);
3634 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3635 push_space -= leaf_data_end(fs_info, left);
3637 /* make room in the right data area */
3638 data_end = leaf_data_end(fs_info, right);
3639 memmove_extent_buffer(right,
3640 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3641 BTRFS_LEAF_DATA_OFFSET + data_end,
3642 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3644 /* copy from the left data area */
3645 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3646 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3647 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
/* shift right's item headers to open room at the front */
3650 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3651 btrfs_item_nr_offset(0),
3652 right_nritems * sizeof(struct btrfs_item));
3654 /* copy the items from left to right */
3655 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3656 btrfs_item_nr_offset(left_nritems - push_items),
3657 push_items * sizeof(struct btrfs_item));
3659 /* update the item pointers */
3660 right_nritems += push_items;
3661 btrfs_set_header_nritems(right, right_nritems);
3662 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
/* recompute every data offset in right, back-to-front */
3663 for (i = 0; i < right_nritems; i++) {
3664 item = btrfs_item_nr(i);
3665 push_space -= btrfs_token_item_size(right, item, &token);
3666 btrfs_set_token_item_offset(right, item, push_space, &token);
3669 left_nritems -= push_items;
3670 btrfs_set_header_nritems(left, left_nritems);
3673 btrfs_mark_buffer_dirty(left);
3675 clean_tree_block(fs_info, left);
3677 btrfs_mark_buffer_dirty(right);
/* right's first key changed; update the parent's pointer to it */
3679 btrfs_item_key(right, &disk_key, 0);
3680 btrfs_set_node_key(upper, &disk_key, slot + 1);
3681 btrfs_mark_buffer_dirty(upper);
3683 /* then fixup the leaf pointer in the path */
3684 if (path->slots[0] >= left_nritems) {
3685 path->slots[0] -= left_nritems;
3686 if (btrfs_header_nritems(path->nodes[0]) == 0)
3687 clean_tree_block(fs_info, path->nodes[0]);
3688 btrfs_tree_unlock(path->nodes[0]);
3689 free_extent_buffer(path->nodes[0]);
3690 path->nodes[0] = right;
3691 path->slots[1] += 1;
3693 btrfs_tree_unlock(right);
3694 free_extent_buffer(right);
3699 btrfs_tree_unlock(right);
3700 free_extent_buffer(right);
3705 * push some data in the path leaf to the right, trying to free up at
3706 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3708 * returns 1 if the push failed because the other node didn't have enough
3709 * room, 0 if everything worked out and < 0 if there were major errors.
3711 * this will push starting from min_slot to the end of the leaf. It won't
3712 * push any slot lower than min_slot
/*
 * Wrapper that locates, locks and COWs the right sibling leaf, rechecks its
 * free space after COW, and then delegates the actual item movement to
 * __push_leaf_right(). Also handles the fast path where the insertion key
 * is past the end of our leaf and the right neighbor can simply take it.
 */
3714 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3715 *root, struct btrfs_path *path,
3716 int min_data_size, int data_size,
3717 int empty, u32 min_slot)
3719 struct btrfs_fs_info *fs_info = root->fs_info;
3720 struct extent_buffer *left = path->nodes[0];
3721 struct extent_buffer *right;
3722 struct extent_buffer *upper;
/* no parent means we are the root leaf: nowhere to push */
3728 if (!path->nodes[1])
3731 slot = path->slots[1];
3732 upper = path->nodes[1];
3733 if (slot >= btrfs_header_nritems(upper) - 1)
3736 btrfs_assert_tree_locked(path->nodes[1]);
3738 right = read_node_slot(fs_info, upper, slot + 1);
3740 * slot + 1 is not valid or we fail to read the right node,
3741 * no big deal, just return.
3746 btrfs_tree_lock(right);
3747 btrfs_set_lock_blocking(right);
3749 free_space = btrfs_leaf_free_space(fs_info, right);
3750 if (free_space < data_size)
3753 /* cow and double check */
3754 ret = btrfs_cow_block(trans, root, right, upper,
/* COW may have changed the usable space; re-verify */
3759 free_space = btrfs_leaf_free_space(fs_info, right);
3760 if (free_space < data_size)
3763 left_nritems = btrfs_header_nritems(left);
3764 if (left_nritems == 0)
3767 if (path->slots[0] == left_nritems && !empty) {
3768 /* Key greater than all keys in the leaf, right neighbor has
3769 * enough room for it and we're not emptying our leaf to delete
3770 * it, therefore use right neighbor to insert the new item and
3771 * no need to touch/dirty our left leaft. */
3772 btrfs_tree_unlock(left);
3773 free_extent_buffer(left);
3774 path->nodes[0] = right;
3780 return __push_leaf_right(fs_info, path, min_data_size, empty,
3781 right, free_space, left_nritems, min_slot);
3783 btrfs_tree_unlock(right);
3784 free_extent_buffer(right);
3789 * push some data in the path leaf to the left, trying to free up at
3790 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3792 * max_slot can put a limit on how far into the leaf we'll push items. The
3793 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
/*
 * Mirror of __push_leaf_right(): move items from the front of the leaf at
 * path->nodes[0] into the tail of its already-locked left sibling @left.
 * A forward counting pass decides how many of the first @nr items fit,
 * then data and item headers are copied, offsets rebuilt on both sides,
 * and the parent's key for the right leaf fixed up. The item at @max_slot
 * is never touched.
 *
 * NOTE(review): this listing elides some original lines (gotos, loop
 * closers); the visible code is not a complete function.
 */
3796 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3797 struct btrfs_path *path, int data_size,
3798 int empty, struct extent_buffer *left,
3799 int free_space, u32 right_nritems,
3802 struct btrfs_disk_key disk_key;
3803 struct extent_buffer *right = path->nodes[0];
3807 struct btrfs_item *item;
3808 u32 old_left_nritems;
3812 u32 old_left_item_size;
3813 struct btrfs_map_token token;
3815 btrfs_init_map_token(&token);
/* when not emptying, always keep at least one item in right */
3818 nr = min(right_nritems, max_slot);
3820 nr = min(right_nritems - 1, max_slot);
/* counting pass: walk items from the front and total what fits */
3822 for (i = 0; i < nr; i++) {
3823 item = btrfs_item_nr(i);
3825 if (!empty && push_items > 0) {
3826 if (path->slots[0] < i)
3828 if (path->slots[0] == i) {
3829 int space = btrfs_leaf_free_space(fs_info, right);
3830 if (space + push_space * 2 > free_space)
3835 if (path->slots[0] == i)
3836 push_space += data_size;
3838 this_item_size = btrfs_item_size(right, item);
3839 if (this_item_size + sizeof(*item) + push_space > free_space)
3843 push_space += this_item_size + sizeof(*item);
3846 if (push_items == 0) {
3850 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3852 /* push data from right to left */
3853 copy_extent_buffer(left, right,
3854 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3855 btrfs_item_nr_offset(0),
3856 push_items * sizeof(struct btrfs_item));
3858 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3859 btrfs_item_offset_nr(right, push_items - 1);
3861 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3862 leaf_data_end(fs_info, left) - push_space,
3863 BTRFS_LEAF_DATA_OFFSET +
3864 btrfs_item_offset_nr(right, push_items - 1),
3866 old_left_nritems = btrfs_header_nritems(left);
3867 BUG_ON(old_left_nritems <= 0);
/* rebase the freshly copied items' offsets into left's data area */
3869 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3870 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3873 item = btrfs_item_nr(i);
3875 ioff = btrfs_token_item_offset(left, item, &token);
3876 btrfs_set_token_item_offset(left, item,
3877 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3880 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3882 /* fixup right node */
3883 if (push_items > right_nritems)
3884 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3887 if (push_items < right_nritems) {
3888 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3889 leaf_data_end(fs_info, right);
3890 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3891 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3892 BTRFS_LEAF_DATA_OFFSET +
3893 leaf_data_end(fs_info, right), push_space);
3895 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3896 btrfs_item_nr_offset(push_items),
3897 (btrfs_header_nritems(right) - push_items) *
3898 sizeof(struct btrfs_item));
3900 right_nritems -= push_items;
3901 btrfs_set_header_nritems(right, right_nritems);
3902 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
/* recompute right's remaining data offsets, back-to-front */
3903 for (i = 0; i < right_nritems; i++) {
3904 item = btrfs_item_nr(i);
3906 push_space = push_space - btrfs_token_item_size(right,
3908 btrfs_set_token_item_offset(right, item, push_space, &token);
3911 btrfs_mark_buffer_dirty(left);
3913 btrfs_mark_buffer_dirty(right);
3915 clean_tree_block(fs_info, right);
/* right's first key changed; fix all ancestors pointing at slot 0 */
3917 btrfs_item_key(right, &disk_key, 0);
3918 fixup_low_keys(fs_info, path, &disk_key, 1);
3920 /* then fixup the leaf pointer in the path */
3921 if (path->slots[0] < push_items) {
3922 path->slots[0] += old_left_nritems;
3923 btrfs_tree_unlock(path->nodes[0]);
3924 free_extent_buffer(path->nodes[0]);
3925 path->nodes[0] = left;
3926 path->slots[1] -= 1;
3928 btrfs_tree_unlock(left);
3929 free_extent_buffer(left);
3930 path->slots[0] -= push_items;
3932 BUG_ON(path->slots[0] < 0);
3935 btrfs_tree_unlock(left);
3936 free_extent_buffer(left);
3941 * push some data in the path leaf to the left, trying to free up at
3942 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3944 * max_slot can put a limit on how far into the leaf we'll push items. The
3945 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
/*
 * Wrapper that locates, locks and COWs the left sibling leaf, rechecks its
 * free space after COW, and delegates the item movement to
 * __push_leaf_left(). Early-returns are taken (in elided lines) when there
 * is no parent, no left sibling, or not enough room.
 */
3948 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3949 *root, struct btrfs_path *path, int min_data_size,
3950 int data_size, int empty, u32 max_slot)
3952 struct btrfs_fs_info *fs_info = root->fs_info;
3953 struct extent_buffer *right = path->nodes[0];
3954 struct extent_buffer *left;
3960 slot = path->slots[1];
3963 if (!path->nodes[1])
3966 right_nritems = btrfs_header_nritems(right);
3967 if (right_nritems == 0)
3970 btrfs_assert_tree_locked(path->nodes[1]);
3972 left = read_node_slot(fs_info, path->nodes[1], slot - 1);
3974 * slot - 1 is not valid or we fail to read the left node,
3975 * no big deal, just return.
3980 btrfs_tree_lock(left);
3981 btrfs_set_lock_blocking(left);
3983 free_space = btrfs_leaf_free_space(fs_info, left);
3984 if (free_space < data_size) {
3989 /* cow and double check */
3990 ret = btrfs_cow_block(trans, root, left,
3991 path->nodes[1], slot - 1, &left);
3993 /* we hit -ENOSPC, but it isn't fatal here */
/* COW may have changed the usable space; re-verify */
3999 free_space = btrfs_leaf_free_space(fs_info, left);
4000 if (free_space < data_size) {
4005 return __push_leaf_left(fs_info, path, min_data_size,
4006 empty, left, free_space, right_nritems,
4009 btrfs_tree_unlock(left);
4010 free_extent_buffer(left);
4015 * split the path's leaf in two, making sure there is at least data_size
4016 * available for the resulting leaf level of the path.
/*
 * Finish a leaf split: copy items [mid, nritems) and their data from leaf
 * @l into the new empty leaf @right, rebase the copied data offsets, link
 * @right into the parent after @l, and retarget the path to whichever leaf
 * now holds the original slot.
 */
4018 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4019 struct btrfs_fs_info *fs_info,
4020 struct btrfs_path *path,
4021 struct btrfs_extent_buffer *l,
4022 struct extent_buffer *right,
4023 int slot, int mid, int nritems)
4028 struct btrfs_disk_key disk_key;
4029 struct btrfs_map_token token;
4031 btrfs_init_map_token(&token);
/* number of items moving to the new right leaf */
4033 nritems = nritems - mid;
4034 btrfs_set_header_nritems(right, nritems);
4035 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4037 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4038 btrfs_item_nr_offset(mid),
4039 nritems * sizeof(struct btrfs_item));
4041 copy_extent_buffer(right, l,
4042 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4043 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4044 leaf_data_end(fs_info, l), data_copy_size);
/* offsets move right by the space the left leaf no longer uses */
4046 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4048 for (i = 0; i < nritems; i++) {
4049 struct btrfs_item *item = btrfs_item_nr(i);
4052 ioff = btrfs_token_item_offset(right, item, &token);
4053 btrfs_set_token_item_offset(right, item,
4054 ioff + rt_data_off, &token);
4057 btrfs_set_header_nritems(l, mid);
4058 btrfs_item_key(right, &disk_key, 0);
/* insert the new leaf's pointer right after l in the parent */
4059 insert_ptr(trans, fs_info, path, &disk_key, right->start,
4060 path->slots[1] + 1, 1);
4062 btrfs_mark_buffer_dirty(right);
4063 btrfs_mark_buffer_dirty(l);
4064 BUG_ON(path->slots[0] != slot);
4067 btrfs_tree_unlock(path->nodes[0]);
4068 free_extent_buffer(path->nodes[0]);
4069 path->nodes[0] = right;
4070 path->slots[0] -= mid;
4071 path->slots[1] += 1;
4073 btrfs_tree_unlock(right);
4074 free_extent_buffer(right);
4077 BUG_ON(path->slots[0] < 0);
4081 * double splits happen when we need to insert a big item in the middle
4082 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4083 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4086 * We avoid this by trying to push the items on either side of our target
4087 * into the adjacent leaves. If all goes well we can avoid the double split
/*
 * Try to avoid a double leaf split when inserting a large item in the
 * middle of a leaf: push the items after our slot to the right sibling,
 * then (if needed) the items before it to the left, hoping to land the
 * target slot at a leaf edge or to free enough space outright.
 */
4090 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4091 struct btrfs_root *root,
4092 struct btrfs_path *path,
4095 struct btrfs_fs_info *fs_info = root->fs_info;
4100 int space_needed = data_size;
4102 slot = path->slots[0];
/* only count space we don't already have */
4103 if (slot < btrfs_header_nritems(path->nodes[0]))
4104 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4107 * try to push all the items after our slot into the
4110 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4117 nritems = btrfs_header_nritems(path->nodes[0]);
4119 * our goal is to get our slot at the start or end of a leaf. If
4120 * we've done so we're done
4122 if (path->slots[0] == 0 || path->slots[0] == nritems)
4125 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4128 /* try to push all the items before our slot into the next leaf */
4129 slot = path->slots[0];
4130 space_needed = data_size;
4132 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4133 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4146 * split the path's leaf in two, making sure there is at least data_size
4147 * available for the resulting leaf level of the path.
4149 * returns 0 if all went well and < 0 on failure.
/*
 * Split the leaf at path->nodes[0] so at least data_size bytes are free for
 * the slot where ins_key will land.  Tries pushing into neighbours first,
 * then allocates a new leaf 'right' and moves items across; may recurse once
 * (a "double split") for very large items.
 */
4151 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4152 struct btrfs_root *root,
4153 const struct btrfs_key *ins_key,
4154 struct btrfs_path *path, int data_size,
4157 struct btrfs_disk_key disk_key;
4158 struct extent_buffer *l;
4162 struct extent_buffer *right;
4163 struct btrfs_fs_info *fs_info = root->fs_info;
4167 int num_doubles = 0;
4168 int tried_avoid_double = 0;
4171 slot = path->slots[0];
/* extending an existing item must leave room for item + header in one leaf */
4172 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4173 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4176 /* first try to make some room by pushing left and right */
4177 if (data_size && path->nodes[1]) {
4178 int space_needed = data_size;
4180 if (slot < btrfs_header_nritems(l))
4181 space_needed -= btrfs_leaf_free_space(fs_info, l);
4183 wret = push_leaf_right(trans, root, path, space_needed,
4184 space_needed, 0, 0);
4188 space_needed = data_size;
4190 space_needed -= btrfs_leaf_free_space(fs_info,
4192 wret = push_leaf_left(trans, root, path, space_needed,
4193 space_needed, 0, (u32)-1);
4199 /* did the pushes work? */
4200 if (btrfs_leaf_free_space(fs_info, l) >= data_size)
/* splitting the root leaf: grow the tree by one level first */
4204 if (!path->nodes[1]) {
4205 ret = insert_new_root(trans, root, path, 1);
4212 slot = path->slots[0];
4213 nritems = btrfs_header_nritems(l);
/* default split point: middle item (rounded up) */
4214 mid = (nritems + 1) / 2;
4218 leaf_space_used(l, mid, nritems - mid) + data_size >
4219 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4220 if (slot >= nritems) {
4224 if (mid != nritems &&
4225 leaf_space_used(l, mid, nritems - mid) +
4226 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
/* before accepting a double split, try pushing into neighbours once */
4227 if (data_size && !tried_avoid_double)
4228 goto push_for_double;
4234 if (leaf_space_used(l, 0, mid) + data_size >
4235 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4236 if (!extend && data_size && slot == 0) {
4238 } else if ((extend || !data_size) && slot == 0) {
4242 if (mid != nritems &&
4243 leaf_space_used(l, mid, nritems - mid) +
4244 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4245 if (data_size && !tried_avoid_double)
4246 goto push_for_double;
4254 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4256 btrfs_item_key(l, &disk_key, mid);
/* allocate the new right leaf; COW'd off the current leaf's block */
4258 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4259 &disk_key, 0, l->start, 0);
4261 return PTR_ERR(right);
4263 root_add_used(root, fs_info->nodesize);
/* initialize the new leaf's header from scratch */
4265 memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
4266 btrfs_set_header_bytenr(right, right->start);
4267 btrfs_set_header_generation(right, trans->transid);
4268 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4269 btrfs_set_header_owner(right, root->root_key.objectid);
4270 btrfs_set_header_level(right, 0);
4271 write_extent_buffer_fsid(right, fs_info->fsid);
4272 write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
/* empty-right case: link the new leaf after the current one in the parent */
4276 btrfs_set_header_nritems(right, 0);
4277 insert_ptr(trans, fs_info, path, &disk_key,
4278 right->start, path->slots[1] + 1, 1);
4279 btrfs_tree_unlock(path->nodes[0]);
4280 free_extent_buffer(path->nodes[0]);
4281 path->nodes[0] = right;
4283 path->slots[1] += 1;
/* empty-left case: link the new leaf at the current parent slot */
4285 btrfs_set_header_nritems(right, 0);
4286 insert_ptr(trans, fs_info, path, &disk_key,
4287 right->start, path->slots[1], 1);
4288 btrfs_tree_unlock(path->nodes[0]);
4289 free_extent_buffer(path->nodes[0]);
4290 path->nodes[0] = right;
4292 if (path->slots[1] == 0)
4293 fixup_low_keys(fs_info, path, &disk_key, 1);
4296 * We create a new leaf 'right' for the required ins_len and
4297 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4298 * the content of ins_len to 'right'.
4303 copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4306 BUG_ON(num_doubles != 0);
push_for_double:
4314 push_for_double_split(trans, root, path, data_size);
4315 tried_avoid_double = 1;
4316 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
/*
 * Make sure the leaf at path->nodes[0] has at least ins_len bytes free so an
 * item there can be split in place.  Re-searches the tree (with COW) and
 * verifies the target item did not change in the meantime before calling
 * split_leaf().  Only EXTENT_DATA and EXTENT_CSUM items are supported.
 */
4321 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4322 struct btrfs_root *root,
4323 struct btrfs_path *path, int ins_len)
4325 struct btrfs_fs_info *fs_info = root->fs_info;
4326 struct btrfs_key key;
4327 struct extent_buffer *leaf;
4328 struct btrfs_file_extent_item *fi;
4333 leaf = path->nodes[0];
4334 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4336 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4337 key.type != BTRFS_EXTENT_CSUM_KEY);
/* already enough room, nothing to do */
4339 if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
/* remember size (and extent length) to detect concurrent changes below */
4342 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4343 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4344 fi = btrfs_item_ptr(leaf, path->slots[0],
4345 struct btrfs_file_extent_item);
4346 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4348 btrfs_release_path(path);
4350 path->keep_locks = 1;
4351 path->search_for_split = 1;
/* re-search with cow=1 so the leaf is writable */
4352 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4353 path->search_for_split = 0;
4360 leaf = path->nodes[0];
4361 /* if our item isn't there, return now */
4362 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4365 /* the leaf has changed, it now has room. return now */
4366 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4369 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4370 fi = btrfs_item_ptr(leaf, path->slots[0],
4371 struct btrfs_file_extent_item);
4372 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4376 btrfs_set_path_blocking(path);
/* extend=1: keep the path pointing at the same item after the split */
4377 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4381 path->keep_locks = 0;
4382 btrfs_unlock_up_safe(path, 1);
4385 path->keep_locks = 0;
/*
 * Split the item at path->slots[0] at byte offset split_offset, in place in
 * the same leaf.  The original item keeps the first split_offset bytes; a new
 * item with new_key gets the remainder.  The caller must have guaranteed
 * room for one extra struct btrfs_item in the leaf (see setup_leaf_for_split).
 */
4389 static noinline int split_item(struct btrfs_fs_info *fs_info,
4390 struct btrfs_path *path,
4391 const struct btrfs_key *new_key,
4392 unsigned long split_offset)
4394 struct extent_buffer *leaf;
4395 struct btrfs_item *item;
4396 struct btrfs_item *new_item;
4402 struct btrfs_disk_key disk_key;
4404 leaf = path->nodes[0];
4405 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4407 btrfs_set_path_blocking(path);
4409 item = btrfs_item_nr(path->slots[0]);
4410 orig_offset = btrfs_item_offset(leaf, item);
4411 item_size = btrfs_item_size(leaf, item);
/* stage the full item data in a temporary buffer before rewriting the leaf */
4413 buf = kmalloc(item_size, GFP_NOFS);
4417 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4418 path->slots[0]), item_size);
/* the new item goes directly after the one being split */
4420 slot = path->slots[0] + 1;
4421 nritems = btrfs_header_nritems(leaf);
4422 if (slot != nritems) {
4423 /* shift the items */
4424 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4425 btrfs_item_nr_offset(slot),
4426 (nritems - slot) * sizeof(struct btrfs_item));
4429 btrfs_cpu_key_to_disk(&disk_key, new_key);
4430 btrfs_set_item_key(leaf, &disk_key, slot);
4432 new_item = btrfs_item_nr(slot);
/* new item reuses the original data offset and takes the tail bytes */
4434 btrfs_set_item_offset(leaf, new_item, orig_offset);
4435 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4437 btrfs_set_item_offset(leaf, item,
4438 orig_offset + item_size - split_offset);
4439 btrfs_set_item_size(leaf, item, split_offset);
4441 btrfs_set_header_nritems(leaf, nritems + 1);
4443 /* write the data for the start of the original item */
4444 write_extent_buffer(leaf, buf,
4445 btrfs_item_ptr_offset(leaf, path->slots[0]),
4448 /* write the data for the new item */
4449 write_extent_buffer(leaf, buf + split_offset,
4450 btrfs_item_ptr_offset(leaf, slot),
4451 item_size - split_offset);
4452 btrfs_mark_buffer_dirty(leaf);
4454 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4460 * This function splits a single item into two items,
4461 * giving 'new_key' to the new item and splitting the
4462 * old one at split_offset (from the start of the item).
4464 * The path may be released by this operation. After
4465 * the split, the path is pointing to the old item. The
4466 * new item is going to be in the same node as the old one.
4468 * Note, the item being split must be small enough to live alone on
4469 * a tree block with room for one extra struct btrfs_item
4471 * This allows us to split the item in place, keeping a lock on the
4472 * leaf the entire time.
4474 int btrfs_split_item(struct btrfs_trans_handle *trans,
4475 struct btrfs_root *root,
4476 struct btrfs_path *path,
4477 const struct btrfs_key *new_key,
4478 unsigned long split_offset)
/* make room for one extra item header, then do the in-place split */
4481 ret = setup_leaf_for_split(trans, root, path,
4482 sizeof(struct btrfs_item));
4486 ret = split_item(root->fs_info, path, new_key, split_offset);
4491 * This function duplicate a item, giving 'new_key' to the new item.
4492 * It guarantees both items live in the same tree leaf and the new item
4493 * is contiguous with the original item.
4495 * This allows us to split file extent in place, keeping a lock on the
4496 * leaf the entire time.
4498 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4499 struct btrfs_root *root,
4500 struct btrfs_path *path,
4501 const struct btrfs_key *new_key)
4503 struct extent_buffer *leaf;
4507 leaf = path->nodes[0];
4508 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
/* need room for the copy's data plus one more item header */
4509 ret = setup_leaf_for_split(trans, root, path,
4510 item_size + sizeof(struct btrfs_item));
/* insert an empty item with new_key right after the original ... */
4515 setup_items_for_insert(root, path, new_key, &item_size,
4516 item_size, item_size +
4517 sizeof(struct btrfs_item), 1);
4518 leaf = path->nodes[0];
/* ... then copy the original item's bytes (slot - 1) into it */
4519 memcpy_extent_buffer(leaf,
4520 btrfs_item_ptr_offset(leaf, path->slots[0]),
4521 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4527 * make the item pointed to by the path smaller. new_size indicates
4528 * how small to make it, and from_end tells us if we just chop bytes
4529 * off the end of the item or if we shift the item to chop bytes off
/*
 * Shrink the item at path->slots[0] to new_size bytes.  from_end selects
 * whether bytes are chopped off the end (data shifted toward the leaf end)
 * or off the front (item key offset is bumped by the removed amount).
 */
4532 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4533 struct btrfs_path *path, u32 new_size, int from_end)
4536 struct extent_buffer *leaf;
4537 struct btrfs_item *item;
4539 unsigned int data_end;
4540 unsigned int old_data_start;
4541 unsigned int old_size;
4542 unsigned int size_diff;
4544 struct btrfs_map_token token;
4546 btrfs_init_map_token(&token);
4548 leaf = path->nodes[0];
4549 slot = path->slots[0];
4551 old_size = btrfs_item_size_nr(leaf, slot);
4552 if (old_size == new_size)
4555 nritems = btrfs_header_nritems(leaf);
4556 data_end = leaf_data_end(fs_info, leaf);
4558 old_data_start = btrfs_item_offset_nr(leaf, slot);
4560 size_diff = old_size - new_size;
4563 BUG_ON(slot >= nritems);
4566 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4568 /* first correct the data pointers */
4569 for (i = slot; i < nritems; i++) {
4571 item = btrfs_item_nr(i);
4573 ioff = btrfs_token_item_offset(leaf, item, &token);
4574 btrfs_set_token_item_offset(leaf, item,
4575 ioff + size_diff, &token);
4578 /* shift the data */
4580 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4581 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4582 data_end, old_data_start + new_size - data_end);
/* truncating from the front: the key offset must move forward too */
4584 struct btrfs_disk_key disk_key;
4587 btrfs_item_key(leaf, &disk_key, slot);
4589 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4591 struct btrfs_file_extent_item *fi;
4593 fi = btrfs_item_ptr(leaf, slot,
4594 struct btrfs_file_extent_item);
/* fi will land size_diff bytes earlier after the data is shifted */
4595 fi = (struct btrfs_file_extent_item *)(
4596 (unsigned long)fi - size_diff);
4598 if (btrfs_file_extent_type(leaf, fi) ==
4599 BTRFS_FILE_EXTENT_INLINE) {
4600 ptr = btrfs_item_ptr_offset(leaf, slot);
4601 memmove_extent_buffer(leaf, ptr,
4603 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4607 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4608 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4609 data_end, old_data_start - data_end);
4611 offset = btrfs_disk_key_offset(&disk_key);
4612 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4613 btrfs_set_item_key(leaf, &disk_key, slot);
4615 fixup_low_keys(fs_info, path, &disk_key, 1);
4618 item = btrfs_item_nr(slot);
4619 btrfs_set_item_size(leaf, item, new_size);
4620 btrfs_mark_buffer_dirty(leaf);
4622 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4623 btrfs_print_leaf(leaf);
4629 * make the item pointed to by the path bigger, data_size is the added size.
/*
 * Grow the item at path->slots[0] by data_size bytes, shifting the data of
 * all later items toward the start of the leaf data area to make room.
 * The leaf must already have data_size bytes free (checked, panics if not).
 */
4631 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4635 struct extent_buffer *leaf;
4636 struct btrfs_item *item;
4638 unsigned int data_end;
4639 unsigned int old_data;
4640 unsigned int old_size;
4642 struct btrfs_map_token token;
4644 btrfs_init_map_token(&token);
4646 leaf = path->nodes[0];
4648 nritems = btrfs_header_nritems(leaf);
4649 data_end = leaf_data_end(fs_info, leaf);
4651 if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4652 btrfs_print_leaf(leaf);
4655 slot = path->slots[0];
4656 old_data = btrfs_item_end_nr(leaf, slot);
4659 if (slot >= nritems) {
4660 btrfs_print_leaf(leaf);
4661 btrfs_crit(fs_info, "slot %d too large, nritems %d",
4667 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4669 /* first correct the data pointers */
4670 for (i = slot; i < nritems; i++) {
4672 item = btrfs_item_nr(i);
4674 ioff = btrfs_token_item_offset(leaf, item, &token);
4675 btrfs_set_token_item_offset(leaf, item,
4676 ioff - data_size, &token);
4679 /* shift the data */
4680 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4681 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4682 data_end, old_data - data_end);
4684 data_end = old_data;
4685 old_size = btrfs_item_size_nr(leaf, slot);
4686 item = btrfs_item_nr(slot);
4687 btrfs_set_item_size(leaf, item, old_size + data_size);
4688 btrfs_mark_buffer_dirty(leaf);
4690 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4691 btrfs_print_leaf(leaf);
4697 * this is a helper for btrfs_insert_empty_items, the main goal here is
4698 * to save stack depth by doing the bulk of the work in a function
4699 * that doesn't call btrfs_search_slot
/*
 * Insert nr new items at path->slots[0] in the leaf, reserving data_size[i]
 * bytes of data for each.  total_data is the sum of data_size[], total_size
 * adds the item headers.  The leaf must already have total_size bytes free;
 * the caller fills in the item data afterwards.
 */
4701 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4702 const struct btrfs_key *cpu_key, u32 *data_size,
4703 u32 total_data, u32 total_size, int nr)
4705 struct btrfs_fs_info *fs_info = root->fs_info;
4706 struct btrfs_item *item;
4709 unsigned int data_end;
4710 struct btrfs_disk_key disk_key;
4711 struct extent_buffer *leaf;
4713 struct btrfs_map_token token;
/* inserting at slot 0 changes the leaf's first key: fix parents first */
4715 if (path->slots[0] == 0) {
4716 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4717 fixup_low_keys(fs_info, path, &disk_key, 1);
4719 btrfs_unlock_up_safe(path, 1);
4721 btrfs_init_map_token(&token);
4723 leaf = path->nodes[0];
4724 slot = path->slots[0];
4726 nritems = btrfs_header_nritems(leaf);
4727 data_end = leaf_data_end(fs_info, leaf);
4729 if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4730 btrfs_print_leaf(leaf);
4731 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4732 total_size, btrfs_leaf_free_space(fs_info, leaf));
/* inserting in the middle: make room in both item and data areas */
4736 if (slot != nritems) {
4737 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4739 if (old_data < data_end) {
4740 btrfs_print_leaf(leaf);
4741 btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4742 slot, old_data, data_end);
4746 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4748 /* first correct the data pointers */
4749 for (i = slot; i < nritems; i++) {
4752 item = btrfs_item_nr(i);
4753 ioff = btrfs_token_item_offset(leaf, item, &token);
4754 btrfs_set_token_item_offset(leaf, item,
4755 ioff - total_data, &token);
4757 /* shift the items */
4758 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4759 btrfs_item_nr_offset(slot),
4760 (nritems - slot) * sizeof(struct btrfs_item));
4762 /* shift the data */
4763 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4764 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4765 data_end, old_data - data_end);
4766 data_end = old_data;
4769 /* setup the item for the new data */
4770 for (i = 0; i < nr; i++) {
4771 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4772 btrfs_set_item_key(leaf, &disk_key, slot + i);
4773 item = btrfs_item_nr(slot + i);
4774 btrfs_set_token_item_offset(leaf, item,
4775 data_end - data_size[i], &token);
4776 data_end -= data_size[i];
4777 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4780 btrfs_set_header_nritems(leaf, nritems + nr);
4781 btrfs_mark_buffer_dirty(leaf);
4783 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4784 btrfs_print_leaf(leaf);
4790 * Given a key and some data, insert items into the tree.
4791 * This does all the path init required, making room in the tree if needed.
4793 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4794 struct btrfs_root *root,
4795 struct btrfs_path *path,
4796 const struct btrfs_key *cpu_key, u32 *data_size,
/* total bytes of item data requested across all nr items */
4805 for (i = 0; i < nr; i++)
4806 total_data += data_size[i];
4808 total_size = total_data + (nr * sizeof(struct btrfs_item));
/* search with ins_len=total_size so the leaf is split/made roomy as needed */
4809 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4815 slot = path->slots[0];
4818 setup_items_for_insert(root, path, cpu_key, data_size,
4819 total_data, total_size, nr);
4824 * Given a key and some data, insert an item into the tree.
4825 * This does all the path init required, making room in the tree if needed.
4827 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4828 const struct btrfs_key *cpu_key, void *data,
4832 struct btrfs_path *path;
4833 struct extent_buffer *leaf;
/* allocate a scratch path; freed below */
4836 path = btrfs_alloc_path();
4839 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
/* copy the caller's bytes into the freshly reserved item */
4841 leaf = path->nodes[0];
4842 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4843 write_extent_buffer(leaf, data, ptr, data_size);
4844 btrfs_mark_buffer_dirty(leaf);
4846 btrfs_free_path(path);
4851 * delete the pointer from a given node.
4853 * the tree should have been previously balanced so the deletion does not
/*
 * Remove the key/block pointer at the given slot of the node at
 * path->nodes[level], logging the change in the tree mod log.
 */
4856 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4857 int level, int slot)
4859 struct btrfs_fs_info *fs_info = root->fs_info;
4860 struct extent_buffer *parent = path->nodes[level];
4864 nritems = btrfs_header_nritems(parent);
/* not the last slot: close the gap by moving later pointers down */
4865 if (slot != nritems - 1) {
4867 ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4868 nritems - slot - 1);
4871 memmove_extent_buffer(parent,
4872 btrfs_node_key_ptr_offset(slot),
4873 btrfs_node_key_ptr_offset(slot + 1),
4874 sizeof(struct btrfs_key_ptr) *
4875 (nritems - slot - 1));
4877 ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4883 btrfs_set_header_nritems(parent, nritems);
4884 if (nritems == 0 && parent == root->node) {
4885 BUG_ON(btrfs_header_level(root->node) != 1);
4886 /* just turn the root into a leaf and break */
4887 btrfs_set_header_level(root->node, 0);
4888 } else if (slot == 0) {
/* first key of the node changed: propagate it up the path */
4889 struct btrfs_disk_key disk_key;
4891 btrfs_node_key(parent, &disk_key, 0);
4892 fixup_low_keys(fs_info, path, &disk_key, level + 1);
4894 btrfs_mark_buffer_dirty(parent);
4898 * a helper function to delete the leaf pointed to by path->slots[1] and
4901 * This deletes the pointer in path->nodes[1] and frees the leaf
4902 * block extent. zero is returned if it all worked out, < 0 otherwise.
4904 * The path must have already been setup for deleting the leaf, including
4905 * all the proper balancing. path->nodes[1] must be locked.
4907 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4908 struct btrfs_root *root,
4909 struct btrfs_path *path,
4910 struct extent_buffer *leaf)
4912 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
/* unlink the leaf from its parent node */
4913 del_ptr(root, path, 1, path->slots[1]);
4916 * btrfs_free_extent is expensive, we want to make sure we
4917 * aren't holding any locks when we call it
4919 btrfs_unlock_up_safe(path, 0);
4921 root_sub_used(root, leaf->len);
/* hold a temporary reference across the free, then drop it as stale */
4923 extent_buffer_get(leaf);
4924 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4925 free_extent_buffer_stale(leaf);
4928 * delete the item at the leaf level in path. If that empties
4929 * the leaf, remove it from the tree
/*
 * Delete nr consecutive items starting at 'slot' from the leaf at
 * path->nodes[0].  If the leaf becomes empty it is removed from the tree;
 * if it drops below 1/3 full, items are pushed into the neighbours and the
 * leaf is removed when that empties it.
 */
4931 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4932 struct btrfs_path *path, int slot, int nr)
4934 struct btrfs_fs_info *fs_info = root->fs_info;
4935 struct extent_buffer *leaf;
4936 struct btrfs_item *item;
4943 struct btrfs_map_token token;
4945 btrfs_init_map_token(&token);
4947 leaf = path->nodes[0];
4948 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
/* dsize = total data bytes being removed */
4950 for (i = 0; i < nr; i++)
4951 dsize += btrfs_item_size_nr(leaf, slot + i);
4953 nritems = btrfs_header_nritems(leaf);
/* deleting from the middle: compact data and item arrays */
4955 if (slot + nr != nritems) {
4956 int data_end = leaf_data_end(fs_info, leaf);
4958 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4960 BTRFS_LEAF_DATA_OFFSET + data_end,
4961 last_off - data_end)
4963 for (i = slot + nr; i < nritems; i++) {
4966 item = btrfs_item_nr(i);
4967 ioff = btrfs_token_item_offset(leaf, item, &token);
4968 btrfs_set_token_item_offset(leaf, item,
4969 ioff + dsize, &token);
4972 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4973 btrfs_item_nr_offset(slot + nr),
4974 sizeof(struct btrfs_item) *
4975 (nritems - slot - nr));
4977 btrfs_set_header_nritems(leaf, nritems - nr);
4980 /* delete the leaf if we've emptied it */
4982 if (leaf == root->node) {
/* empty root leaf just stays as the (empty) tree root */
4983 btrfs_set_header_level(leaf, 0);
4985 btrfs_set_path_blocking(path);
4986 clean_tree_block(fs_info, leaf);
4987 btrfs_del_leaf(trans, root, path, leaf);
4990 int used = leaf_space_used(leaf, 0, nritems);
4992 struct btrfs_disk_key disk_key;
/* slot 0 was deleted: first key changed, fix the parents */
4994 btrfs_item_key(leaf, &disk_key, 0);
4995 fixup_low_keys(fs_info, path, &disk_key, 1);
4998 /* delete the leaf if it is mostly empty */
4999 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5000 /* push_leaf_left fixes the path.
5001 * make sure the path still points to our leaf
5002 * for possible call to del_ptr below
5004 slot = path->slots[1];
5005 extent_buffer_get(leaf);
5007 btrfs_set_path_blocking(path);
5008 wret = push_leaf_left(trans, root, path, 1, 1,
5010 if (wret < 0 && wret != -ENOSPC)
5013 if (path->nodes[0] == leaf &&
5014 btrfs_header_nritems(leaf)) {
5015 wret = push_leaf_right(trans, root, path, 1,
5017 if (wret < 0 && wret != -ENOSPC)
5021 if (btrfs_header_nritems(leaf) == 0) {
5022 path->slots[1] = slot;
5023 btrfs_del_leaf(trans, root, path, leaf);
5024 free_extent_buffer(leaf);
5027 /* if we're still in the path, make sure
5028 * we're dirty. Otherwise, one of the
5029 * push_leaf functions must have already
5030 * dirtied this buffer
5032 if (path->nodes[0] == leaf)
5033 btrfs_mark_buffer_dirty(leaf);
5034 free_extent_buffer(leaf);
5037 btrfs_mark_buffer_dirty(leaf);
5044 * search the tree again to find a leaf with lesser keys
5045 * returns 0 if it found something or 1 if there are no lesser leaves.
5046 * returns < 0 on io errors.
5048 * This may release the path, and so you may lose any locks held at the
5051 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5053 struct btrfs_key key;
5054 struct btrfs_disk_key found_key;
/* compute the largest key strictly smaller than the leaf's first key */
5057 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5059 if (key.offset > 0) {
5061 } else if (key.type > 0) {
5063 key.offset = (u64)-1;
5064 } else if (key.objectid > 0) {
5067 key.offset = (u64)-1;
/* already at the smallest possible key: no lesser leaf exists */
5072 btrfs_release_path(path);
5073 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5076 btrfs_item_key(path->nodes[0], &found_key, 0);
5077 ret = comp_keys(&found_key, &key);
5079 * We might have had an item with the previous key in the tree right
5080 * before we released our path. And after we released our path, that
5081 * item might have been pushed to the first slot (0) of the leaf we
5082 * were holding due to a tree balance. Alternatively, an item with the
5083 * previous key can exist as the only element of a leaf (big fat item).
5084 * Therefore account for these 2 cases, so that our callers (like
5085 * btrfs_previous_item) don't miss an existing item with a key matching
5086 * the previous key we computed above.
5094 * A helper function to walk down the tree starting at min_key, and looking
5095 * for nodes or leaves that are have a minimum transaction id.
5096 * This is used by the btree defrag code, and tree logging
5098 * This does not cow, but it does stuff the starting key it finds back
5099 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5100 * key and get a writable path.
5102 * This honors path->lowest_level to prevent descent past a given level
5105 * min_trans indicates the oldest transaction that you are interested
5106 * in walking through. Any nodes or leaves older than min_trans are
5107 * skipped over (without reading them).
5109 * returns zero if something useful was found, < 0 on error and 1 if there
5110 * was nothing in the tree that matched the search criteria.
5112 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5113 struct btrfs_path *path,
5116 struct btrfs_fs_info *fs_info = root->fs_info;
5117 struct extent_buffer *cur;
5118 struct btrfs_key found_key;
5124 int keep_locks = path->keep_locks;
5126 path->keep_locks = 1;
/* read-lock the root and walk down without cow */
5128 cur = btrfs_read_lock_root_node(root);
5129 level = btrfs_header_level(cur);
5130 WARN_ON(path->nodes[level]);
5131 path->nodes[level] = cur;
5132 path->locks[level] = BTRFS_READ_LOCK;
/* whole tree older than min_trans: nothing can match */
5134 if (btrfs_header_generation(cur) < min_trans) {
5139 nritems = btrfs_header_nritems(cur);
5140 level = btrfs_header_level(cur);
5141 sret = btrfs_bin_search(cur, min_key, level, &slot);
5143 /* at the lowest level, we're done, setup the path and exit */
5144 if (level == path->lowest_level) {
5145 if (slot >= nritems)
5148 path->slots[level] = slot;
5149 btrfs_item_key_to_cpu(cur, &found_key, slot);
5152 if (sret && slot > 0)
5155 * check this node pointer against the min_trans parameters.
5156 * If it is too old, skip to the next one.
5158 while (slot < nritems) {
5161 gen = btrfs_node_ptr_generation(cur, slot);
5162 if (gen < min_trans) {
5170 * we didn't find a candidate key in this node, walk forward
5171 * and find another one
5173 if (slot >= nritems) {
5174 path->slots[level] = slot;
5175 btrfs_set_path_blocking(path);
5176 sret = btrfs_find_next_key(root, path, min_key, level,
5179 btrfs_release_path(path);
5185 /* save our key for returning back */
5186 btrfs_node_key_to_cpu(cur, &found_key, slot);
5187 path->slots[level] = slot;
5188 if (level == path->lowest_level) {
5192 btrfs_set_path_blocking(path);
5193 cur = read_node_slot(fs_info, cur, slot);
/* descend: read-lock the child and release ancestors we no longer need */
5199 btrfs_tree_read_lock(cur);
5201 path->locks[level - 1] = BTRFS_READ_LOCK;
5202 path->nodes[level - 1] = cur;
5203 unlock_up(path, level, 1, 0, NULL);
5204 btrfs_clear_path_blocking(path, NULL, 0);
5207 path->keep_locks = keep_locks;
5209 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5210 btrfs_set_path_blocking(path);
/* stuff the found key back into min_key for the caller */
5211 memcpy(min_key, &found_key, sizeof(found_key));
/* Descend one level: read the child at the current slot and step into it. */
5216 static int tree_move_down(struct btrfs_fs_info *fs_info,
5217 struct btrfs_path *path,
5220 struct extent_buffer *eb;
5222 BUG_ON(*level == 0);
5223 eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5227 path->nodes[*level - 1] = eb;
5228 path->slots[*level - 1] = 0;
/*
 * Advance to the next slot at the current level; when the level is
 * exhausted, climb toward root_level and advance there instead.
 */
5233 static int tree_move_next_or_upnext(struct btrfs_path *path,
5234 int *level, int root_level)
5238 nritems = btrfs_header_nritems(path->nodes[*level]);
5240 path->slots[*level]++;
5242 while (path->slots[*level] >= nritems) {
5243 if (*level == root_level)
/* give up the node we finished and move up one level */
5247 path->slots[*level] = 0;
5248 free_extent_buffer(path->nodes[*level]);
5249 path->nodes[*level] = NULL;
5251 path->slots[*level]++;
5253 nritems = btrfs_header_nritems(path->nodes[*level]);
5260 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
/*
 * Advance the path one step for the tree compare: go down when allowed,
 * otherwise next/up-next, and report the key now pointed at via *key.
 */
5263 static int tree_advance(struct btrfs_fs_info *fs_info,
5264 struct btrfs_path *path,
5265 int *level, int root_level,
5267 struct btrfs_key *key)
5271 if (*level == 0 || !allow_down) {
5272 ret = tree_move_next_or_upnext(path, level, root_level);
5274 ret = tree_move_down(fs_info, path, level);
/* leaves carry item keys, interior nodes carry node keys */
5278 btrfs_item_key_to_cpu(path->nodes[*level], key,
5279 path->slots[*level]);
5281 btrfs_node_key_to_cpu(path->nodes[*level], key,
5282 path->slots[*level]);
/*
 * Compare the items currently pointed at by the two paths: first by size,
 * then byte-by-byte via a bounce buffer (tmp_buf) for the left item's data.
 */
5287 static int tree_compare_item(struct btrfs_path *left_path,
5288 struct btrfs_path *right_path,
5293 unsigned long off1, off2;
5295 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5296 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5300 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5301 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5302 right_path->slots[0]);
5304 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5306 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5313 #define ADVANCE_ONLY_NEXT -1
5316 * This function compares two trees and calls the provided callback for
5317 * every changed/new/deleted item it finds.
5318 * If shared tree blocks are encountered, whole subtrees are skipped, making
5319 * the compare pretty fast on snapshotted subvolumes.
5321 * This currently works on commit roots only. As commit roots are read only,
5322 * we don't do any locking. The commit roots are protected with transactions.
5323 * Transactions are ended and rejoined when a commit is tried in between.
5325 * This function checks for modifications done to the trees while comparing.
5326 * If it detects a change, it aborts immediately.
5328 int btrfs_compare_trees(struct btrfs_root *left_root,
5329 struct btrfs_root *right_root,
5330 btrfs_changed_cb_t changed_cb, void *ctx)
5332 struct btrfs_fs_info *fs_info = left_root->fs_info;
5335 struct btrfs_path *left_path = NULL;
5336 struct btrfs_path *right_path = NULL;
5337 struct btrfs_key left_key;
5338 struct btrfs_key right_key;
5339 char *tmp_buf = NULL;
5340 int left_root_level;
5341 int right_root_level;
5344 int left_end_reached;
5345 int right_end_reached;
5353 left_path = btrfs_alloc_path();
5358 right_path = btrfs_alloc_path();
5364 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5370 left_path->search_commit_root = 1;
5371 left_path->skip_locking = 1;
5372 right_path->search_commit_root = 1;
5373 right_path->skip_locking = 1;
5376 * Strategy: Go to the first items of both trees. Then do
5378 * If both trees are at level 0
5379 * Compare keys of current items
5380 * If left < right treat left item as new, advance left tree
5382 * If left > right treat right item as deleted, advance right tree
5384 * If left == right do deep compare of items, treat as changed if
5385 * needed, advance both trees and repeat
5386 * If both trees are at the same level but not at level 0
5387 * Compare keys of current nodes/leafs
5388 * If left < right advance left tree and repeat
5389 * If left > right advance right tree and repeat
5390 * If left == right compare blockptrs of the next nodes/leafs
5391 * If they match advance both trees but stay at the same level
5393 * If they don't match advance both trees while allowing to go
5395 * If tree levels are different
5396 * Advance the tree that needs it and repeat
5398 * Advancing a tree means:
5399 * If we are at level 0, try to go to the next slot. If that's not
5400 * possible, go one level up and repeat. Stop when we found a level
5401 * where we could go to the next slot. We may at this point be on a
5404 * If we are not at level 0 and not on shared tree blocks, go one
5407 * If we are not at level 0 and on shared tree blocks, go one slot to
5408 * the right if possible or go up and right.
5411 down_read(&fs_info->commit_root_sem);
5412 left_level = btrfs_header_level(left_root->commit_root);
5413 left_root_level = left_level;
5414 left_path->nodes[left_level] = left_root->commit_root;
5415 extent_buffer_get(left_path->nodes[left_level]);
5417 right_level = btrfs_header_level(right_root->commit_root);
5418 right_root_level = right_level;
5419 right_path->nodes[right_level] = right_root->commit_root;
5420 extent_buffer_get(right_path->nodes[right_level]);
5421 up_read(&fs_info->commit_root_sem);
5423 if (left_level == 0)
5424 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5425 &left_key, left_path->slots[left_level]);
5427 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5428 &left_key, left_path->slots[left_level]);
5429 if (right_level == 0)
5430 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5431 &right_key, right_path->slots[right_level]);
5433 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5434 &right_key, right_path->slots[right_level]);
5436 left_end_reached = right_end_reached = 0;
5437 advance_left = advance_right = 0;
5440 if (advance_left && !left_end_reached) {
5441 ret = tree_advance(fs_info, left_path, &left_level,
5443 advance_left != ADVANCE_ONLY_NEXT,
5446 left_end_reached = ADVANCE;
5451 if (advance_right && !right_end_reached) {
5452 ret = tree_advance(fs_info, right_path, &right_level,
5454 advance_right != ADVANCE_ONLY_NEXT,
5457 right_end_reached = ADVANCE;
5463 if (left_end_reached && right_end_reached) {
5466 } else if (left_end_reached) {
5467 if (right_level == 0) {
5468 ret = changed_cb(left_path, right_path,
5470 BTRFS_COMPARE_TREE_DELETED,
5475 advance_right = ADVANCE;
5477 } else if (right_end_reached) {
5478 if (left_level == 0) {
5479 ret = changed_cb(left_path, right_path,
5481 BTRFS_COMPARE_TREE_NEW,
5486 advance_left = ADVANCE;
5490 if (left_level == 0 && right_level == 0) {
5491 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5493 ret = changed_cb(left_path, right_path,
5495 BTRFS_COMPARE_TREE_NEW,
5499 advance_left = ADVANCE;
5500 } else if (cmp > 0) {
5501 ret = changed_cb(left_path, right_path,
5503 BTRFS_COMPARE_TREE_DELETED,
5507 advance_right = ADVANCE;
5509 enum btrfs_compare_tree_result result;
5511 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5512 ret = tree_compare_item(left_path, right_path,
5515 result = BTRFS_COMPARE_TREE_CHANGED;
5517 result = BTRFS_COMPARE_TREE_SAME;
5518 ret = changed_cb(left_path, right_path,
5519 &left_key, result, ctx);
5522 advance_left = ADVANCE;
5523 advance_right = ADVANCE;
5525 } else if (left_level == right_level) {
5526 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5528 advance_left = ADVANCE;
5529 } else if (cmp > 0) {
5530 advance_right = ADVANCE;
5532 left_blockptr = btrfs_node_blockptr(
5533 left_path->nodes[left_level],
5534 left_path->slots[left_level]);
5535 right_blockptr = btrfs_node_blockptr(
5536 right_path->nodes[right_level],
5537 right_path->slots[right_level]);
5538 left_gen = btrfs_node_ptr_generation(
5539 left_path->nodes[left_level],
5540 left_path->slots[left_level]);
5541 right_gen = btrfs_node_ptr_generation(
5542 right_path->nodes[right_level],
5543 right_path->slots[right_level]);
5544 if (left_blockptr == right_blockptr &&
5545 left_gen == right_gen) {
5547 * As we're on a shared block, don't
5548 * allow to go deeper.
5550 advance_left = ADVANCE_ONLY_NEXT;
5551 advance_right = ADVANCE_ONLY_NEXT;
5553 advance_left = ADVANCE;
5554 advance_right = ADVANCE;
5557 } else if (left_level < right_level) {
5558 advance_right = ADVANCE;
5560 advance_left = ADVANCE;
5565 btrfs_free_path(left_path);
5566 btrfs_free_path(right_path);
5572 * this is similar to btrfs_next_leaf, but does not try to preserve
5573 * and fixup the path. It looks for and returns the next key in the
5574 tree based on the current path and the min_trans parameter.
5576 * 0 is returned if another key is found, < 0 if there are any errors
5577 * and 1 is returned if there are no higher keys in the tree
5579 * path->keep_locks should be set to 1 on the search made before
5580 * calling this function.
5582 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5583 struct btrfs_key *key, int level, u64 min_trans)
/*
 * NOTE(review): this chunk is a partial extraction — the embedded original
 * line numbers jump (5583 -> 5586, 5618 -> 5620, ...), so local declarations
 * (slot, ret, orig_lowest), braces and goto/return lines are elided between
 * the numbered lines below.  Do not treat the visible text as compilable.
 */
5586 struct extent_buffer *c;
/* Contract: the preceding search must have been done with keep_locks set. */
5588 WARN_ON(!path->keep_locks);
/* Walk upward from 'level' until a node that still has a "next" slot. */
5589 while (level < BTRFS_MAX_LEVEL) {
5590 if (!path->nodes[level])
/* Candidate: the slot just past the current one at this level. */
5593 slot = path->slots[level] + 1;
5594 c = path->nodes[level];
/* Ran off the end of this node: either go up a level, or ... */
5596 if (slot >= btrfs_header_nritems(c)) {
5599 struct btrfs_key cur_key;
/* No parent cached — nothing above us to advance into. */
5600 if (level + 1 >= BTRFS_MAX_LEVEL ||
5601 !path->nodes[level + 1])
/* Parent is locked: we can advance via the parent directly. */
5604 if (path->locks[level + 1]) {
/*
 * Otherwise re-search: remember the last key of this node
 * (item key at level 0, node key above) and redo the lookup
 * with lowest_level pinned to 'level'.
 */
5609 slot = btrfs_header_nritems(c) - 1;
5611 btrfs_item_key_to_cpu(c, &cur_key, slot);
5613 btrfs_node_key_to_cpu(c, &cur_key, slot);
/* Save/restore lowest_level around the re-search. */
5615 orig_lowest = path->lowest_level;
5616 btrfs_release_path(path);
5617 path->lowest_level = level;
5618 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5620 path->lowest_level = orig_lowest;
/* Re-read node/slot: the re-search rebuilt the path. */
5624 c = path->nodes[level];
5625 slot = path->slots[level];
/* Found a usable slot: return its key to the caller. */
5632 btrfs_item_key_to_cpu(c, key, slot);
5634 u64 gen = btrfs_node_ptr_generation(c, slot);
/* Skip subtrees older than min_trans (elided branch continues the scan). */
5636 if (gen < min_trans) {
5640 btrfs_node_key_to_cpu(c, key, slot);
5648 * search the tree again to find a leaf with greater keys
5649 * returns 0 if it found something or 1 if there are no greater leaves.
5650 * returns < 0 on io errors.
5652 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
/* Thin wrapper: time_seq == 0 selects the current tree (no old snapshot). */
5654 return btrfs_next_old_leaf(root, path, 0);
5657 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
/*
 * NOTE(review): partial extraction — the embedded original line numbers jump,
 * so declarations (ret, slot, level, nritems), braces, labels and error paths
 * are elided between the numbered lines below.
 *
 * Walks to the next leaf, optionally (time_seq != 0) against an old version
 * of the tree.
 */
5662 struct extent_buffer *c;
5663 struct extent_buffer *next;
5664 struct btrfs_key key;
/* Remember spinning preference so it can be restored before returning. */
5667 int old_spinning = path->leave_spinning;
5668 int next_rw_lock = 0;
5670 nritems = btrfs_header_nritems(path->nodes[0]);
/* Anchor: the last item key of the current leaf, used to re-search below. */
5674 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5679 btrfs_release_path(path);
/* keep_locks so the re-search leaves the whole path locked for us. */
5681 path->keep_locks = 1;
5682 path->leave_spinning = 1;
/* time_seq != 0: search the old (snapshotted) tree state instead. */
5685 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5687 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5688 path->keep_locks = 0;
5693 nritems = btrfs_header_nritems(path->nodes[0]);
5695 * by releasing the path above we dropped all our locks. A balance
5696 * could have added more items next to the key that used to be
5697 * at the very end of the block. So, check again here and
5698 * advance the path if there are now more items available.
5700 if (nritems > 0 && path->slots[0] < nritems - 1) {
5707 * So the above check misses one case:
5708 * - after releasing the path above, someone has removed the item that
5709 * used to be at the very end of the block, and balance between leaves
5710 * gets another one with bigger key.offset to replace it.
5712 * This one should be returned as well, or we can get leaf corruption
5713 * later (esp. in __btrfs_drop_extents()).
5715 * And a bit more explanation about this check,
5716 * with ret > 0, the key isn't found, the path points to the slot
5717 * where it should be inserted, so the path->slots[0] item must be the
5720 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
/* Climb until a level with a next slot; elided code handles level 0 setup. */
5725 while (level < BTRFS_MAX_LEVEL) {
5726 if (!path->nodes[level]) {
5731 slot = path->slots[level] + 1;
5732 c = path->nodes[level];
/* This node exhausted: go up; at the very top there is no next leaf. */
5733 if (slot >= btrfs_header_nritems(c)) {
5735 if (level == BTRFS_MAX_LEVEL) {
/* Drop the previously read 'next' buffer/lock before retrying. */
5743 btrfs_tree_unlock_rw(next, next_rw_lock);
5744 free_extent_buffer(next);
/* Inherit the lock mode of the level we are descending from. */
5748 next_rw_lock = path->locks[level];
5749 ret = read_block_for_search(root, path, &next, level,
5755 btrfs_release_path(path);
5759 if (!path->skip_locking) {
/* Try-lock first to avoid a lock-order deadlock (see below). */
5760 ret = btrfs_try_tree_read_lock(next);
5761 if (!ret && time_seq) {
5763 * If we don't get the lock, we may be racing
5764 * with push_leaf_left, holding that lock while
5765 * itself waiting for the leaf we've currently
5766 * locked. To solve this situation, we give up
5767 * on our lock and cycle.
5769 free_extent_buffer(next);
5770 btrfs_release_path(path);
/* Try-lock failed (non-time_seq case): block and take the read lock. */
5775 btrfs_set_path_blocking(path);
5776 btrfs_tree_read_lock(next);
5777 btrfs_clear_path_blocking(path, next,
5780 next_rw_lock = BTRFS_READ_LOCK;
5784 path->slots[level] = slot;
/* Descend: swap 'next' into the path at this level, releasing the old node. */
5787 c = path->nodes[level];
5788 if (path->locks[level])
5789 btrfs_tree_unlock_rw(c, path->locks[level]);
5791 free_extent_buffer(c);
5792 path->nodes[level] = next;
5793 path->slots[level] = 0;
5794 if (!path->skip_locking)
5795 path->locks[level] = next_rw_lock;
/* Keep descending toward level 0 through each child's slot 0. */
5799 ret = read_block_for_search(root, path, &next, level,
5805 btrfs_release_path(path);
5809 if (!path->skip_locking) {
5810 ret = btrfs_try_tree_read_lock(next);
5812 btrfs_set_path_blocking(path);
5813 btrfs_tree_read_lock(next);
5814 btrfs_clear_path_blocking(path, next,
5817 next_rw_lock = BTRFS_READ_LOCK;
/* Success path: keep only the leaf locked, restore spinning preference. */
5822 unlock_up(path, 0, 1, 0, NULL);
5823 path->leave_spinning = old_spinning;
5825 btrfs_set_path_blocking(path);
5831 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5832 * searching until it gets past min_objectid or finds an item of 'type'
5834 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5836 int btrfs_previous_item(struct btrfs_root *root,
5837 struct btrfs_path *path, u64 min_objectid,
/*
 * NOTE(review): partial extraction — the surrounding loop, local declarations
 * (ret, nritems) and return statements are elided between the numbered lines.
 * Per the header comment above: walks backwards until it finds an item of
 * 'type' or passes min_objectid.
 */
5840 struct btrfs_key found_key;
5841 struct extent_buffer *leaf;
/* At slot 0: must step back to the previous leaf (may block). */
5846 if (path->slots[0] == 0) {
5847 btrfs_set_path_blocking(path);
5848 ret = btrfs_prev_leaf(root, path);
5854 leaf = path->nodes[0];
5855 nritems = btrfs_header_nritems(leaf);
/* Empty leaf / slot one past the end — elided code handles this case. */
5858 if (path->slots[0] == nritems)
5861 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
/* Walked below the requested objectid range: stop. */
5862 if (found_key.objectid < min_objectid)
/* Exact type match: found the previous item of interest. */
5864 if (found_key.type == type)
/* Same objectid but already below 'type': no match exists. */
5866 if (found_key.objectid == min_objectid &&
5867 found_key.type < type)
5874 * search in extent tree to find a previous Metadata/Data extent item with
5877 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5879 int btrfs_previous_extent_item(struct btrfs_root *root,
5880 struct btrfs_path *path, u64 min_objectid)
5882 struct btrfs_key found_key;
5883 struct extent_buffer *leaf;
5888 if (path->slots[0] == 0) {
5889 btrfs_set_path_blocking(path);
5890 ret = btrfs_prev_leaf(root, path);
5896 leaf = path->nodes[0];
5897 nritems = btrfs_header_nritems(leaf);
5900 if (path->slots[0] == nritems)
5903 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5904 if (found_key.objectid < min_objectid)
5906 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5907 found_key.type == BTRFS_METADATA_ITEM_KEY)
5909 if (found_key.objectid == min_objectid &&
5910 found_key.type < BTRFS_EXTENT_ITEM_KEY)