1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
13 #include "delayed-inode.h"
15 #include "transaction.h"
18 #include "inode-item.h"
19 #include "space-info.h"
20 #include "accessors.h"
21 #include "file-item.h"
23 #define BTRFS_DELAYED_WRITEBACK 512
24 #define BTRFS_DELAYED_BACKGROUND 128
25 #define BTRFS_DELAYED_BATCH 16
27 static struct kmem_cache *delayed_node_cache;
29 int __init btrfs_delayed_inode_init(void)
31 delayed_node_cache = KMEM_CACHE(btrfs_delayed_node, 0);
32 if (!delayed_node_cache)
37 void __cold btrfs_delayed_inode_exit(void)
39 kmem_cache_destroy(delayed_node_cache);
42 void btrfs_init_delayed_root(struct btrfs_delayed_root *delayed_root)
44 atomic_set(&delayed_root->items, 0);
45 atomic_set(&delayed_root->items_seq, 0);
46 delayed_root->nodes = 0;
47 spin_lock_init(&delayed_root->lock);
48 init_waitqueue_head(&delayed_root->wait);
49 INIT_LIST_HEAD(&delayed_root->node_list);
50 INIT_LIST_HEAD(&delayed_root->prepare_list);
53 static inline void btrfs_init_delayed_node(
54 struct btrfs_delayed_node *delayed_node,
55 struct btrfs_root *root, u64 inode_id)
57 delayed_node->root = root;
58 delayed_node->inode_id = inode_id;
59 refcount_set(&delayed_node->refs, 0);
60 delayed_node->ins_root = RB_ROOT_CACHED;
61 delayed_node->del_root = RB_ROOT_CACHED;
62 mutex_init(&delayed_node->mutex);
63 INIT_LIST_HEAD(&delayed_node->n_list);
64 INIT_LIST_HEAD(&delayed_node->p_list);
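/*
 * Look up the delayed node for an inode, either from the inode's cached
 * pointer or from the root's xarray, and take a reference on it. Returns
 * NULL if the inode has no delayed node.
 */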
67 static struct btrfs_delayed_node *btrfs_get_delayed_node(
68 struct btrfs_inode *btrfs_inode)
70 struct btrfs_root *root = btrfs_inode->root;
71 u64 ino = btrfs_ino(btrfs_inode);
72 struct btrfs_delayed_node *node;
74 node = READ_ONCE(btrfs_inode->delayed_node);
76 refcount_inc(&node->refs);
80 spin_lock(&root->inode_lock);
81 node = xa_load(&root->delayed_nodes, ino);
84 if (btrfs_inode->delayed_node) {
85 refcount_inc(&node->refs); /* can be accessed */
86 BUG_ON(btrfs_inode->delayed_node != node);
87 spin_unlock(&root->inode_lock);
92 * It's possible that we're racing into the middle of removing
93 * this node from the xarray. In this case, the refcount
94 * was zero and it should never go back to one. Just return
95 * NULL like it was never in the xarray at all; our release
96 * function is in the process of removing it.
98 * Some implementations of refcount_inc refuse to bump the
99 * refcount once it has hit zero. If we don't do this dance
100 * here, refcount_inc() may decide to just WARN_ONCE() instead
101 * of actually bumping the refcount.
103 * If this node is properly in the xarray, we want to bump the
104 * refcount twice, once for the inode and once for this get operation.
107 if (refcount_inc_not_zero(&node->refs)) {
108 refcount_inc(&node->refs);
109 btrfs_inode->delayed_node = node;
114 spin_unlock(&root->inode_lock);
117 spin_unlock(&root->inode_lock);
122 /* Will return either the node or ERR_PTR(-ENOMEM). */
123 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
124 struct btrfs_inode *btrfs_inode)
126 struct btrfs_delayed_node *node;
127 struct btrfs_root *root = btrfs_inode->root;
128 u64 ino = btrfs_ino(btrfs_inode);
133 node = btrfs_get_delayed_node(btrfs_inode);
137 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
139 return ERR_PTR(-ENOMEM);
140 btrfs_init_delayed_node(node, root, ino);
142 /* Cached in the inode and can be accessed. */
143 refcount_set(&node->refs, 2);
145 /* Allocate and reserve the slot; from now on xa_load() can return NULL for this index. */
146 ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
147 if (ret == -ENOMEM) {
148 kmem_cache_free(delayed_node_cache, node);
149 return ERR_PTR(-ENOMEM);
151 spin_lock(&root->inode_lock);
152 ptr = xa_load(&root->delayed_nodes, ino);
154 /* Somebody inserted it, go back and read it. */
155 spin_unlock(&root->inode_lock);
156 kmem_cache_free(delayed_node_cache, node);
160 ptr = xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
161 ASSERT(xa_err(ptr) != -EINVAL);
162 ASSERT(xa_err(ptr) != -ENOMEM);
164 btrfs_inode->delayed_node = node;
165 spin_unlock(&root->inode_lock);
171 * Call it when holding delayed_node->mutex
173 * If mod = 1, add this node into the prepared list.
175 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
176 struct btrfs_delayed_node *node,
179 spin_lock(&root->lock);
180 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
181 if (!list_empty(&node->p_list))
182 list_move_tail(&node->p_list, &root->prepare_list);
184 list_add_tail(&node->p_list, &root->prepare_list);
186 list_add_tail(&node->n_list, &root->node_list);
187 list_add_tail(&node->p_list, &root->prepare_list);
188 refcount_inc(&node->refs); /* inserted into list */
190 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
192 spin_unlock(&root->lock);
195 /* Call it when holding delayed_node->mutex */
196 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
197 struct btrfs_delayed_node *node)
199 spin_lock(&root->lock);
200 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
202 refcount_dec(&node->refs); /* not in the list */
203 list_del_init(&node->n_list);
204 if (!list_empty(&node->p_list))
205 list_del_init(&node->p_list);
206 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
208 spin_unlock(&root->lock);
211 static struct btrfs_delayed_node *btrfs_first_delayed_node(
212 struct btrfs_delayed_root *delayed_root)
215 struct btrfs_delayed_node *node = NULL;
217 spin_lock(&delayed_root->lock);
218 if (list_empty(&delayed_root->node_list))
221 p = delayed_root->node_list.next;
222 node = list_entry(p, struct btrfs_delayed_node, n_list);
223 refcount_inc(&node->refs);
225 spin_unlock(&delayed_root->lock);
230 static struct btrfs_delayed_node *btrfs_next_delayed_node(
231 struct btrfs_delayed_node *node)
233 struct btrfs_delayed_root *delayed_root;
235 struct btrfs_delayed_node *next = NULL;
237 delayed_root = node->root->fs_info->delayed_root;
238 spin_lock(&delayed_root->lock);
239 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
240 /* not in the list */
241 if (list_empty(&delayed_root->node_list))
243 p = delayed_root->node_list.next;
244 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
247 p = node->n_list.next;
249 next = list_entry(p, struct btrfs_delayed_node, n_list);
250 refcount_inc(&next->refs);
252 spin_unlock(&delayed_root->lock);
257 static void __btrfs_release_delayed_node(
258 struct btrfs_delayed_node *delayed_node,
261 struct btrfs_delayed_root *delayed_root;
266 delayed_root = delayed_node->root->fs_info->delayed_root;
268 mutex_lock(&delayed_node->mutex);
269 if (delayed_node->count)
270 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
272 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
273 mutex_unlock(&delayed_node->mutex);
275 if (refcount_dec_and_test(&delayed_node->refs)) {
276 struct btrfs_root *root = delayed_node->root;
278 spin_lock(&root->inode_lock);
280 * Once our refcount goes to zero, nobody is allowed to bump it
281 * back up. We can delete it now.
283 ASSERT(refcount_read(&delayed_node->refs) == 0);
284 xa_erase(&root->delayed_nodes, delayed_node->inode_id);
285 spin_unlock(&root->inode_lock);
286 kmem_cache_free(delayed_node_cache, delayed_node);
290 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
292 __btrfs_release_delayed_node(node, 0);
295 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
296 struct btrfs_delayed_root *delayed_root)
299 struct btrfs_delayed_node *node = NULL;
301 spin_lock(&delayed_root->lock);
302 if (list_empty(&delayed_root->prepare_list))
305 p = delayed_root->prepare_list.next;
307 node = list_entry(p, struct btrfs_delayed_node, p_list);
308 refcount_inc(&node->refs);
310 spin_unlock(&delayed_root->lock);
315 static inline void btrfs_release_prepared_delayed_node(
316 struct btrfs_delayed_node *node)
318 __btrfs_release_delayed_node(node, 1);
321 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
322 struct btrfs_delayed_node *node,
323 enum btrfs_delayed_item_type type)
325 struct btrfs_delayed_item *item;
327 item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
329 item->data_len = data_len;
331 item->bytes_reserved = 0;
332 item->delayed_node = node;
333 RB_CLEAR_NODE(&item->rb_node);
334 INIT_LIST_HEAD(&item->log_list);
335 item->logged = false;
336 refcount_set(&item->refs, 1);
342 * Look up the delayed item by key.
344 * @root:  the rbtree root to search (a delayed node's ins_root or del_root)
345 * @index: the dir index value to lookup (offset of a dir index key)
347 * Note: if no item with the given index is found, NULL is returned.
350 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
351 struct rb_root *root,
354 struct rb_node *node = root->rb_node;
355 struct btrfs_delayed_item *delayed_item = NULL;
358 delayed_item = rb_entry(node, struct btrfs_delayed_item,
360 if (delayed_item->index < index)
361 node = node->rb_right;
362 else if (delayed_item->index > index)
363 node = node->rb_left;
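/*
 * Link a delayed item into the node's insertion or deletion rbtree, keyed by
 * the dir index, bumping the node's item count and the global pending items
 * counter. Returns -EEXIST if an item with the same index already exists.
 */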
371 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
372 struct btrfs_delayed_item *ins)
374 struct rb_node **p, *node;
375 struct rb_node *parent_node = NULL;
376 struct rb_root_cached *root;
377 struct btrfs_delayed_item *item;
378 bool leftmost = true;
380 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
381 root = &delayed_node->ins_root;
383 root = &delayed_node->del_root;
385 p = &root->rb_root.rb_node;
386 node = &ins->rb_node;
390 item = rb_entry(parent_node, struct btrfs_delayed_item,
393 if (item->index < ins->index) {
396 } else if (item->index > ins->index) {
403 rb_link_node(node, parent_node, p);
404 rb_insert_color_cached(node, root, leftmost);
406 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
407 ins->index >= delayed_node->index_cnt)
408 delayed_node->index_cnt = ins->index + 1;
410 delayed_node->count++;
411 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
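/*
 * Account one completed delayed item and wake up waiters once the backlog
 * drops below the background threshold or after every BTRFS_DELAYED_BATCH
 * completed items.
 */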
415 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
417 int seq = atomic_inc_return(&delayed_root->items_seq);
419 /* atomic_dec_return implies a barrier */
420 if ((atomic_dec_return(&delayed_root->items) <
421 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
422 cond_wake_up_nomb(&delayed_root->wait);
425 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
427 struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
428 struct rb_root_cached *root;
429 struct btrfs_delayed_root *delayed_root;
431 /* Not inserted, ignore it. */
432 if (RB_EMPTY_NODE(&delayed_item->rb_node))
435 /* If it's in an rbtree, then the delayed node's mutex must be held. */
436 lockdep_assert_held(&delayed_node->mutex);
438 delayed_root = delayed_node->root->fs_info->delayed_root;
440 if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
441 root = &delayed_node->ins_root;
443 root = &delayed_node->del_root;
445 rb_erase_cached(&delayed_item->rb_node, root);
446 RB_CLEAR_NODE(&delayed_item->rb_node);
447 delayed_node->count--;
449 finish_one_item(delayed_root);
452 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
455 __btrfs_remove_delayed_item(item);
456 if (refcount_dec_and_test(&item->refs))
461 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
462 struct btrfs_delayed_node *delayed_node)
465 struct btrfs_delayed_item *item = NULL;
467 p = rb_first_cached(&delayed_node->ins_root);
469 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
474 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
475 struct btrfs_delayed_node *delayed_node)
478 struct btrfs_delayed_item *item = NULL;
480 p = rb_first_cached(&delayed_node->del_root);
482 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
487 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
488 struct btrfs_delayed_item *item)
491 struct btrfs_delayed_item *next = NULL;
493 p = rb_next(&item->rb_node);
495 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
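/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's block reserve into the delayed block reserve. Only deletion
 * items record the reservation in bytes_reserved; insertions are accounted
 * per leaf via the delayed node's index_item_leaves.
 */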
500 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
501 struct btrfs_delayed_item *item)
503 struct btrfs_block_rsv *src_rsv;
504 struct btrfs_block_rsv *dst_rsv;
505 struct btrfs_fs_info *fs_info = trans->fs_info;
509 if (!trans->bytes_reserved)
512 src_rsv = trans->block_rsv;
513 dst_rsv = &fs_info->delayed_block_rsv;
515 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
518 * Here we migrate space rsv from the transaction rsv, since we have already
519 * reserved space when starting a transaction. So no need to reserve qgroup space here.
522 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
524 trace_btrfs_space_reservation(fs_info, "delayed_item",
525 item->delayed_node->inode_id,
528 * For insertions we track reserved metadata space by accounting
529 * for the number of leaves that will be used, based on the delayed
530 * node's curr_index_batch_size and index_item_leaves fields.
532 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
533 item->bytes_reserved = num_bytes;
539 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
540 struct btrfs_delayed_item *item)
542 struct btrfs_block_rsv *rsv;
543 struct btrfs_fs_info *fs_info = root->fs_info;
545 if (!item->bytes_reserved)
548 rsv = &fs_info->delayed_block_rsv;
550 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
551 * to release/reserve qgroup space.
553 trace_btrfs_space_reservation(fs_info, "delayed_item",
554 item->delayed_node->inode_id,
555 item->bytes_reserved, 0);
556 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
559 static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
560 unsigned int num_leaves)
562 struct btrfs_fs_info *fs_info = node->root->fs_info;
563 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
565 /* There are no space reservations during log replay, bail out. */
566 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
569 trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
571 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
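/*
 * Reserve metadata space for the delayed inode item update, either by
 * migrating space from the transaction's block reserve or, when nothing was
 * reserved when the transaction was started, by reserving fresh space (with
 * qgroup accounting) directly in the delayed block reserve.
 */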
574 static int btrfs_delayed_inode_reserve_metadata(
575 struct btrfs_trans_handle *trans,
576 struct btrfs_root *root,
577 struct btrfs_delayed_node *node)
579 struct btrfs_fs_info *fs_info = root->fs_info;
580 struct btrfs_block_rsv *src_rsv;
581 struct btrfs_block_rsv *dst_rsv;
585 src_rsv = trans->block_rsv;
586 dst_rsv = &fs_info->delayed_block_rsv;
588 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
591 * btrfs_dirty_inode() will update the inode under btrfs_join_transaction(),
592 * which, for speed, doesn't reserve space. This is a problem since we
593 * still need to reserve space for this update, so try to reserve the space here.
596 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
597 * we always reserve enough to update the inode item.
599 if (!src_rsv || (!trans->bytes_reserved &&
600 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
601 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
602 BTRFS_QGROUP_RSV_META_PREALLOC, true);
605 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
606 BTRFS_RESERVE_NO_FLUSH);
607 /* NO_FLUSH could only fail with -ENOSPC */
608 ASSERT(ret == 0 || ret == -ENOSPC);
610 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
612 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
616 trace_btrfs_space_reservation(fs_info, "delayed_inode",
617 node->inode_id, num_bytes, 1);
618 node->bytes_reserved = num_bytes;
624 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
625 struct btrfs_delayed_node *node,
628 struct btrfs_block_rsv *rsv;
630 if (!node->bytes_reserved)
633 rsv = &fs_info->delayed_block_rsv;
634 trace_btrfs_space_reservation(fs_info, "delayed_inode",
635 node->inode_id, node->bytes_reserved, 0);
636 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
638 btrfs_qgroup_free_meta_prealloc(node->root,
639 node->bytes_reserved);
641 btrfs_qgroup_convert_reserved_meta(node->root,
642 node->bytes_reserved);
643 node->bytes_reserved = 0;
647 * Insert a single delayed item or a batch of delayed items, as many as possible
648 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
649 * in the rbtree, and if there's a gap between two consecutive dir index items,
650 * then it means at some point we had delayed dir indexes to add but they got
651 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
652 * into the subvolume tree. Dir index keys also have their offsets coming from a
653 * monotonically increasing counter, so we can't get new keys with an offset that
654 * fits within a gap between delayed dir index items.
656 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
657 struct btrfs_root *root,
658 struct btrfs_path *path,
659 struct btrfs_delayed_item *first_item)
661 struct btrfs_fs_info *fs_info = root->fs_info;
662 struct btrfs_delayed_node *node = first_item->delayed_node;
663 LIST_HEAD(item_list);
664 struct btrfs_delayed_item *curr;
665 struct btrfs_delayed_item *next;
666 const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
667 struct btrfs_item_batch batch;
668 struct btrfs_key first_key;
669 const u32 first_data_size = first_item->data_len;
671 char *ins_data = NULL;
673 bool continuous_keys_only = false;
675 lockdep_assert_held(&node->mutex);
678 * During normal operation the delayed index offset is continuously
679 * increasing, so we can batch insert all items as there will not be any
680 * overlapping keys in the tree.
682 * The exception to this is log replay, where we may have interleaved
683 * offsets in the tree, so our batch needs to be continuous keys only in
684 * order to ensure we do not end up with out of order items in our leaf.
686 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
687 continuous_keys_only = true;
690 * For delayed items to insert, we track reserved metadata bytes based
691 * on the number of leaves that we will use.
692 * See btrfs_insert_delayed_dir_index() and
693 * btrfs_delayed_item_reserve_metadata().
695 ASSERT(first_item->bytes_reserved == 0);
697 list_add_tail(&first_item->tree_list, &item_list);
698 batch.total_data_size = first_data_size;
700 total_size = first_data_size + sizeof(struct btrfs_item);
706 next = __btrfs_next_delayed_item(curr);
711 * We cannot allow gaps in the key space if we're doing log replay.
714 if (continuous_keys_only && (next->index != curr->index + 1))
717 ASSERT(next->bytes_reserved == 0);
719 next_size = next->data_len + sizeof(struct btrfs_item);
720 if (total_size + next_size > max_size)
723 list_add_tail(&next->tree_list, &item_list);
725 total_size += next_size;
726 batch.total_data_size += next->data_len;
731 first_key.objectid = node->inode_id;
732 first_key.type = BTRFS_DIR_INDEX_KEY;
733 first_key.offset = first_item->index;
734 batch.keys = &first_key;
735 batch.data_sizes = &first_data_size;
737 struct btrfs_key *ins_keys;
741 ins_data = kmalloc(batch.nr * sizeof(u32) +
742 batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
747 ins_sizes = (u32 *)ins_data;
748 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
749 batch.keys = ins_keys;
750 batch.data_sizes = ins_sizes;
751 list_for_each_entry(curr, &item_list, tree_list) {
752 ins_keys[i].objectid = node->inode_id;
753 ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
754 ins_keys[i].offset = curr->index;
755 ins_sizes[i] = curr->data_len;
760 ret = btrfs_insert_empty_items(trans, root, path, &batch);
764 list_for_each_entry(curr, &item_list, tree_list) {
767 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
768 write_extent_buffer(path->nodes[0], &curr->data,
769 (unsigned long)data_ptr, curr->data_len);
774 * Now release our path before releasing the delayed items and their
775 * metadata reservations, so that we don't block other tasks for more time than needed.
778 btrfs_release_path(path);
780 ASSERT(node->index_item_leaves > 0);
783 * For normal operations we will batch an entire leaf's worth of delayed
784 * items, so if there are more items to process we can decrement
785 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
787 * However for log replay we may not have inserted an entire leaf's
788 * worth of items, and we may not have had continuous items, so decrementing
789 * here would mess up the index_item_leaves accounting. For this case
790 * only clean up the accounting when there are no items left.
792 if (next && !continuous_keys_only) {
794 * We inserted one batch of items into a leaf and there are more
795 * items to flush in a future batch, so now release one unit of
796 * metadata space from the delayed block reserve, corresponding
797 * to the leaf we just flushed.
799 btrfs_delayed_item_release_leaves(node, 1);
800 node->index_item_leaves--;
803 * There are no more items to insert. We can have a number of
804 * reserved leaves > 1 here - this happens when many dir index
805 * items are added and then removed before they are flushed (file
806 * names with a very short life, never span a transaction). So
807 * release all remaining leaves.
809 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
810 node->index_item_leaves = 0;
813 list_for_each_entry_safe(curr, next, &item_list, tree_list) {
814 list_del(&curr->tree_list);
815 btrfs_release_delayed_item(curr);
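/*
 * Flush all pending insertion items of a delayed node into the subvolume
 * tree, one leaf-sized batch at a time, taking and dropping the node's mutex
 * around each batch.
 */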
822 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
823 struct btrfs_path *path,
824 struct btrfs_root *root,
825 struct btrfs_delayed_node *node)
830 struct btrfs_delayed_item *curr;
832 mutex_lock(&node->mutex);
833 curr = __btrfs_first_delayed_insertion_item(node);
835 mutex_unlock(&node->mutex);
838 ret = btrfs_insert_delayed_item(trans, root, path, curr);
839 mutex_unlock(&node->mutex);
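/*
 * Delete from the leaf the dir index item that the path points to, together
 * with any following leaf items that also have a matching delayed deletion
 * item, and release the metadata space reserved for them.
 */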
845 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
846 struct btrfs_root *root,
847 struct btrfs_path *path,
848 struct btrfs_delayed_item *item)
850 const u64 ino = item->delayed_node->inode_id;
851 struct btrfs_fs_info *fs_info = root->fs_info;
852 struct btrfs_delayed_item *curr, *next;
853 struct extent_buffer *leaf = path->nodes[0];
854 LIST_HEAD(batch_list);
855 int nitems, slot, last_slot;
857 u64 total_reserved_size = item->bytes_reserved;
859 ASSERT(leaf != NULL);
861 slot = path->slots[0];
862 last_slot = btrfs_header_nritems(leaf) - 1;
864 * Our caller always gives us a path pointing to an existing item, so
865 * this can not happen.
867 ASSERT(slot <= last_slot);
868 if (WARN_ON(slot > last_slot))
873 list_add_tail(&curr->tree_list, &batch_list);
876 * Keep checking if the next delayed item matches the next item in the
877 * leaf - if so, we can add it to the batch of items to delete from the leaf.
880 while (slot < last_slot) {
881 struct btrfs_key key;
883 next = __btrfs_next_delayed_item(curr);
888 btrfs_item_key_to_cpu(leaf, &key, slot);
889 if (key.objectid != ino ||
890 key.type != BTRFS_DIR_INDEX_KEY ||
891 key.offset != next->index)
895 list_add_tail(&curr->tree_list, &batch_list);
896 total_reserved_size += curr->bytes_reserved;
899 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
903 /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
904 if (total_reserved_size > 0) {
906 * Check btrfs_delayed_item_reserve_metadata() to see why we
907 * don't need to release/reserve qgroup space.
909 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
910 total_reserved_size, 0);
911 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
912 total_reserved_size, NULL);
915 list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
916 list_del(&curr->tree_list);
917 btrfs_release_delayed_item(curr);
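/*
 * Run all pending deletion items of a delayed node: look up each dir index
 * key in the subvolume tree and batch-delete the matching leaf items,
 * skipping items that no longer exist in the tree.
 */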
923 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
924 struct btrfs_path *path,
925 struct btrfs_root *root,
926 struct btrfs_delayed_node *node)
928 struct btrfs_key key;
931 key.objectid = node->inode_id;
932 key.type = BTRFS_DIR_INDEX_KEY;
935 struct btrfs_delayed_item *item;
937 mutex_lock(&node->mutex);
938 item = __btrfs_first_delayed_deletion_item(node);
940 mutex_unlock(&node->mutex);
944 key.offset = item->index;
945 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
948 * There's no matching item in the leaf. This means we
949 * have already deleted this item in a past run of the
950 * delayed items. We ignore errors when running delayed
951 * items from an async context, through a work queue job
952 * running btrfs_async_run_delayed_root(), and don't
953 * release delayed items that failed to complete. This
954 * is because we will retry later, and at transaction
955 * commit time we always run delayed items and will
956 * then deal with errors if they fail to run again.
958 * So just release delayed items for which we can't find
959 * an item in the tree, and move to the next item.
961 btrfs_release_path(path);
962 btrfs_release_delayed_item(item);
964 } else if (ret == 0) {
965 ret = btrfs_batch_delete_items(trans, root, path, item);
966 btrfs_release_path(path);
970 * We unlock and relock on each iteration, this is to prevent
971 * blocking other tasks for too long while we are being run from
972 * the async context (work queue job). Those tasks are typically
973 * running system calls like creat/mkdir/rename/unlink/etc which
974 * need to add delayed items to this delayed node.
976 mutex_unlock(&node->mutex);
982 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
984 struct btrfs_delayed_root *delayed_root;
987 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
988 ASSERT(delayed_node->root);
989 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
990 delayed_node->count--;
992 delayed_root = delayed_node->root->fs_info->delayed_root;
993 finish_one_item(delayed_root);
997 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1000 if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
1001 struct btrfs_delayed_root *delayed_root;
1003 ASSERT(delayed_node->root);
1004 delayed_node->count--;
1006 delayed_root = delayed_node->root->fs_info->delayed_root;
1007 finish_one_item(delayed_root);
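/*
 * Copy the delayed node's in-memory inode item into the subvolume tree and,
 * if a delayed iref deletion is pending, also delete the inode's last
 * INODE_REF/EXTREF item.
 */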
1011 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1012 struct btrfs_root *root,
1013 struct btrfs_path *path,
1014 struct btrfs_delayed_node *node)
1016 struct btrfs_fs_info *fs_info = root->fs_info;
1017 struct btrfs_key key;
1018 struct btrfs_inode_item *inode_item;
1019 struct extent_buffer *leaf;
1023 key.objectid = node->inode_id;
1024 key.type = BTRFS_INODE_ITEM_KEY;
1027 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1032 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1038 leaf = path->nodes[0];
1039 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1040 struct btrfs_inode_item);
1041 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1042 sizeof(struct btrfs_inode_item));
1043 btrfs_mark_buffer_dirty(trans, leaf);
1045 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1049 * Now we're going to delete the INODE_REF/EXTREF, which should be the
1050 * only ref left. Check if the next item is an INODE_REF/EXTREF.
1052 * But if we're the last item already, release and search for the last INODE_REF/EXTREF.
1055 if (path->slots[0] + 1 >= btrfs_header_nritems(leaf)) {
1056 key.objectid = node->inode_id;
1057 key.type = BTRFS_INODE_EXTREF_KEY;
1058 key.offset = (u64)-1;
1060 btrfs_release_path(path);
1061 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1065 ASSERT(path->slots[0] > 0);
1068 leaf = path->nodes[0];
1072 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1073 if (key.objectid != node->inode_id)
1075 if (key.type != BTRFS_INODE_REF_KEY &&
1076 key.type != BTRFS_INODE_EXTREF_KEY)
1080 * Delayed iref deletion is for an inode that has only one link,
1081 * so there is only one iref. The case of several irefs being
1082 * in the same item doesn't exist.
1084 ret = btrfs_del_item(trans, root, path);
1086 btrfs_release_delayed_iref(node);
1087 btrfs_release_path(path);
1089 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1090 btrfs_release_delayed_inode(node);
1093 * If we fail to update the delayed inode we need to abort the
1094 * transaction, because we could leave the inode with the improper counts behind.
1097 if (ret && ret != -ENOENT)
1098 btrfs_abort_transaction(trans, ret);
1103 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1104 struct btrfs_root *root,
1105 struct btrfs_path *path,
1106 struct btrfs_delayed_node *node)
1110 mutex_lock(&node->mutex);
1111 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1112 mutex_unlock(&node->mutex);
1116 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1117 mutex_unlock(&node->mutex);
1122 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1123 struct btrfs_path *path,
1124 struct btrfs_delayed_node *node)
1128 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1132 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1136 ret = btrfs_record_root_in_trans(trans, node->root);
1139 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1144 * Called when committing the transaction.
1145 * Returns 0 on success.
1146 * Returns < 0 on error and returns with an aborted transaction with any
1147 * outstanding delayed items cleaned up.
1149 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1151 struct btrfs_fs_info *fs_info = trans->fs_info;
1152 struct btrfs_delayed_root *delayed_root;
1153 struct btrfs_delayed_node *curr_node, *prev_node;
1154 struct btrfs_path *path;
1155 struct btrfs_block_rsv *block_rsv;
1157 bool count = (nr > 0);
1159 if (TRANS_ABORTED(trans))
1162 path = btrfs_alloc_path();
1166 block_rsv = trans->block_rsv;
1167 trans->block_rsv = &fs_info->delayed_block_rsv;
1169 delayed_root = fs_info->delayed_root;
1171 curr_node = btrfs_first_delayed_node(delayed_root);
1172 while (curr_node && (!count || nr--)) {
1173 ret = __btrfs_commit_inode_delayed_items(trans, path,
1176 btrfs_abort_transaction(trans, ret);
1180 prev_node = curr_node;
1181 curr_node = btrfs_next_delayed_node(curr_node);
1183 * See the comment below about releasing path before releasing
1184 * node. If the commit of delayed items was successful the path
1185 * should always be released, but in case of an error, it may
1186 * point to locked extent buffers (a leaf at the very least).
1188 ASSERT(path->nodes[0] == NULL);
1189 btrfs_release_delayed_node(prev_node);
1193 * Release the path to avoid a potential deadlock and lockdep splat when
1194 * releasing the delayed node, as that requires taking the delayed node's
1195 * mutex. If another task starts running delayed items before we take
1196 * the mutex, it will first lock the mutex and then it may try to lock
1197 * the same btree path (leaf).
1199 btrfs_free_path(path);
1202 btrfs_release_delayed_node(curr_node);
1203 trans->block_rsv = block_rsv;
1208 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1210 return __btrfs_run_delayed_items(trans, -1);
1213 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1215 return __btrfs_run_delayed_items(trans, nr);
1218 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1219 struct btrfs_inode *inode)
1221 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1222 struct btrfs_path *path;
1223 struct btrfs_block_rsv *block_rsv;
1229 mutex_lock(&delayed_node->mutex);
1230 if (!delayed_node->count) {
1231 mutex_unlock(&delayed_node->mutex);
1232 btrfs_release_delayed_node(delayed_node);
1235 mutex_unlock(&delayed_node->mutex);
1237 path = btrfs_alloc_path();
1239 btrfs_release_delayed_node(delayed_node);
1243 block_rsv = trans->block_rsv;
1244 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1246 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1248 btrfs_release_delayed_node(delayed_node);
1249 btrfs_free_path(path);
1250 trans->block_rsv = block_rsv;
1255 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1257 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1258 struct btrfs_trans_handle *trans;
1259 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1260 struct btrfs_path *path;
1261 struct btrfs_block_rsv *block_rsv;
1267 mutex_lock(&delayed_node->mutex);
1268 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1269 mutex_unlock(&delayed_node->mutex);
1270 btrfs_release_delayed_node(delayed_node);
1273 mutex_unlock(&delayed_node->mutex);
1275 trans = btrfs_join_transaction(delayed_node->root);
1276 if (IS_ERR(trans)) {
1277 ret = PTR_ERR(trans);
1281 path = btrfs_alloc_path();
1287 block_rsv = trans->block_rsv;
1288 trans->block_rsv = &fs_info->delayed_block_rsv;
1290 mutex_lock(&delayed_node->mutex);
1291 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1292 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1293 path, delayed_node);
1296 mutex_unlock(&delayed_node->mutex);
1298 btrfs_free_path(path);
1299 trans->block_rsv = block_rsv;
1301 btrfs_end_transaction(trans);
1302 btrfs_btree_balance_dirty(fs_info);
1304 btrfs_release_delayed_node(delayed_node);
1309 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1311 struct btrfs_delayed_node *delayed_node;
1313 delayed_node = READ_ONCE(inode->delayed_node);
1317 inode->delayed_node = NULL;
1318 btrfs_release_delayed_node(delayed_node);
1321 struct btrfs_async_delayed_work {
1322 struct btrfs_delayed_root *delayed_root;
1324 struct btrfs_work work;
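/*
 * Work queue callback: flush prepared delayed nodes in the background until
 * the backlog is low enough or the requested number of nodes has been
 * processed.
 */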
1327 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1329 struct btrfs_async_delayed_work *async_work;
1330 struct btrfs_delayed_root *delayed_root;
1331 struct btrfs_trans_handle *trans;
1332 struct btrfs_path *path;
1333 struct btrfs_delayed_node *delayed_node = NULL;
1334 struct btrfs_root *root;
1335 struct btrfs_block_rsv *block_rsv;
1338 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1339 delayed_root = async_work->delayed_root;
1341 path = btrfs_alloc_path();
1346 if (atomic_read(&delayed_root->items) <
1347 BTRFS_DELAYED_BACKGROUND / 2)
1350 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1354 root = delayed_node->root;
1356 trans = btrfs_join_transaction(root);
1357 if (IS_ERR(trans)) {
1358 btrfs_release_path(path);
1359 btrfs_release_prepared_delayed_node(delayed_node);
1364 block_rsv = trans->block_rsv;
1365 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1367 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1369 trans->block_rsv = block_rsv;
1370 btrfs_end_transaction(trans);
1371 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1373 btrfs_release_path(path);
1374 btrfs_release_prepared_delayed_node(delayed_node);
1377 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1378 || total_done < async_work->nr);
1380 btrfs_free_path(path);
1382 wake_up(&delayed_root->wait);
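/*
 * Queue an async work item on the delayed_workers workqueue to flush delayed
 * nodes in the background. A nr of 0 means keep flushing until the backlog
 * is low enough.
 */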
1387 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1388 struct btrfs_fs_info *fs_info, int nr)
1390 struct btrfs_async_delayed_work *async_work;
1392 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1396 async_work->delayed_root = delayed_root;
1397 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
1398 async_work->nr = nr;
1400 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1404 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1406 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
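/*
 * Helper for btrfs_balance_delayed_items(): the wait can end once enough
 * items have completed since we started waiting or once the backlog has
 * dropped back below the background threshold.
 */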
1409 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1411 int val = atomic_read(&delayed_root->items_seq);
1413 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1416 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1422 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1424 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1426 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1427 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1430 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1434 seq = atomic_read(&delayed_root->items_seq);
1436 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1440 wait_event_interruptible(delayed_root->wait,
1441 could_end_wait(delayed_root, seq));
1445 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1448 static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1450 struct btrfs_fs_info *fs_info = trans->fs_info;
1451 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1453 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1457 * Adding the new dir index item does not require touching another
1458 * leaf, so we can release 1 unit of metadata that was previously
1459 * reserved when starting the transaction. This applies only to
1460 * the case where we had a transaction start and excludes the
1461 * transaction join case (when replaying log trees).
1463 trace_btrfs_space_reservation(fs_info, "transaction",
1464 trans->transid, bytes, 0);
1465 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1466 ASSERT(trans->bytes_reserved >= bytes);
1467 trans->bytes_reserved -= bytes;
1470 /* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1471 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1472 const char *name, int name_len,
1473 struct btrfs_inode *dir,
1474 struct btrfs_disk_key *disk_key, u8 flags,
1477 struct btrfs_fs_info *fs_info = trans->fs_info;
1478 const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1479 struct btrfs_delayed_node *delayed_node;
1480 struct btrfs_delayed_item *delayed_item;
1481 struct btrfs_dir_item *dir_item;
1482 bool reserve_leaf_space;
1486 delayed_node = btrfs_get_or_create_delayed_node(dir);
1487 if (IS_ERR(delayed_node))
1488 return PTR_ERR(delayed_node);
1490 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1492 BTRFS_DELAYED_INSERTION_ITEM);
1493 if (!delayed_item) {
1498 delayed_item->index = index;
1500 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1501 dir_item->location = *disk_key;
1502 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1503 btrfs_set_stack_dir_data_len(dir_item, 0);
1504 btrfs_set_stack_dir_name_len(dir_item, name_len);
1505 btrfs_set_stack_dir_flags(dir_item, flags);
1506 memcpy((char *)(dir_item + 1), name, name_len);
1508 data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1510 mutex_lock(&delayed_node->mutex);
1513 * First attempt to insert the delayed item. This is to make the error
1514 * handling path simpler in case we fail (-EEXIST). There's no risk of
1515 * any other task coming in and running the delayed item before we do
1516 * the metadata space reservation below, because we are holding the
1517 * delayed node's mutex and that mutex must also be locked before the
1518 * node's delayed items can be run.
1520 ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1521 if (unlikely(ret)) {
1522 btrfs_err(trans->fs_info,
1523 "error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1524 name_len, name, index, btrfs_root_id(delayed_node->root),
1525 delayed_node->inode_id, dir->index_cnt,
1526 delayed_node->index_cnt, ret);
1527 btrfs_release_delayed_item(delayed_item);
1528 btrfs_release_dir_index_item_space(trans);
1529 mutex_unlock(&delayed_node->mutex);
1533 if (delayed_node->index_item_leaves == 0 ||
1534 delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1535 delayed_node->curr_index_batch_size = data_len;
1536 reserve_leaf_space = true;
1538 delayed_node->curr_index_batch_size += data_len;
1539 reserve_leaf_space = false;
1542 if (reserve_leaf_space) {
1543 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1545 * Space was reserved for a dir index item insertion when we
1546 * started the transaction, so getting a failure here should be impossible.
1550 btrfs_release_delayed_item(delayed_item);
1551 mutex_unlock(&delayed_node->mutex);
1555 delayed_node->index_item_leaves++;
1557 btrfs_release_dir_index_item_space(trans);
1559 mutex_unlock(&delayed_node->mutex);
1562 btrfs_release_delayed_node(delayed_node);
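/*
 * If a delayed insertion item with the given index still exists, the pending
 * insertion and this deletion cancel each other out: drop the insertion item
 * here so that neither of them ever touches the subvolume tree.
 */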
1566 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1567 struct btrfs_delayed_node *node,
1570 struct btrfs_delayed_item *item;
1572 mutex_lock(&node->mutex);
1573 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1575 mutex_unlock(&node->mutex);
1580 * For delayed items to insert, we track reserved metadata bytes based
1581 * on the number of leaves that we will use.
1582 * See btrfs_insert_delayed_dir_index() and
1583 * btrfs_delayed_item_reserve_metadata().
1585 ASSERT(item->bytes_reserved == 0);
1586 ASSERT(node->index_item_leaves > 0);
1589 * If there's only one leaf reserved, we can decrement this item from the
1590 * current batch, otherwise we can not because we don't know which leaf
1591 * it belongs to. With the current limit on delayed items, we rarely
1592 * accumulate enough dir index items to fill more than one leaf (even
1593 * when using a leaf size of 4K).
1595 if (node->index_item_leaves == 1) {
1596 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1598 ASSERT(node->curr_index_batch_size >= data_len);
1599 node->curr_index_batch_size -= data_len;
1602 btrfs_release_delayed_item(item);
1604 /* If we now have no more dir index items, we can release all leaves. */
1605 if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1606 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1607 node->index_item_leaves = 0;
1610 mutex_unlock(&node->mutex);
1614 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1615 struct btrfs_inode *dir, u64 index)
1617 struct btrfs_delayed_node *node;
1618 struct btrfs_delayed_item *item;
1621 node = btrfs_get_or_create_delayed_node(dir);
1623 return PTR_ERR(node);
1625 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1629 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1635 item->index = index;
1637 ret = btrfs_delayed_item_reserve_metadata(trans, item);
1639 * We reserved enough space when we started the transaction,
1640 * so a metadata reservation failure here should be impossible.
1643 btrfs_err(trans->fs_info,
1644 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1645 btrfs_release_delayed_item(item);
1649 mutex_lock(&node->mutex);
1650 ret = __btrfs_add_delayed_item(node, item);
1651 if (unlikely(ret)) {
1652 btrfs_err(trans->fs_info,
1653 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1654 index, node->root->root_key.objectid,
1655 node->inode_id, ret);
1656 btrfs_delayed_item_release_metadata(dir->root, item);
1657 btrfs_release_delayed_item(item);
1659 mutex_unlock(&node->mutex);
1661 btrfs_release_delayed_node(node);
1665 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1667 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1673 * Since we hold the i_mutex of this directory, it is impossible that
1674 * a new directory index is added to the delayed node and index_cnt
1675 * is updated now. So we needn't lock the delayed node.
1677 if (!delayed_node->index_cnt) {
1678 btrfs_release_delayed_node(delayed_node);
1682 inode->index_cnt = delayed_node->index_cnt;
1683 btrfs_release_delayed_node(delayed_node);
1687 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1689 struct list_head *ins_list,
1690 struct list_head *del_list)
1692 struct btrfs_delayed_node *delayed_node;
1693 struct btrfs_delayed_item *item;
1695 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1700 * We can only do one readdir with delayed items at a time because of
1701 * item->readdir_list.
1703 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
1704 btrfs_inode_lock(BTRFS_I(inode), 0);
1706 mutex_lock(&delayed_node->mutex);
1707 item = __btrfs_first_delayed_insertion_item(delayed_node);
1708 while (item && item->index <= last_index) {
1709 refcount_inc(&item->refs);
1710 list_add_tail(&item->readdir_list, ins_list);
1711 item = __btrfs_next_delayed_item(item);
1714 item = __btrfs_first_delayed_deletion_item(delayed_node);
1715 while (item && item->index <= last_index) {
1716 refcount_inc(&item->refs);
1717 list_add_tail(&item->readdir_list, del_list);
1718 item = __btrfs_next_delayed_item(item);
1720 mutex_unlock(&delayed_node->mutex);
1722 * This delayed node is still cached in the btrfs inode, so refs
1723 * must be > 1 now, and we needn't check whether it is going to be freed or not.
1726 * Besides that, this function is used to read a directory, and we do not
1727 * insert/delete delayed items in this period. So we also needn't
1728 * requeue or dequeue this delayed node.
1730 refcount_dec(&delayed_node->refs);
1735 void btrfs_readdir_put_delayed_items(struct inode *inode,
1736 struct list_head *ins_list,
1737 struct list_head *del_list)
1739 struct btrfs_delayed_item *curr, *next;
1741 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1742 list_del(&curr->readdir_list);
1743 if (refcount_dec_and_test(&curr->refs))
1747 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1748 list_del(&curr->readdir_list);
1749 if (refcount_dec_and_test(&curr->refs))
1754 * The VFS is going to do up_read(), so we need to downgrade back to a read lock.
1757 downgrade_write(&inode->i_rwsem);
1760 int btrfs_should_delete_dir_index(struct list_head *del_list,
1763 struct btrfs_delayed_item *curr;
1766 list_for_each_entry(curr, del_list, readdir_list) {
1767 if (curr->index > index)
1769 if (curr->index == index) {
1778 * Read dir info stored in the delayed tree.
1780 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1781 struct list_head *ins_list)
1783 struct btrfs_dir_item *di;
1784 struct btrfs_delayed_item *curr, *next;
1785 struct btrfs_key location;
1789 unsigned char d_type;
1792 * The data of a delayed item can not change, so
1793 * we needn't lock it. And since we hold the i_mutex of the
1794 * directory, nobody can delete any directory index now.
1796 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1797 list_del(&curr->readdir_list);
1799 if (curr->index < ctx->pos) {
1800 if (refcount_dec_and_test(&curr->refs))
1805 ctx->pos = curr->index;
1807 di = (struct btrfs_dir_item *)curr->data;
1808 name = (char *)(di + 1);
1809 name_len = btrfs_stack_dir_name_len(di);
1811 d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1812 btrfs_disk_key_to_cpu(&location, &di->location);
1814 over = !dir_emit(ctx, name, name_len,
1815 location.objectid, d_type);
1817 if (refcount_dec_and_test(&curr->refs))
1827 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1828 struct btrfs_inode_item *inode_item,
1829 struct inode *inode)
1833 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1834 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1835 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1836 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1837 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1838 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1839 btrfs_set_stack_inode_generation(inode_item,
1840 BTRFS_I(inode)->generation);
1841 btrfs_set_stack_inode_sequence(inode_item,
1842 inode_peek_iversion(inode));
1843 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1844 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1845 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1846 BTRFS_I(inode)->ro_flags);
1847 btrfs_set_stack_inode_flags(inode_item, flags);
1848 btrfs_set_stack_inode_block_group(inode_item, 0);
1850 btrfs_set_stack_timespec_sec(&inode_item->atime,
1851 inode_get_atime_sec(inode));
1852 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1853 inode_get_atime_nsec(inode));
1855 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1856 inode_get_mtime_sec(inode));
1857 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1858 inode_get_mtime_nsec(inode));
1860 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1861 inode_get_ctime_sec(inode));
1862 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1863 inode_get_ctime_nsec(inode));
1865 btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
1866 btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
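/*
 * Fill in the VFS inode from the inode item cached in the delayed node, if
 * there is a pending delayed inode update; otherwise return an error so the
 * caller reads the inode item from the subvolume tree instead.
 */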
1869 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1871 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1872 struct btrfs_delayed_node *delayed_node;
1873 struct btrfs_inode_item *inode_item;
1875 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1879 mutex_lock(&delayed_node->mutex);
1880 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1881 mutex_unlock(&delayed_node->mutex);
1882 btrfs_release_delayed_node(delayed_node);
1886 inode_item = &delayed_node->inode_item;
1888 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1889 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1890 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1891 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1892 round_up(i_size_read(inode), fs_info->sectorsize));
1893 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1894 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1895 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1896 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1897 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1899 inode_set_iversion_queried(inode,
1900 btrfs_stack_inode_sequence(inode_item));
1902 *rdev = btrfs_stack_inode_rdev(inode_item);
1903 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1904 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1906 inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
1907 btrfs_stack_timespec_nsec(&inode_item->atime));
1909 inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
1910 btrfs_stack_timespec_nsec(&inode_item->mtime));
1912 inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
1913 btrfs_stack_timespec_nsec(&inode_item->ctime));
1915 BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
1916 BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
1918 inode->i_generation = BTRFS_I(inode)->generation;
1919 BTRFS_I(inode)->index_cnt = (u64)-1;
1921 mutex_unlock(&delayed_node->mutex);
1922 btrfs_release_delayed_node(delayed_node);
1926 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1927 struct btrfs_inode *inode)
1929 struct btrfs_root *root = inode->root;
1930 struct btrfs_delayed_node *delayed_node;
1933 delayed_node = btrfs_get_or_create_delayed_node(inode);
1934 if (IS_ERR(delayed_node))
1935 return PTR_ERR(delayed_node);
1937 mutex_lock(&delayed_node->mutex);
1938 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1939 fill_stack_inode_item(trans, &delayed_node->inode_item,
1944 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1948 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1949 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1950 delayed_node->count++;
1951 atomic_inc(&root->fs_info->delayed_root->items);
1953 mutex_unlock(&delayed_node->mutex);
1954 btrfs_release_delayed_node(delayed_node);
1958 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1960 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1961 struct btrfs_delayed_node *delayed_node;
1964 * we don't do delayed inode updates during log recovery because it
1965 * leads to enospc problems. This means we also can't do
1966 * delayed inode refs
1968 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1971 delayed_node = btrfs_get_or_create_delayed_node(inode);
1972 if (IS_ERR(delayed_node))
1973 return PTR_ERR(delayed_node);
1976 * The reason we don't reserve space for the inode ref deletion is:
1977 * - We ONLY do async inode ref deletion for an inode that has only
1978 *   one link (i_nlink == 1), which means there is only one inode ref.
1979 *   And in most cases, the inode ref and the inode item are in the
1980 *   same leaf, and we will deal with them at the same time.
1981 *   Since we are sure we will reserve the space for the inode item,
1982 *   it is unnecessary to reserve space for the inode ref deletion.
1983 * - If the inode ref and the inode item are not in the same leaf,
1984 *   we also needn't worry about an enospc problem, because we reserve
1985 *   much more space for the inode update than it needs.
1986 * - At the worst, we can steal some space from the global reservation.
1989 mutex_lock(&delayed_node->mutex);
1990 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1993 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1994 delayed_node->count++;
1995 atomic_inc(&fs_info->delayed_root->items);
1997 mutex_unlock(&delayed_node->mutex);
1998 btrfs_release_delayed_node(delayed_node);
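/*
 * Throw away all pending items of a delayed node (insertions, deletions,
 * iref deletion and inode update) and release their reservations, without
 * writing anything to the tree. Used when an inode or a whole root is being
 * torn down.
 */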
2002 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
2004 struct btrfs_root *root = delayed_node->root;
2005 struct btrfs_fs_info *fs_info = root->fs_info;
2006 struct btrfs_delayed_item *curr_item, *prev_item;
2008 mutex_lock(&delayed_node->mutex);
2009 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
2011 prev_item = curr_item;
2012 curr_item = __btrfs_next_delayed_item(prev_item);
2013 btrfs_release_delayed_item(prev_item);
2016 if (delayed_node->index_item_leaves > 0) {
2017 btrfs_delayed_item_release_leaves(delayed_node,
2018 delayed_node->index_item_leaves);
2019 delayed_node->index_item_leaves = 0;
2022 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2024 btrfs_delayed_item_release_metadata(root, curr_item);
2025 prev_item = curr_item;
2026 curr_item = __btrfs_next_delayed_item(prev_item);
2027 btrfs_release_delayed_item(prev_item);
2030 btrfs_release_delayed_iref(delayed_node);
2032 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2033 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2034 btrfs_release_delayed_inode(delayed_node);
2036 mutex_unlock(&delayed_node->mutex);
2039 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2041 struct btrfs_delayed_node *delayed_node;
2043 delayed_node = btrfs_get_delayed_node(inode);
2047 __btrfs_kill_delayed_node(delayed_node);
2048 btrfs_release_delayed_node(delayed_node);
2051 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2053 unsigned long index = 0;
2054 struct btrfs_delayed_node *delayed_nodes[8];
2057 struct btrfs_delayed_node *node;
2060 spin_lock(&root->inode_lock);
2061 if (xa_empty(&root->delayed_nodes)) {
2062 spin_unlock(&root->inode_lock);
2067 xa_for_each_start(&root->delayed_nodes, index, node, index) {
2069 * Don't increase refs in case the node is dead and
2070 * about to be removed from the tree in the loop below
2072 if (refcount_inc_not_zero(&node->refs)) {
2073 delayed_nodes[count] = node;
2076 if (count >= ARRAY_SIZE(delayed_nodes))
2079 spin_unlock(&root->inode_lock);
2082 for (int i = 0; i < count; i++) {
2083 __btrfs_kill_delayed_node(delayed_nodes[i]);
2084 btrfs_release_delayed_node(delayed_nodes[i]);
2089 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2091 struct btrfs_delayed_node *curr_node, *prev_node;
2093 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2095 __btrfs_kill_delayed_node(curr_node);
2097 prev_node = curr_node;
2098 curr_node = btrfs_next_delayed_node(curr_node);
2099 btrfs_release_delayed_node(prev_node);
2103 void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2104 struct list_head *ins_list,
2105 struct list_head *del_list)
2107 struct btrfs_delayed_node *node;
2108 struct btrfs_delayed_item *item;
2110 node = btrfs_get_delayed_node(inode);
2114 mutex_lock(&node->mutex);
2115 item = __btrfs_first_delayed_insertion_item(node);
2118 * It's possible that the item is already in a log list. This
2119 * can happen in case two tasks are trying to log the same
2120 * directory. For example if we have task A and task B:
2122 * Task A collected the delayed items into a log list while
2123 * under the inode's log_mutex (at btrfs_log_inode()), but it
2124 * only releases the items after logging the inodes they point
2125 * to (if they are new inodes), which happens after unlocking the log_mutex;
2128 * Task B enters btrfs_log_inode() and acquires the log_mutex
2129 * of the same directory inode, before task A releases the
2130 * delayed items. This can happen for example when logging some
2131 * inode we need to trigger logging of its parent directory, so
2132 * logging two files that have the same parent directory can
2135 * If this happens, just ignore delayed items already in a log
2136 * list. All the tasks logging the directory are under a log
2137 * transaction and whichever finishes first can not sync the log
2138 * before the other completes and leaves the log transaction.
2140 if (!item->logged && list_empty(&item->log_list)) {
2141 refcount_inc(&item->refs);
2142 list_add_tail(&item->log_list, ins_list);
2144 item = __btrfs_next_delayed_item(item);
2147 item = __btrfs_first_delayed_deletion_item(node);
2149 /* It may be non-empty, for the same reason mentioned above. */
2150 if (!item->logged && list_empty(&item->log_list)) {
2151 refcount_inc(&item->refs);
2152 list_add_tail(&item->log_list, del_list);
2154 item = __btrfs_next_delayed_item(item);
2156 mutex_unlock(&node->mutex);
2159 * We are called during inode logging, which means the inode is in use
2160 * and can not be evicted before we finish logging the inode. So we never
2161 * have the last reference on the delayed inode.
2162 * Also, we don't use btrfs_release_delayed_node() because that would
2163 * requeue the delayed inode (change its order in the list of prepared
2164 * nodes) and we don't want to do such a change because we don't create or
2165 * delete delayed items.
2167 ASSERT(refcount_read(&node->refs) > 1);
2168 refcount_dec(&node->refs);
2171 void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2172 struct list_head *ins_list,
2173 struct list_head *del_list)
2175 struct btrfs_delayed_node *node;
2176 struct btrfs_delayed_item *item;
2177 struct btrfs_delayed_item *next;
2179 node = btrfs_get_delayed_node(inode);
2183 mutex_lock(&node->mutex);
2185 list_for_each_entry_safe(item, next, ins_list, log_list) {
2186 item->logged = true;
2187 list_del_init(&item->log_list);
2188 if (refcount_dec_and_test(&item->refs))
2192 list_for_each_entry_safe(item, next, del_list, log_list) {
2193 item->logged = true;
2194 list_del_init(&item->log_list);
2195 if (refcount_dec_and_test(&item->refs))
2199 mutex_unlock(&node->mutex);
2202 * We are called during inode logging, which means the inode is in use
2203 * and can not be evicted before we finish logging the inode. So we never
2204 * have the last reference on the delayed inode.
2205 * Also, we don't use btrfs_release_delayed_node() because that would
2206 * requeue the delayed inode (change its order in the list of prepared
2207 * nodes) and we don't want to do such a change because we don't create or
2208 * delete delayed items.
2210 ASSERT(refcount_read(&node->refs) > 1);
2211 refcount_dec(&node->refs);