// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
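
/*
 * The queue is a two level structure: an rbtree of ref heads indexed by
 * extent bytenr (href_root), and within each head a second rbtree
 * (ref_tree) holding the individual delayed ref nodes, ordered by
 * comp_refs() below.
 */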

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
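
/*
 * comp_refs() defines the sort order inside a head's ref_tree: first by ref
 * type, then by the type specific fields (root or parent for tree refs,
 * root/objectid/offset or parent for data refs) and, when check_seq is set,
 * finally by seq, so otherwise identical refs created under different tree
 * mod log sequence numbers stay distinct.
 */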

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
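
/*
 * On a bytenr collision htree_insert() returns the existing head instead of
 * linking the new node, which lets add_delayed_ref_head() merge the update
 * into the head that is already queued.
 */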

static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
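
/*
 * With return_bigger set, a search that runs past the last head wraps
 * around to the first one in the tree; btrfs_select_ref_head() relies on
 * this to cycle through all heads starting at run_delayed_start.
 */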

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
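
/*
 * A return of -EAGAIN above means the head was run and removed from the
 * rbtree while we slept on its mutex; callers are expected to redo the
 * lookup in that case.
 */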

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
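
/*
 * Example: an ADD ref with ref_mod 1 followed by a mergeable DROP ref with
 * ref_mod 1 cancel out: mod is -1, the DROP node is dropped and the ADD
 * node reaches ref_mod 0, so it is dropped as well.
 */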

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
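
/*
 * btrfs_select_ref_head() scans forward from run_delayed_start, skips heads
 * that another thread is already processing and wraps around at most once
 * (tracked by the loop flag) before giving up and returning NULL.
 */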

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * we only need the lock for this case because we could be processing
	 * it currently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}
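
/*
 * Example for the pending_csums adjustment above: when a data head goes
 * from total_ref_mod 1 to -1 the extent is now expected to be freed, so
 * num_bytes worth of csum items is accounted for deletion.
 */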

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int is_system,
		     int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
}
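
/*
 * add_delayed_ref_head() returns either the new head, now linked into
 * href_root, or the existing head it was merged into; in both cases the
 * returned head is owned by the rbtree and ref nodes may be attached to it
 * while delayed_refs->lock is still held.
 */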

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but this pattern is used throughout this file; follow it for now.
	 * Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, 0, 0, action, 0,
					is_system, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}
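
/*
 * Note the ownership rule on the error paths above: the ref and the head go
 * back to their slab caches, but extent_op is not freed here; freeing it on
 * failure appears to be left to the callers.
 */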

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, 0, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * extent_ops just modify the flags of an extent and they don't result
	 * in ref count changes, hence it's safe to pass false/0 for is_system
	 * argument
	 */
	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, 0, NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.  It must
 * be called with the delayed ref spinlock held, and it returns the head
 * node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
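
/*
 * kmem_cache_destroy() is a no-op for a NULL pointer, so the exit function
 * above also serves as the unwind path when btrfs_delayed_ref_init() fails
 * partway through.
 */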

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}