btrfs: only track ref_heads in delayed_ref_updates
fs/btrfs/delayed-ref.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
                          struct btrfs_delayed_tree_ref *ref2)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
                          struct btrfs_delayed_data_ref *ref2)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

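/*
 * Compare two delayed ref nodes for the per-head rb tree.  Ordering is by
 * type first, then by the type specific fields, and finally (when check_seq
 * is set) by sequence number, so multiple updates to the same reference can
 * coexist in the tree in the order they were queued.
 */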
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
                                     btrfs_delayed_node_to_tree_ref(ref2));
        else
                ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
                                     btrfs_delayed_node_to_data_ref(ref2));
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;
        bool leftmost = true;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->bytenr) {
                        p = &(*p)->rb_left;
                } else if (bytenr > entry->bytenr) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

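/*
 * Insert a new ref into the per-head rb tree, ordered by comp_refs() with
 * seq checking enabled.  Returns the existing node if an equal entry is
 * already present (so the caller can merge into it), or NULL on a plain
 * insert, mirroring htree_insert() above.
 */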
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
                struct btrfs_delayed_ref_node *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        bool leftmost = true;

        while (*p) {
                int comp;

                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
                if (comp < 0) {
                        p = &(*p)->rb_left;
                } else if (comp > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

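/* Return the head with the lowest bytenr, i.e. the leftmost entry. */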
static struct btrfs_delayed_ref_head *find_first_ref_head(
                struct btrfs_delayed_ref_root *dr)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = rb_first_cached(&dr->href_root);
        if (!n)
                return NULL;

        entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

        return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
                struct btrfs_delayed_ref_root *dr, u64 bytenr,
                bool return_bigger)
{
        struct rb_root *root = &dr->href_root.rb_root;
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                return NULL;
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                }
                return entry;
        }
        return NULL;
}

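/*
 * Try to take head->mutex while delayed_refs->lock is held.  If the trylock
 * fails we drop the spinlock, sleep on the mutex and re-take the spinlock;
 * the head may have been run and removed from the rb tree in the meantime,
 * in which case -EAGAIN tells the caller to pick another head.  A typical
 * caller loop (a sketch, not lifted verbatim from the real callers) looks
 * like:
 *
 *      head = btrfs_select_ref_head(delayed_refs);
 *      if (head && btrfs_delayed_ref_lock(delayed_refs, head) == -EAGAIN)
 *              head = NULL;    (the head went away, select again)
 */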
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref_head(head);
        return 0;
}

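/*
 * Unlink a single ref from its head: erase it from the head's ref_tree,
 * drop it from the ref_add_list if queued there, and release the tree's
 * reference.  Caller must hold head->lock.
 */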
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        lockdep_assert_held(&head->lock);
        rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
}

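/*
 * Try to merge @ref with the later entries for the same reference.  Equal
 * actions simply accumulate ref_mod; opposite actions cancel out.  For
 * example (a sketch of the arithmetic below): an ADD with ref_mod 1
 * followed by a DROP with ref_mod 2 is swapped and folded into a single
 * DROP with ref_mod 1.  Entries at or above @seq must stay visible to the
 * tree mod log and end the merge.
 */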
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;

        while (!done && node) {
                int mod;

                next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_refs(ref, next, false))
                        break;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }

        return done;
}

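/*
 * Merge all mergeable refs of a head.  Refs with a sequence number at or
 * above the lowest entry in tree_mod_seq_list are left alone, since the
 * tree mod log may still need to see the individual operations.
 */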
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_ref_node *ref;
        struct rb_node *node;
        u64 seq = 0;

        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

again:
        for (node = rb_first_cached(&head->ref_tree); node;
             node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
                if (merge_ref(trans, delayed_refs, head, ref, seq))
                        goto again;
        }
}

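/*
 * Return 1 if @seq is at or above the lowest sequence in
 * tree_mod_seq_list, meaning the corresponding delayed refs must be held
 * back until the blocker is gone, 0 otherwise.
 */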
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
                                "holding back delayed_ref %#x.%x, lowest is %#x.%x",
                                (u32)(seq >> 32), (u32)seq,
                                (u32)(elem->seq >> 32), (u32)elem->seq);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

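/*
 * Pick the next head to run, scanning from run_delayed_start and wrapping
 * to the beginning of the tree once.  Heads already marked as processing
 * are skipped.  On success the head is marked processing and
 * run_delayed_start is advanced past it, so repeated calls walk the whole
 * tree.
 */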
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
{
        struct btrfs_delayed_ref_head *head;

again:
        head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
                             true);
        if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
                head = find_first_ref_head(delayed_refs);
        }
        if (!head)
                return NULL;

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (delayed_refs->run_delayed_start == 0)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
        return head;
}

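/*
 * Remove a head from the rb tree and update the counters.  Both
 * delayed_refs->lock and head->lock must be held, as asserted below.
 */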
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        lockdep_assert_held(&head->lock);

        rb_erase_cached(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
        atomic_dec(&delayed_refs->num_entries);
        delayed_refs->num_heads--;
        if (head->processing == 0)
                delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *root,
                              struct btrfs_delayed_ref_head *href,
                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist)
                goto inserted;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;
inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update,
                         int *old_ref_mod_ret)
{
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);
        if (update->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing->must_insert_reserved = update->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation,
         * we only need the lock for this case because we could be processing
         * it currently, for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing->total_ref_mod;
        if (old_ref_mod_ret)
                *old_ref_mod_ret = old_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing->is_data) {
                if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing->lock);
}

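/*
 * Initialize a new head ref.  count_mod encodes the effect on the overall
 * reference count: 0 for a plain head update, -1 for a drop and +1 for an
 * add (BTRFS_ADD_DELAYED_EXTENT also counts as +1, with
 * must_insert_reserved set so the reserved space is accounted later).
 */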
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
                                  struct btrfs_qgroup_extent_record *qrecord,
                                  u64 bytenr, u64 num_bytes, u64 ref_root,
                                  u64 reserved, int action, bool is_data,
                                  bool is_system)
{
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * The head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
         * accounting when the extent is finally added, or if a later
         * modification deletes the delayed ref without ever inserting the
         * extent into the extent allocation tree.  ref->must_insert_reserved
         * is the flag used to record that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = bytenr;
        head_ref->num_bytes = num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
        head_ref->ref_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
        }
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     int action, int *qrecord_inserted_ret,
                     int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
        int qrecord_inserted = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(qrecord && head_ref->qgroup_ref_root
                        && head_ref->qgroup_reserved
                        && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, existing, head_ref,
                                         old_ref_mod);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (old_ref_mod)
                        *old_ref_mod = 0;
                if (head_ref->is_data && head_ref->ref_mod < 0)
                        delayed_refs->pending_csums += head_ref->num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;
        if (new_ref_mod)
                *new_ref_mod = head_ref->total_ref_mod;

        return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *                           modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:        The structure which is going to be initialized.
 *
 * @bytenr:     The logical address of the extent for which a modification is
 *              going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:   The id of the root where this modification has originated, this
 *              can be either one of the well-known metadata trees or the
 *              subvolume id which references this extent.
 *
 * @action:     Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *              BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:   Holds the type of the extent which is being recorded, can be
 *              one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *              when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *              BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
                                    struct btrfs_delayed_ref_node *ref,
                                    u64 bytenr, u64 num_bytes, u64 ref_root,
                                    int action, u8 ref_type)
{
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        ref->type = ref_type;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
        int ret;
        u8 ref_type;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                        return -ENOMEM;
                }
        }

        if (parent)
                ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref_type = BTRFS_TREE_BLOCK_REF_KEY;

        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                ref_root, action, ref_type);
        ref->root = ref_root;
        ref->parent = parent;
        ref->level = level;

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
                              ref_root, 0, action, false, is_system);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        if (qrecord_inserted)
                btrfs_qgroup_trace_extent_post(fs_info, record);

        return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        int ret;
        u8 ref_type;

        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        if (parent)
                ref_type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref_type = BTRFS_EXTENT_DATA_REF_KEY;
        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                ref_root, action, ref_type);
        ref->root = ref_root;
        ref->parent = parent;
        ref->objectid = owner;
        ref->offset = offset;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
                              reserved, action, true, false);
        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(fs_info, record);
        return 0;
}

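/*
 * Queue a head-only update (BTRFS_UPDATE_DELAYED_HEAD) that carries an
 * extent_op, used to set flags or update the key of an extent item
 * without changing its reference count.
 */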
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
                              BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
                              false);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
                             NULL, NULL, NULL);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                            u64 bytenr)
{
        return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}