// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * Delayed back reference update tracking.  For subvolume trees we queue
 * up extent allocations and backref maintenance for delayed processing.
 * This avoids deep call chains where we add extents in the middle of
 * btrfs_search_slot, and it allows us to buffer up frequently modified
 * backrefs in an rb tree instead of hammering updates on the extent
 * allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
                          struct btrfs_delayed_tree_ref *ref2)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
                          struct btrfs_delayed_data_ref *ref2)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
                                     btrfs_delayed_node_to_tree_ref(ref2));
        else
                ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
                                     btrfs_delayed_node_to_data_ref(ref2));
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}
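
/*
 * Refs within a head sort by type first, then by the fields specific to
 * that type, and finally (when check_seq is set) by sequence number.
 * For example, two BTRFS_TREE_BLOCK_REF_KEY refs on the same bytenr
 * compare by root, so refs from different subvolume roots stay distinct
 * in the tree while truly identical refs collide and can be merged.
 */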

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                struct btrfs_delayed_ref_node *ins)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;

        while (*p) {
                int comp;

                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
                if (comp < 0)
                        p = &(*p)->rb_left;
                else if (comp > 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}
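
/*
 * Both htree_insert() and tree_insert() return NULL on success, or the
 * already existing entry when a node with the same key is present.  In
 * the latter case the new node is left unlinked, so the caller can merge
 * its changes into the existing entry and free the new one.
 */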

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}
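
/*
 * Note the wrap-around in the return_bigger case above: if bytenr is
 * greater than every entry in the tree, rb_next() of the last visited
 * node is NULL and rb_first() is used instead.  E.g. with heads at
 * bytenrs 4096 and 8192, a lookup for 12288 with return_bigger set
 * returns the 4096 head, letting the caller cycle back to the start.
 */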

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref_head(head);
        return 0;
}
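
/*
 * Note for callers of btrfs_delayed_ref_lock(): a return of 0 means
 * head->mutex is now held.  -EAGAIN means the head was removed from the
 * rbtree while we blocked on its mutex and our extra reference has
 * already been dropped, so the head pointer must not be reused; look up
 * a head again under delayed_refs->lock instead.
 */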

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        lockdep_assert_held(&head->lock);
        rb_erase(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;

        while (!done && node) {
                int mod;

                next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_refs(ref, next, false))
                        break;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }

        return done;
}
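
/*
 * Example of a single merge step: an ADD ref with ref_mod 1 followed by
 * an otherwise identical DROP ref with ref_mod 1 yields mod = -1; the
 * DROP is removed, the ADD's ref_mod drops to 0, so the ADD is removed
 * as well and the pair cancels out completely.
 */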

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        struct rb_node *node;
        u64 seq = 0;

        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

again:
        for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
                if (merge_ref(trans, delayed_refs, head, ref, seq))
                        goto again;
        }
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
                                "holding back delayed_ref %#x.%x, lowest is %#x.%x",
                                (u32)(seq >> 32), (u32)seq,
                                (u32)(elem->seq >> 32), (u32)elem->seq);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}
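
/*
 * The seq gating above and in btrfs_merge_delayed_refs() ties delayed
 * ref processing to tree_mod_seq_list: while the oldest outstanding
 * tree mod sequence (taken, e.g., by a backref walk) is 'seq', refs at
 * or above that seq are neither merged nor run, so the walker keeps a
 * consistent view of the backrefs.
 */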

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
        return head;
}
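
/*
 * Selection above is effectively round-robin over bytenrs:
 * run_delayed_start remembers where the previously chosen head ended,
 * and the search wraps back to the lowest bytenr (at most once per
 * call, tracked by 'loop') when no unprocessed head remains above it.
 */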

/*
 * Helper to insert the ref_node into the head's ref tree, or to merge
 * it with an existing identical ref.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
                              struct btrfs_delayed_ref_root *root,
                              struct btrfs_delayed_ref_head *href,
                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist)
                goto inserted;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;
inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}
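
/*
 * Worked example of the action flip above: merging a new ADD ref with
 * ref_mod 2 into an existing DROP ref with ref_mod 1 takes the
 * exist->ref_mod < ref->ref_mod branch, so the node becomes an ADD with
 * ref_mod 2 and mod = -1, leaving a final ref_mod of 1: the expected
 * net effect of +2 - 1.
 */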

/*
 * helper function to update the accounting in the head ref;
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update,
                         int *old_ref_mod_ret)
{
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);
        if (update->must_insert_reserved) {
                /*
                 * If the extent was freed and then reallocated before
                 * the delayed ref entries were processed, we can end up
                 * with an existing head ref without the
                 * must_insert_reserved flag set.  Set it again here.
                 */
                existing->must_insert_reserved = update->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }
        /*
         * Update the reference mod on the head to reflect this new
         * operation.  We only need the lock here because the head could
         * be under processing right now; for refs we have just added we
         * know we're fine.
         */
        old_ref_mod = existing->total_ref_mod;
        if (old_ref_mod_ret)
                *old_ref_mod_ret = old_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative one, or
         * vice versa, we need to make sure to adjust pending_csums
         * accordingly.
         */
        if (existing->is_data) {
                if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing->lock);
}
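
/*
 * pending_csums counts bytes of data extents whose net effect in this
 * transaction is a drop, because their csum items will have to be
 * deleted when the head is run.  For example, a data head whose
 * total_ref_mod goes from 1 to -1 adds num_bytes to pending_csums, and
 * a later update bringing it back to >= 0 subtracts it again.
 */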

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
                     int action, int is_data, int is_system,
                     int *qrecord_inserted_ret,
                     int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;
        int qrecord_inserted = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = bytenr;
        head_ref->num_bytes = num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
        head_ref->ref_tree = RB_ROOT;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(ref_root && reserved && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, existing, head_ref,
                                         old_ref_mod);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (old_ref_mod)
                        *old_ref_mod = 0;
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;
        if (new_ref_mod)
                *new_ref_mod = head_ref->total_ref_mod;
        return head_ref;
}
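
/*
 * Note that add_delayed_ref_head() returns the head that actually ended
 * up linked in the rbtree: when a head for this bytenr already existed,
 * the freshly allocated head_ref was freed and the existing, updated
 * head is returned instead, so callers must use the return value rather
 * than their own pointer.
 */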

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated,
         * but this bad practice already exists elsewhere.  Follow it for
         * now; needs cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&trans->fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(trans->fs_info, ref, full_ref, action);

        ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record, bytenr,
                                        num_bytes, 0, 0, action, 0,
                                        is_system, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action);
        spin_unlock(&delayed_refs->lock);

        if (qrecord_inserted)
                btrfs_qgroup_trace_extent_post(fs_info, record);

        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}
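
/*
 * A minimal usage sketch (the variables root_id and level here are
 * illustrative, not taken from a real caller): queueing the allocation
 * of a new tree block owned directly by a subvolume root, with no
 * extent_op and no interest in the ref_mod results, might look like:
 *
 *      ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
 *                                       fs_info->nodesize, 0, root_id,
 *                                       level, BTRFS_ADD_DELAYED_EXTENT,
 *                                       NULL, NULL, NULL);
 */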

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;

        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record, bytenr,
                                        num_bytes, ref_root, reserved,
                                        action, 1, 0, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);

        add_delayed_data_ref(trans, head_ref, &ref->node, bytenr, num_bytes,
                             parent, ref_root, owner, offset, action);
        spin_unlock(&delayed_refs->lock);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(fs_info, record);
        return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * extent_ops just modify the flags of an extent and they don't
         * result in ref count changes, hence it's safe to pass false/0
         * for the is_system argument.
         */
        add_delayed_ref_head(trans, head_ref, NULL, bytenr, num_bytes, 0, 0,
                             BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
                             0, NULL, NULL, NULL);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * This does a simple search for the head node for a given extent.  It
 * must be called with the delayed ref spinlock held, and it returns the
 * head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
void __cold btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}