/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
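
/*
 * A sketch of the typical flow through this file (summarizing the
 * helpers below, not an additional API):
 *
 *   btrfs_add_delayed_tree_ref() / btrfs_add_delayed_data_ref()
 *     -> add_delayed_ref_head()            find or create the head ref
 *     -> add_delayed_tree_ref() /
 *        add_delayed_data_ref()
 *          -> add_delayed_ref_tail_merge() append, or merge with the tail
 *   ... later, when the transaction runs its delayed refs ...
 *   btrfs_select_ref_head()                pick the next unprocessed head
 *   btrfs_merge_delayed_refs()             cancel opposing add/drop pairs
 */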

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}
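
/*
 * Note: both comparators above take their arguments in (ref2, ref1)
 * order.  Every caller in this file only tests the result for zero vs.
 * non-zero ("do these two entries describe the same backref?"), so the
 * reversed naming has no functional effect.
 */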

/*
 * Insert a new head ref into the href rbtree.  Returns the existing
 * entry if one is already queued for the same bytenr, or NULL once the
 * new node has been linked in.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * Find a head entry based on bytenr.  This returns the delayed ref head
 * if it was able to find one, or NULL if nothing was in that spot.  If
 * return_bigger is given, the next bigger entry is returned if no exact
 * match is found, wrapping around to the first entry in the tree when
 * bytenr is past the last one.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

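/*
 * Take head->mutex without stalling other delayed ref processing: if the
 * trylock below fails, the head is pinned with an extra refcount and
 * delayed_refs->lock is dropped while we sleep on the mutex.  Returns
 * -EAGAIN (with the mutex released again) if the head was run and
 * removed from the tree in the meantime, in which case the caller must
 * retry.
 */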
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

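/*
 * Unlink a queued ref: a head is erased from the href rbtree, any other
 * ref is removed from its head's ref_list (and from the add_list, if it
 * is queued there).  The tracking refcount and the per-transaction
 * counters are dropped as well.
 */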
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
                if (!list_empty(&ref->add_list))
                        list_del(&ref->add_list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

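/*
 * Try to merge @ref with the other entries on @head's list.  Two refs
 * can merge when their type and comparator fields match; entries still
 * visible to a tree mod log user (seq >= @seq) are skipped.  Matching
 * actions add their ref_mods, opposing actions cancel and keep the entry
 * with the larger ref_mod, and an entry whose ref_mod reaches zero is
 * dropped entirely.  Returns true once the original @ref has been
 * removed.
 */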
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        bool done = false;

        next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                                list);
        while (!done && &next->list != &head->ref_list) {
                int mod;
                struct btrfs_delayed_ref_node *next2;

                next2 = list_next_entry(next, list);

                if (next == ref)
                        goto next;

                if (seq && next->seq >= seq)
                        goto next;

                if (next->type != ref->type)
                        goto next;

                if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
                    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
                                   btrfs_delayed_node_to_tree_ref(next),
                                   ref->type))
                        goto next;
                if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
                     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
                    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
                                   btrfs_delayed_node_to_data_ref(next)))
                        goto next;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
next:
                next = next2;
        }

        return done;
}

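/*
 * Merge duplicate and opposing refs queued on @head.  Data refs are left
 * alone (there are rarely enough of them for merging to pay off), and
 * refs still needed by an outstanding tree mod log user (seq >= the
 * oldest active seq) are skipped.  Caller must hold head->lock.
 */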
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        u64 seq = 0;

        assert_spin_locked(&head->lock);

        if (list_empty(&head->ref_list))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                               list);
        while (&ref->list != &head->ref_list) {
                if (seq && ref->seq >= seq)
                        goto next;

                if (merge_ref(trans, delayed_refs, head, ref, seq)) {
                        if (list_empty(&head->ref_list))
                                break;
                        ref = list_first_entry(&head->ref_list,
                                               struct btrfs_delayed_ref_node,
                                               list);
                        continue;
                }
next:
                ref = list_next_entry(ref, list);
        }
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
                                "holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
                                (u32)(seq >> 32), (u32)seq,
                                (u32)(elem->seq >> 32), (u32)elem->seq,
                                delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

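/*
 * Pick the next head ref to process.  The scan starts at
 * run_delayed_start and wraps around to the beginning of the rbtree at
 * most once; heads already flagged as processing are skipped.  The
 * chosen head is marked processing and run_delayed_start is advanced
 * past its extent, giving a simple round-robin over the pending heads.
 * Returns NULL when no runnable head is left.
 */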
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * Helper to insert ref_node at the tail of the head's ref list, or to
 * merge it with the current tail entry.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr or is_head */
        if (exist->type != ref->type || exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref;
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update,
                         int *old_ref_mod_ret)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /*
                 * if the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag
                 * set.  Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;
        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = true;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * Update the reference mod on the head to reflect this new operation.
         * We only need the lock here because the existing head could be
         * mid-processing; for refs we just added we know we're fine.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        if (old_ref_mod_ret)
                *old_ref_mod_ret = old_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
                     int action, int is_data, int *qrecord_inserted_ret,
                     int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;
        int qrecord_inserted = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                if (btrfs_qgroup_trace_extent_nolock(fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(ref_root && reserved && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, &existing->node, ref,
                                         old_ref_mod);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (old_ref_mod)
                        *old_ref_mod = 0;
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;
        if (new_ref_mod)
                *new_ref_mod = head_ref->total_ref_mod;
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the head's ref list,
 * merging it with the list tail when possible.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        INIT_LIST_HEAD(&ref->list);
        INIT_LIST_HEAD(&ref->add_list);

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated;
         * freeing it here is bad practice, but it matches the current call
         * sites.  Needs cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the head's ref list,
 * merging it with the list tail when possible.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        INIT_LIST_HEAD(&ref->list);
        INIT_LIST_HEAD(&ref->add_list);

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, 0, 0, action, 0,
                                        &qrecord_inserted, old_ref_mod,
                                        new_ref_mod);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action);
        spin_unlock(&delayed_refs->lock);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(fs_info, record);
        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}
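
/*
 * Example (illustrative sketch only, not a call site from the kernel
 * tree): queueing one more reference to a tree block that a root now
 * points at.  Passing parent == 0 selects BTRFS_TREE_BLOCK_REF_KEY in
 * add_delayed_tree_ref() above; 'old_mod'/'new_mod'/'root_id' are
 * hypothetical locals and error handling is elided.
 *
 *      int old_mod, new_mod;
 *
 *      ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
 *                                       fs_info->nodesize, 0, root_id,
 *                                       level, BTRFS_ADD_DELAYED_REF,
 *                                       NULL, &old_mod, &new_mod);
 */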

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;

        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, ref_root, reserved,
                                        action, 1, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action);
        spin_unlock(&delayed_refs->lock);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(fs_info, record);
        return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                             num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data, NULL, NULL, NULL);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.  It
 * must be called with the delayed ref spinlock held, and it returns the
 * head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

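/*
 * Also used on the btrfs_delayed_ref_init() failure path below, where
 * some of the caches may still be NULL; kmem_cache_destroy() accepts a
 * NULL pointer.
 */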
void btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}