ed262d44ff8a3d302f7cef99aca8dd11d674c5bd
[sfrench/cifs-2.6.git] / fs / btrfs / extent-tree.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/sched.h>
7 #include <linux/sched/signal.h>
8 #include <linux/pagemap.h>
9 #include <linux/writeback.h>
10 #include <linux/blkdev.h>
11 #include <linux/sort.h>
12 #include <linux/rcupdate.h>
13 #include <linux/kthread.h>
14 #include <linux/slab.h>
15 #include <linux/ratelimit.h>
16 #include <linux/percpu_counter.h>
17 #include <linux/lockdep.h>
18 #include <linux/crc32c.h>
19 #include "tree-log.h"
20 #include "disk-io.h"
21 #include "print-tree.h"
22 #include "volumes.h"
23 #include "raid56.h"
24 #include "locking.h"
25 #include "free-space-cache.h"
26 #include "free-space-tree.h"
27 #include "math.h"
28 #include "sysfs.h"
29 #include "qgroup.h"
30 #include "ref-verify.h"
31
32 #undef SCRAMBLE_DELAYED_REFS
33
34 /*
35  * control flags for do_chunk_alloc's force field
36  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
37  * if we really need one.
38  *
39  * CHUNK_ALLOC_LIMITED means to only try and allocate one
40  * if we have very few chunks already allocated.  This is
41  * used as part of the clustering code to help make sure
42  * we have a good pool of storage to cluster in, without
43  * filling the FS with empty chunks
44  *
45  * CHUNK_ALLOC_FORCE means it must try to allocate one
46  *
47  */
48 enum {
49         CHUNK_ALLOC_NO_FORCE = 0,
50         CHUNK_ALLOC_LIMITED = 1,
51         CHUNK_ALLOC_FORCE = 2,
52 };
53
54 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
55                                struct btrfs_fs_info *fs_info,
56                                 struct btrfs_delayed_ref_node *node, u64 parent,
57                                 u64 root_objectid, u64 owner_objectid,
58                                 u64 owner_offset, int refs_to_drop,
59                                 struct btrfs_delayed_extent_op *extra_op);
60 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
61                                     struct extent_buffer *leaf,
62                                     struct btrfs_extent_item *ei);
63 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
64                                       struct btrfs_fs_info *fs_info,
65                                       u64 parent, u64 root_objectid,
66                                       u64 flags, u64 owner, u64 offset,
67                                       struct btrfs_key *ins, int ref_mod);
68 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
69                                      struct btrfs_fs_info *fs_info,
70                                      u64 parent, u64 root_objectid,
71                                      u64 flags, struct btrfs_disk_key *key,
72                                      int level, struct btrfs_key *ins);
73 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
74                           struct btrfs_fs_info *fs_info, u64 flags,
75                           int force);
76 static int find_next_key(struct btrfs_path *path, int level,
77                          struct btrfs_key *key);
78 static void dump_space_info(struct btrfs_fs_info *fs_info,
79                             struct btrfs_space_info *info, u64 bytes,
80                             int dump_block_groups);
81 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
82                                u64 num_bytes);
83 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
84                                      struct btrfs_space_info *space_info,
85                                      u64 num_bytes);
86 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
87                                      struct btrfs_space_info *space_info,
88                                      u64 num_bytes);
89
/*
 * Return nonzero once caching of this block group has stopped, either
 * successfully (BTRFS_CACHE_FINISHED) or with an error
 * (BTRFS_CACHE_ERROR).
 */
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	/*
	 * Full barrier before reading ->cached; presumably pairs with the
	 * state update done under block_group->lock in caching_thread() --
	 * NOTE(review): confirm the pairing store-side barrier.
	 */
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}
97
98 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
99 {
100         return (cache->flags & bits) == bits;
101 }
102
/*
 * Take an extra reference on @cache.  Dropped with
 * btrfs_put_block_group(), which frees the group at refcount zero.
 */
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}
107
/*
 * Drop a reference on @cache and free it when the last reference goes
 * away.  By then nothing may still be pinned or reserved in the group
 * (the WARN_ONs catch accounting leaks).
 */
void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}
127
128 /*
129  * this adds the block group to the fs_info rb tree for the block group
130  * cache
131  */
132 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
133                                 struct btrfs_block_group_cache *block_group)
134 {
135         struct rb_node **p;
136         struct rb_node *parent = NULL;
137         struct btrfs_block_group_cache *cache;
138
139         spin_lock(&info->block_group_cache_lock);
140         p = &info->block_group_cache_tree.rb_node;
141
142         while (*p) {
143                 parent = *p;
144                 cache = rb_entry(parent, struct btrfs_block_group_cache,
145                                  cache_node);
146                 if (block_group->key.objectid < cache->key.objectid) {
147                         p = &(*p)->rb_left;
148                 } else if (block_group->key.objectid > cache->key.objectid) {
149                         p = &(*p)->rb_right;
150                 } else {
151                         spin_unlock(&info->block_group_cache_lock);
152                         return -EEXIST;
153                 }
154         }
155
156         rb_link_node(&block_group->cache_node, parent, p);
157         rb_insert_color(&block_group->cache_node,
158                         &info->block_group_cache_tree);
159
160         if (info->first_logical_byte > block_group->key.objectid)
161                 info->first_logical_byte = block_group->key.objectid;
162
163         spin_unlock(&info->block_group_cache_lock);
164
165         return 0;
166 }
167
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 *
 * On success a reference is taken on the returned group; release it
 * with btrfs_put_block_group().  Returns NULL if no matching group
 * exists.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			/*
			 * "at or after" candidate: remember the lowest
			 * starting group seen so far, keep searching left
			 * for one starting even closer to bytenr.
			 */
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			/* exact match on the group's start offset */
			ret = cache;
			break;
		}
	}
	if (ret) {
		/* reference for the caller */
		btrfs_get_block_group(ret);
		/*
		 * A lookup of byte 0 tells us the first logical byte of
		 * the fs; cache it for cheap later queries.
		 */
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
213
214 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
215                                u64 start, u64 num_bytes)
216 {
217         u64 end = start + num_bytes - 1;
218         set_extent_bits(&fs_info->freed_extents[0],
219                         start, end, EXTENT_UPTODATE);
220         set_extent_bits(&fs_info->freed_extents[1],
221                         start, end, EXTENT_UPTODATE);
222         return 0;
223 }
224
225 static void free_excluded_extents(struct btrfs_fs_info *fs_info,
226                                   struct btrfs_block_group_cache *cache)
227 {
228         u64 start, end;
229
230         start = cache->key.objectid;
231         end = start + cache->key.offset - 1;
232
233         clear_extent_bits(&fs_info->freed_extents[0],
234                           start, end, EXTENT_UPTODATE);
235         clear_extent_bits(&fs_info->freed_extents[1],
236                           start, end, EXTENT_UPTODATE);
237 }
238
239 static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
240                                  struct btrfs_block_group_cache *cache)
241 {
242         u64 bytenr;
243         u64 *logical;
244         int stripe_len;
245         int i, nr, ret;
246
247         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
248                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
249                 cache->bytes_super += stripe_len;
250                 ret = add_excluded_extent(fs_info, cache->key.objectid,
251                                           stripe_len);
252                 if (ret)
253                         return ret;
254         }
255
256         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
257                 bytenr = btrfs_sb_offset(i);
258                 ret = btrfs_rmap_block(fs_info, cache->key.objectid,
259                                        bytenr, 0, &logical, &nr, &stripe_len);
260                 if (ret)
261                         return ret;
262
263                 while (nr--) {
264                         u64 start, len;
265
266                         if (logical[nr] > cache->key.objectid +
267                             cache->key.offset)
268                                 continue;
269
270                         if (logical[nr] + stripe_len <= cache->key.objectid)
271                                 continue;
272
273                         start = logical[nr];
274                         if (start < cache->key.objectid) {
275                                 start = cache->key.objectid;
276                                 len = (logical[nr] + stripe_len) - start;
277                         } else {
278                                 len = min_t(u64, stripe_len,
279                                             cache->key.objectid +
280                                             cache->key.offset - start);
281                         }
282
283                         cache->bytes_super += len;
284                         ret = add_excluded_extent(fs_info, start, len);
285                         if (ret) {
286                                 kfree(logical);
287                                 return ret;
288                         }
289                 }
290
291                 kfree(logical);
292         }
293         return 0;
294 }
295
/*
 * Return @cache's caching control with an extra reference taken, or
 * NULL if no caching is in progress.  The caller must drop the
 * reference with put_caching_control().
 */
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* take the ref while still under the lock so ctl can't vanish */
	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}
312
313 static void put_caching_control(struct btrfs_caching_control *ctl)
314 {
315         if (refcount_dec_and_test(&ctl->count))
316                 kfree(ctl);
317 }
318
#ifdef CONFIG_BTRFS_DEBUG
/*
 * Debug helper: remove every other chunk-sized piece of free space from
 * @block_group so that subsequent allocations are forced to fragment.
 * Chunk size is the nodesize for metadata groups, sectorsize otherwise.
 */
static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk;
	u64 step;

	if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA)
		chunk = fs_info->nodesize;
	else
		chunk = fs_info->sectorsize;
	step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		len = (len < step) ? 0 : len - step;
	}
}
#endif
339
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 *
 * Walks [start, end) and hands every gap between pinned extents to the
 * free space cache of @block_group.  Returns the number of bytes added.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			/* start is inside a pinned extent, skip past it */
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			/* free gap before the next pinned extent */
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			/* next pinned extent starts beyond end, stop */
			break;
		}
	}

	/* tail of the range past the last pinned extent is all free */
	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
382
/*
 * Populate the free space cache for @caching_ctl's block group by
 * scanning the extent tree: any range inside the group not covered by
 * an EXTENT_ITEM / METADATA_ITEM is free space.
 *
 * Runs with caching_ctl->mutex and fs_info->commit_root_sem held (see
 * caching_thread()); both are dropped and re-taken periodically so the
 * scan does not stall transaction commits.  Returns 0 on success or a
 * negative errno.
 */
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* never hand out the superblock area at the front of the fs */
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since its read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			/* the filesystem is going away, abort the scan */
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			/*
			 * Give commits a chance to run: drop the path and
			 * both locks, reschedule, then restart the search.
			 */
			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			/* stale key from before a restart, re-search at last */
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		/* past the end of this block group, scan is complete */
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			/* the gap [last, key.objectid) is free space */
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			/* periodically wake allocators waiting on progress */
			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	/* whatever is left up to the end of the block group is free */
	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
520
/*
 * Worker that populates a block group's free space cache, from the free
 * space tree when the compat_ro feature is set, otherwise by scanning
 * the extent tree.  Queued by cache_block_group(), which took the block
 * group and caching_ctl references we drop at the end.
 */
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	/* publish the final caching state */
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		/*
		 * fragment_free_space() removes half of the free space;
		 * account that half as used so space accounting stays
		 * consistent.
		 */
		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	/* drop the refs taken when this work was queued */
	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
571
/*
 * Make sure free space caching is started (or already done) for @cache.
 *
 * With mount -o space_cache we first try the fast path of loading the
 * on-disk free space cache synchronously; otherwise (or when that
 * fails and @load_cache_only is clear) an async caching_thread is
 * queued to scan the extent tree / free space tree.  With
 * @load_cache_only set, only the fast load is attempted and the slow
 * async scan is never started.
 *
 * Returns 0 or a negative errno; the resulting cache->cached state
 * tells the caller what actually happened.
 */
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group who's cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		/* hold a ref across the sleep; lock is dropped while waiting */
		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		/* someone else already cached (or is caching) this group */
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			/* fast load succeeded, caching is fully done */
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			/* see caching_thread(): account fragmented space as used */
			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info, cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	/* ref for the worker, dropped at the end of caching_thread() */
	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
707
/*
 * return the block group that starts at or after bytenr, or NULL if
 * there is none.  A reference is held on the result; release it with
 * btrfs_put_block_group().
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}
716
/*
 * return the block group that contains the given bytenr, or NULL if no
 * group covers it.  A reference is held on the result; release it with
 * btrfs_put_block_group().
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
726
727 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
728                                                   u64 flags)
729 {
730         struct list_head *head = &info->space_info;
731         struct btrfs_space_info *found;
732
733         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
734
735         rcu_read_lock();
736         list_for_each_entry_rcu(found, head, list) {
737                 if (found->flags & flags) {
738                         rcu_read_unlock();
739                         return found;
740                 }
741         }
742         rcu_read_unlock();
743         return NULL;
744 }
745
746 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
747                              bool metadata, u64 root_objectid)
748 {
749         struct btrfs_space_info *space_info;
750         u64 flags;
751
752         if (metadata) {
753                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
754                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
755                 else
756                         flags = BTRFS_BLOCK_GROUP_METADATA;
757         } else {
758                 flags = BTRFS_BLOCK_GROUP_DATA;
759         }
760
761         space_info = __find_space_info(fs_info, flags);
762         ASSERT(space_info);
763         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
764 }
765
766 /*
767  * after adding space to the filesystem, we need to clear the full flags
768  * on all the space infos.
769  */
770 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
771 {
772         struct list_head *head = &info->space_info;
773         struct btrfs_space_info *found;
774
775         rcu_read_lock();
776         list_for_each_entry_rcu(found, head, list)
777                 found->full = 0;
778         rcu_read_unlock();
779 }
780
781 /* simple helper to search for an existing data extent at a given offset */
782 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
783 {
784         int ret;
785         struct btrfs_key key;
786         struct btrfs_path *path;
787
788         path = btrfs_alloc_path();
789         if (!path)
790                 return -ENOMEM;
791
792         key.objectid = start;
793         key.offset = len;
794         key.type = BTRFS_EXTENT_ITEM_KEY;
795         ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
796         btrfs_free_path(path);
797         return ret;
798 }
799
800 /*
801  * helper function to lookup reference count and flags of a tree block.
802  *
803  * the head node for delayed ref is used to store the sum of all the
804  * reference count modifications queued up in the rbtree. the head
805  * node may also store the extent flags to set. This way you can check
806  * to see what the reference count and extent flags would be if all of
807  * the delayed refs are not processed.
808  */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Without a transaction, read the commit root and skip locking */
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	/*
	 * Skinny METADATA_ITEM not found: the same block may still be
	 * recorded as a regular EXTENT_ITEM in the previous slot.
	 */
	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			/* A smaller item can only be the old v0 format */
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		/* No on-disk item; delayed refs below may still apply */
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	/* Fold in ref count changes still queued as delayed refs */
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
937
938 /*
939  * Back reference rules.  Back refs have three main goals:
940  *
941  * 1) differentiate between all holders of references to an extent so that
942  *    when a reference is dropped we can make sure it was a valid reference
943  *    before freeing the extent.
944  *
945  * 2) Provide enough information to quickly find the holders of an extent
946  *    if we notice a given block is corrupted or bad.
947  *
948  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
949  *    maintenance.  This is actually the same as #2, but with a slightly
950  *    different use case.
951  *
952  * There are two kinds of back refs. The implicit back refs is optimized
953  * for pointers in non-shared tree blocks. For a given pointer in a block,
954  * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
956  * b-tree searching. The full back refs is for pointers in tree blocks not
957  * referenced by their owner trees. The location of tree block is recorded
958  * in the back refs. Actually the full back refs is generic, and can be
959  * used in all cases the implicit back refs is used. The major shortcoming
960  * of the full back refs is its overhead. Every time a tree block gets
961  * COWed, we have to update back refs entry for all pointers in it.
962  *
963  * For a newly allocated tree block, we use implicit back refs for
964  * pointers in it. This means most tree related operations only involve
965  * implicit back refs. For a tree block created in old transaction, the
966  * only way to drop a reference to it is COW it. So we can detect the
967  * event that tree block loses its owner tree's reference and do the
968  * back refs conversion.
969  *
970  * When a tree block is COWed through a tree, there are four cases:
971  *
972  * The reference count of the block is one and the tree is the block's
973  * owner tree. Nothing to do in this case.
974  *
975  * The reference count of the block is one and the tree is not the
976  * block's owner tree. In this case, full back refs is used for pointers
977  * in the block. Remove these full back refs, add implicit back refs for
978  * every pointers in the new block.
979  *
980  * The reference count of the block is greater than one and the tree is
981  * the block's owner tree. In this case, implicit back refs is used for
982  * pointers in the block. Add full back refs for every pointers in the
983  * block, increase lower level extents' reference counts. The original
 * implicit back refs are inherited by the new block.
985  *
986  * The reference count of the block is greater than one and the tree is
987  * not the block's owner tree. Add implicit back refs for every pointer in
988  * the new block, increase lower level extents' reference count.
989  *
990  * Back Reference Key composing:
991  *
992  * The key objectid corresponds to the first byte in the extent,
993  * The key type is used to differentiate between types of back refs.
994  * There are different meanings of the key offset for different types
995  * of back refs.
996  *
997  * File extents can be referenced by:
998  *
999  * - multiple snapshots, subvolumes, or different generations in one subvol
1000  * - different files inside a single subvolume
1001  * - different offsets inside a file (bookend extents in file.c)
1002  *
1003  * The extent ref structure for the implicit back refs has fields for:
1004  *
1005  * - Objectid of the subvolume root
1006  * - objectid of the file holding the reference
1007  * - original offset in the file
1008  * - how many bookend extents
1009  *
1010  * The key offset for the implicit back refs is hash of the first
1011  * three fields.
1012  *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf
1019  *
1020  * When a file extent is allocated, The implicit back refs is used.
1021  * the fields are filled in:
1022  *
1023  *     (root_key.objectid, inode objectid, offset in file, 1)
1024  *
 * When a file extent is removed during file truncation, we find the
1026  * corresponding implicit back refs and check the following fields:
1027  *
1028  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1029  *
1030  * Btree extents can be referenced by:
1031  *
1032  * - Different subvolumes
1033  *
1034  * Both the implicit back refs and the full back refs for tree blocks
1035  * only consist of key. The key offset for the implicit back refs is
1036  * objectid of block's owner tree. The key offset for the full back refs
1037  * is the first byte of parent block.
1038  *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
1042  */
1043
1044 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
/*
 * Convert an extent item from the old v0 disk format to the current
 * format: re-search the item with enough extra room, extend it in place
 * and fill in the refs and flags fields.
 *
 * @owner may be (u64)-1 when unknown; the owner is then taken from the
 * first v0 ref item of this extent.  Returns 0 or a negative errno.
 */
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	/*
	 * Owner unknown: scan forward to the first v0 ref of this extent
	 * and take the owner recorded there.
	 */
	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	/* Tree blocks additionally carry a btrfs_tree_block_info */
	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(fs_info, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
1125 #endif
1126
1127 /*
1128  * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
1130  * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
1131  */
1132 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
1133                                      struct btrfs_extent_inline_ref *iref,
1134                                      enum btrfs_inline_ref_type is_data)
1135 {
1136         int type = btrfs_extent_inline_ref_type(eb, iref);
1137         u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
1138
1139         if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1140             type == BTRFS_SHARED_BLOCK_REF_KEY ||
1141             type == BTRFS_SHARED_DATA_REF_KEY ||
1142             type == BTRFS_EXTENT_DATA_REF_KEY) {
1143                 if (is_data == BTRFS_REF_TYPE_BLOCK) {
1144                         if (type == BTRFS_TREE_BLOCK_REF_KEY)
1145                                 return type;
1146                         if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1147                                 ASSERT(eb->fs_info);
1148                                 /*
1149                                  * Every shared one has parent tree
1150                                  * block, which must be aligned to
1151                                  * nodesize.
1152                                  */
1153                                 if (offset &&
1154                                     IS_ALIGNED(offset, eb->fs_info->nodesize))
1155                                         return type;
1156                         }
1157                 } else if (is_data == BTRFS_REF_TYPE_DATA) {
1158                         if (type == BTRFS_EXTENT_DATA_REF_KEY)
1159                                 return type;
1160                         if (type == BTRFS_SHARED_DATA_REF_KEY) {
1161                                 ASSERT(eb->fs_info);
1162                                 /*
1163                                  * Every shared one has parent tree
1164                                  * block, which must be aligned to
1165                                  * nodesize.
1166                                  */
1167                                 if (offset &&
1168                                     IS_ALIGNED(offset, eb->fs_info->nodesize))
1169                                         return type;
1170                         }
1171                 } else {
1172                         ASSERT(is_data == BTRFS_REF_TYPE_ANY);
1173                         return type;
1174                 }
1175         }
1176
1177         btrfs_print_leaf((struct extent_buffer *)eb);
1178         btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
1179                   eb->start, type);
1180         WARN_ON(1);
1181
1182         return BTRFS_REF_TYPE_INVALID;
1183 }
1184
/*
 * Compute the key offset used for EXTENT_DATA_REF items from the
 * (root, owner, offset) triple, hashing the little-endian on-disk
 * representation of each field with crc32c.
 */
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	/*
	 * NOTE(review): shifting by 31 (not 32) overlaps the two CRCs by
	 * one bit; presumably historical, but this value is an on-disk
	 * key offset, so it must never be changed.
	 */
	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
1200
1201 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1202                                      struct btrfs_extent_data_ref *ref)
1203 {
1204         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1205                                     btrfs_extent_data_ref_objectid(leaf, ref),
1206                                     btrfs_extent_data_ref_offset(leaf, ref));
1207 }
1208
1209 static int match_extent_data_ref(struct extent_buffer *leaf,
1210                                  struct btrfs_extent_data_ref *ref,
1211                                  u64 root_objectid, u64 owner, u64 offset)
1212 {
1213         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1214             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1215             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1216                 return 0;
1217         return 1;
1218 }
1219
/*
 * Find the data ref for extent @bytenr: the shared ref keyed by @parent
 * when @parent is non-zero, otherwise the extent data ref keyed by the
 * hash of (root_objectid, owner, offset).
 *
 * Returns 0 with the path pointing at the matching item, -ENOENT when
 * no matching ref exists, or a negative errno on error.
 */
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		/* shared refs are keyed directly by the parent block */
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		/* non-shared refs are keyed by hash, which may collide */
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		/* retry with the old v0 ref key format */
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	/*
	 * Hash keyed refs can collide; walk forward from the search
	 * position until an exact match is found or we pass bytenr.
	 */
	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			/* moved off the COWed leaf; must redo the search */
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				/* re-search so the path is COWed at the match */
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
1307
/*
 * Insert a new data ref for extent @bytenr, or add @refs_to_add to the
 * count of an existing one.  With @parent set a shared data ref is
 * used, otherwise a hash keyed extent data ref.
 *
 * Returns 0 on success or a negative errno; the path is released in
 * all cases.
 */
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	/* -EEXIST means an item with this key exists; update it below */
	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		/*
		 * On a hash collision the existing item belongs to a
		 * different (root, owner, offset); probe the following
		 * key offsets until we hit our ref or an empty slot.
		 */
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			/* freshly inserted item, fill in every field */
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
1387
/*
 * Drop @refs_to_drop references from the data ref item the path points
 * at, deleting the item when its count reaches zero.  *last_ref is set
 * to 1 when the item was deleted.
 */
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	/* read the current count from whichever ref item type this is */
	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		/* last reference gone, remove the whole item */
		ret = btrfs_del_item(trans, fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
1445
/*
 * Return the reference count stored in a data ref.  When @iref is given
 * the count is read from that inline ref, otherwise from the keyed item
 * the path currently points at.
 */
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
1492
1493 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1494                                           struct btrfs_fs_info *fs_info,
1495                                           struct btrfs_path *path,
1496                                           u64 bytenr, u64 parent,
1497                                           u64 root_objectid)
1498 {
1499         struct btrfs_root *root = fs_info->extent_root;
1500         struct btrfs_key key;
1501         int ret;
1502
1503         key.objectid = bytenr;
1504         if (parent) {
1505                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1506                 key.offset = parent;
1507         } else {
1508                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1509                 key.offset = root_objectid;
1510         }
1511
1512         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1513         if (ret > 0)
1514                 ret = -ENOENT;
1515 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1516         if (ret == -ENOENT && parent) {
1517                 btrfs_release_path(path);
1518                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1519                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1520                 if (ret > 0)
1521                         ret = -ENOENT;
1522         }
1523 #endif
1524         return ret;
1525 }
1526
1527 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1528                                           struct btrfs_fs_info *fs_info,
1529                                           struct btrfs_path *path,
1530                                           u64 bytenr, u64 parent,
1531                                           u64 root_objectid)
1532 {
1533         struct btrfs_key key;
1534         int ret;
1535
1536         key.objectid = bytenr;
1537         if (parent) {
1538                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1539                 key.offset = parent;
1540         } else {
1541                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1542                 key.offset = root_objectid;
1543         }
1544
1545         ret = btrfs_insert_empty_item(trans, fs_info->extent_root,
1546                                       path, &key, 0);
1547         btrfs_release_path(path);
1548         return ret;
1549 }
1550
1551 static inline int extent_ref_type(u64 parent, u64 owner)
1552 {
1553         int type;
1554         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1555                 if (parent > 0)
1556                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1557                 else
1558                         type = BTRFS_TREE_BLOCK_REF_KEY;
1559         } else {
1560                 if (parent > 0)
1561                         type = BTRFS_SHARED_DATA_REF_KEY;
1562                 else
1563                         type = BTRFS_EXTENT_DATA_REF_KEY;
1564         }
1565         return type;
1566 }
1567
1568 static int find_next_key(struct btrfs_path *path, int level,
1569                          struct btrfs_key *key)
1570
1571 {
1572         for (; level < BTRFS_MAX_LEVEL; level++) {
1573                 if (!path->nodes[level])
1574                         break;
1575                 if (path->slots[level] + 1 >=
1576                     btrfs_header_nritems(path->nodes[level]))
1577                         continue;
1578                 if (level == 0)
1579                         btrfs_item_key_to_cpu(path->nodes[level], key,
1580                                               path->slots[level] + 1);
1581                 else
1582                         btrfs_node_key_to_cpu(path->nodes[level], key,
1583                                               path->slots[level] + 1);
1584                 return 0;
1585         }
1586         return 1;
1587 }
1588
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 *
 * When @insert is set the tree search reserves @extra_size bytes of
 * room and keeps the path locked so the caller can insert immediately.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	/* The ref type we are after; inline refs are sorted by type. */
	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			/* A fat EXTENT_ITEM would sort just before our key. */
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			/* No fat item either; retry with the fat key form. */
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	/* Missing extent item is fatal when inserting, -ENOENT otherwise. */
	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	/* A too-small item is a v0 extent item: convert before touching it. */
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, fs_info, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	/* Inline refs start right after the extent item header. */
	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	/* Fat metadata items carry a tree_block_info before the refs. */
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	/* Scan the sorted inline refs; stop at a match or the first larger. */
	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EINVAL;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			/* Data refs of the same type are ordered by hash. */
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	/* Either the found ref (err == 0) or the insertion point (-ENOENT). */
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
1802
/*
 * helper to add new inline back ref
 *
 * @iref is the insertion point previously returned by
 * lookup_inline_extent_backref().  The extent item is extended by the
 * size of one inline ref, later refs are shifted to open a gap, the
 * item's total ref count is bumped by @refs_to_add and any pending
 * @extent_op is applied.
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	/* Remember the insertion offset before the item is resized. */
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	/* Grow the extent item by the size of one inline ref. */
	btrfs_extend_item(fs_info, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/* Shift refs after the insertion point to make room for the new one. */
	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	/* Fill in the new ref according to its type. */
	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
1866
1867 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1868                                  struct btrfs_fs_info *fs_info,
1869                                  struct btrfs_path *path,
1870                                  struct btrfs_extent_inline_ref **ref_ret,
1871                                  u64 bytenr, u64 num_bytes, u64 parent,
1872                                  u64 root_objectid, u64 owner, u64 offset)
1873 {
1874         int ret;
1875
1876         ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret,
1877                                            bytenr, num_bytes, parent,
1878                                            root_objectid, owner, offset, 0);
1879         if (ret != -ENOENT)
1880                 return ret;
1881
1882         btrfs_release_path(path);
1883         *ref_ret = NULL;
1884
1885         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1886                 ret = lookup_tree_block_ref(trans, fs_info, path, bytenr,
1887                                             parent, root_objectid);
1888         } else {
1889                 ret = lookup_extent_data_ref(trans, fs_info, path, bytenr,
1890                                              parent, root_objectid, owner,
1891                                              offset);
1892         }
1893         return ret;
1894 }
1895
/*
 * helper to update/remove inline back ref
 *
 * Applies @refs_to_mod (which may be negative) both to the extent
 * item's total ref count and to the individual inline ref @iref.  If
 * the ref's own count drops to zero, the ref is cut out of the item
 * and *last_ref is set.  Tree block refs carry no count of their own
 * and may only be dropped by exactly one.
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	/* Update the total refs stored in the extent item first. */
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	/* Read the per-ref count; only data refs actually carry one. */
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		/* Last reference of this ref: cut it out of the item. */
		*last_ref = 1;
		size =  btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(fs_info, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
1967
1968 static noinline_for_stack
1969 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1970                                  struct btrfs_fs_info *fs_info,
1971                                  struct btrfs_path *path,
1972                                  u64 bytenr, u64 num_bytes, u64 parent,
1973                                  u64 root_objectid, u64 owner,
1974                                  u64 offset, int refs_to_add,
1975                                  struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_extent_inline_ref *iref;
1978         int ret;
1979
1980         ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
1981                                            bytenr, num_bytes, parent,
1982                                            root_objectid, owner, offset, 1);
1983         if (ret == 0) {
1984                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1985                 update_inline_extent_backref(fs_info, path, iref,
1986                                              refs_to_add, extent_op, NULL);
1987         } else if (ret == -ENOENT) {
1988                 setup_inline_extent_backref(fs_info, path, iref, parent,
1989                                             root_objectid, owner, offset,
1990                                             refs_to_add, extent_op);
1991                 ret = 0;
1992         }
1993         return ret;
1994 }
1995
1996 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1997                                  struct btrfs_fs_info *fs_info,
1998                                  struct btrfs_path *path,
1999                                  u64 bytenr, u64 parent, u64 root_objectid,
2000                                  u64 owner, u64 offset, int refs_to_add)
2001 {
2002         int ret;
2003         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2004                 BUG_ON(refs_to_add != 1);
2005                 ret = insert_tree_block_ref(trans, fs_info, path, bytenr,
2006                                             parent, root_objectid);
2007         } else {
2008                 ret = insert_extent_data_ref(trans, fs_info, path, bytenr,
2009                                              parent, root_objectid,
2010                                              owner, offset, refs_to_add);
2011         }
2012         return ret;
2013 }
2014
2015 static int remove_extent_backref(struct btrfs_trans_handle *trans,
2016                                  struct btrfs_fs_info *fs_info,
2017                                  struct btrfs_path *path,
2018                                  struct btrfs_extent_inline_ref *iref,
2019                                  int refs_to_drop, int is_data, int *last_ref)
2020 {
2021         int ret = 0;
2022
2023         BUG_ON(!is_data && refs_to_drop != 1);
2024         if (iref) {
2025                 update_inline_extent_backref(fs_info, path, iref,
2026                                              -refs_to_drop, NULL, last_ref);
2027         } else if (is_data) {
2028                 ret = remove_extent_data_ref(trans, fs_info, path, refs_to_drop,
2029                                              last_ref);
2030         } else {
2031                 *last_ref = 1;
2032                 ret = btrfs_del_item(trans, fs_info->extent_root, path);
2033         }
2034         return ret;
2035 }
2036
#define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))

/*
 * Discard the byte range [start, start + len) on @bdev while skipping
 * any part that overlaps a btrfs superblock mirror.  The total number
 * of bytes actually discarded is returned in *discarded_bytes.
 * -EOPNOTSUPP from the block layer does not abort the superblock loop.
 */
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
                               u64 *discarded_bytes)
{
        int j, ret = 0;
        u64 bytes_left, end;
        u64 aligned_start = ALIGN(start, 1 << 9);

        /* Shrink the range inward to 512-byte sector boundaries. */
        if (WARN_ON(start != aligned_start)) {
                len -= aligned_start - start;
                len = round_down(len, 1 << 9);
                start = aligned_start;
        }

        *discarded_bytes = 0;

        if (!len)
                return 0;

        end = start + len;
        bytes_left = len;

        /* Skip any superblocks on this device. */
        for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
                u64 sb_start = btrfs_sb_offset(j);
                u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
                u64 size = sb_start - start;

                /* No overlap between this superblock and the range? */
                if (!in_range(sb_start, start, bytes_left) &&
                    !in_range(sb_end, start, bytes_left) &&
                    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
                        continue;

                /*
                 * Superblock spans beginning of range.  Adjust start and
                 * try again.
                 */
                if (sb_start <= start) {
                        start += sb_end - start;
                        if (start > end) {
                                bytes_left = 0;
                                break;
                        }
                        bytes_left = end - start;
                        continue;
                }

                /* Discard the chunk that lies before this superblock. */
                if (size) {
                        ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
                                                   GFP_NOFS, 0);
                        if (!ret)
                                *discarded_bytes += size;
                        else if (ret != -EOPNOTSUPP)
                                return ret;
                }

                /* Continue after the superblock copy. */
                start = sb_end;
                if (start > end) {
                        bytes_left = 0;
                        break;
                }
                bytes_left = end - start;
        }

        /* Discard whatever remains past the last overlapping superblock. */
        if (bytes_left) {
                ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
                                           GFP_NOFS, 0);
                if (!ret)
                        *discarded_bytes += bytes_left;
        }
        return ret;
}
2109
/*
 * Discard the physical sectors backing logical range [bytenr,
 * bytenr + num_bytes) on every stripe that maps it.  If @actual_bytes
 * is non-NULL it returns the number of bytes the devices actually
 * discarded.  -EOPNOTSUPP is swallowed so callers are not bothered by
 * devices that lack discard support.
 */
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 num_bytes, u64 *actual_bytes)
{
        int ret;
        u64 discarded_bytes = 0;
        struct btrfs_bio *bbio = NULL;


        /*
         * Avoid races with device replace and make sure our bbio has devices
         * associated to its stripes that don't go away while we are discarding.
         */
        btrfs_bio_counter_inc_blocked(fs_info);
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
                              &bbio, 0);
        /* Error condition is -ENOMEM */
        if (!ret) {
                struct btrfs_bio_stripe *stripe = bbio->stripes;
                int i;


                for (i = 0; i < bbio->num_stripes; i++, stripe++) {
                        u64 bytes;
                        struct request_queue *req_q;

                        /* Missing device is tolerated only in degraded mode. */
                        if (!stripe->dev->bdev) {
                                ASSERT(btrfs_test_opt(fs_info, DEGRADED));
                                continue;
                        }
                        req_q = bdev_get_queue(stripe->dev->bdev);
                        if (!blk_queue_discard(req_q))
                                continue;

                        ret = btrfs_issue_discard(stripe->dev->bdev,
                                                  stripe->physical,
                                                  stripe->length,
                                                  &bytes);
                        if (!ret)
                                discarded_bytes += bytes;
                        else if (ret != -EOPNOTSUPP)
                                break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

                        /*
                         * Just in case we get back EOPNOTSUPP for some reason,
                         * just ignore the return value so we don't screw up
                         * people calling discard_extent.
                         */
                        ret = 0;
                }
                btrfs_put_bbio(bbio);
        }
        btrfs_bio_counter_dec(fs_info);

        if (actual_bytes)
                *actual_bytes = discarded_bytes;


        if (ret == -EOPNOTSUPP)
                ret = 0;
        return ret;
}
2172
2173 /* Can return -ENOMEM */
2174 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2175                          struct btrfs_root *root,
2176                          u64 bytenr, u64 num_bytes, u64 parent,
2177                          u64 root_objectid, u64 owner, u64 offset)
2178 {
2179         struct btrfs_fs_info *fs_info = root->fs_info;
2180         int old_ref_mod, new_ref_mod;
2181         int ret;
2182
2183         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2184                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2185
2186         btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
2187                            owner, offset, BTRFS_ADD_DELAYED_REF);
2188
2189         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2190                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2191                                                  num_bytes, parent,
2192                                                  root_objectid, (int)owner,
2193                                                  BTRFS_ADD_DELAYED_REF, NULL,
2194                                                  &old_ref_mod, &new_ref_mod);
2195         } else {
2196                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2197                                                  num_bytes, parent,
2198                                                  root_objectid, owner, offset,
2199                                                  0, BTRFS_ADD_DELAYED_REF,
2200                                                  &old_ref_mod, &new_ref_mod);
2201         }
2202
2203         if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0) {
2204                 bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
2205
2206                 add_pinned_bytes(fs_info, -num_bytes, metadata, root_objectid);
2207         }
2208
2209         return ret;
2210 }
2211
/*
 * Add @refs_to_add references to the extent described by @node.
 *
 * First tries to store the reference inline in the extent item.  If the
 * item has no room (-EAGAIN), the extent item's total ref count is
 * bumped and a separate keyed backref item is inserted instead.  Any
 * pending @extent_op is applied to the extent item along the way.
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  struct btrfs_delayed_ref_node *node,
                                  u64 parent, u64 root_objectid,
                                  u64 owner, u64 offset, int refs_to_add,
                                  struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *item;
        struct btrfs_key key;
        u64 bytenr = node->bytenr;
        u64 num_bytes = node->num_bytes;
        u64 refs;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = READA_FORWARD;
        path->leave_spinning = 1;
        /* this will setup the path even if it fails to insert the back ref */
        ret = insert_inline_extent_backref(trans, fs_info, path, bytenr,
                                           num_bytes, parent, root_objectid,
                                           owner, offset,
                                           refs_to_add, extent_op);
        /* Done on success; bail on any error other than -EAGAIN. */
        if ((ret < 0 && ret != -EAGAIN) || !ret)
                goto out;

        /*
         * Ok we had -EAGAIN which means we didn't have space to insert and
         * inline extent ref, so just update the reference count and add a
         * normal backref.
         */
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, item);
        btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, item);

        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        path->reada = READA_FORWARD;
        path->leave_spinning = 1;
        /* now insert the actual backref */
        ret = insert_extent_backref(trans, fs_info, path, bytenr, parent,
                                    root_objectid, owner, offset, refs_to_add);
        if (ret)
                btrfs_abort_transaction(trans, ret);
out:
        btrfs_free_path(path);
        return ret;
}
2269
2270 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2271                                 struct btrfs_fs_info *fs_info,
2272                                 struct btrfs_delayed_ref_node *node,
2273                                 struct btrfs_delayed_extent_op *extent_op,
2274                                 int insert_reserved)
2275 {
2276         int ret = 0;
2277         struct btrfs_delayed_data_ref *ref;
2278         struct btrfs_key ins;
2279         u64 parent = 0;
2280         u64 ref_root = 0;
2281         u64 flags = 0;
2282
2283         ins.objectid = node->bytenr;
2284         ins.offset = node->num_bytes;
2285         ins.type = BTRFS_EXTENT_ITEM_KEY;
2286
2287         ref = btrfs_delayed_node_to_data_ref(node);
2288         trace_run_delayed_data_ref(fs_info, node, ref, node->action);
2289
2290         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2291                 parent = ref->parent;
2292         ref_root = ref->root;
2293
2294         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2295                 if (extent_op)
2296                         flags |= extent_op->flags_to_set;
2297                 ret = alloc_reserved_file_extent(trans, fs_info,
2298                                                  parent, ref_root, flags,
2299                                                  ref->objectid, ref->offset,
2300                                                  &ins, node->ref_mod);
2301         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2302                 ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
2303                                              ref_root, ref->objectid,
2304                                              ref->offset, node->ref_mod,
2305                                              extent_op);
2306         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2307                 ret = __btrfs_free_extent(trans, fs_info, node, parent,
2308                                           ref_root, ref->objectid,
2309                                           ref->offset, node->ref_mod,
2310                                           extent_op);
2311         } else {
2312                 BUG();
2313         }
2314         return ret;
2315 }
2316
2317 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2318                                     struct extent_buffer *leaf,
2319                                     struct btrfs_extent_item *ei)
2320 {
2321         u64 flags = btrfs_extent_flags(leaf, ei);
2322         if (extent_op->update_flags) {
2323                 flags |= extent_op->flags_to_set;
2324                 btrfs_set_extent_flags(leaf, ei, flags);
2325         }
2326
2327         if (extent_op->update_key) {
2328                 struct btrfs_tree_block_info *bi;
2329                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2330                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2331                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2332         }
2333 }
2334
/*
 * Run a delayed extent op (flags and/or tree block key update) against the
 * on-disk extent item for @head.
 *
 * For metadata on SKINNY_METADATA filesystems the lookup first tries the
 * skinny METADATA_ITEM key and falls back to the full EXTENT_ITEM key if
 * that is not found.  The item is rewritten via __run_delayed_extent_op().
 *
 * Returns 0 on success (or if the transaction is already aborted), a
 * negative errno otherwise.
 */
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
                                 struct btrfs_fs_info *fs_info,
                                 struct btrfs_delayed_ref_head *head,
                                 struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        u32 item_size;
        int ret;
        int err = 0;
        int metadata = !extent_op->is_data;

        if (trans->aborted)
                return 0;

        /* Skinny metadata items only exist with the incompat feature set. */
        if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
                metadata = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = head->bytenr;

        if (metadata) {
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = extent_op->level;
        } else {
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = head->num_bytes;
        }

again:
        path->reada = READA_FORWARD;
        path->leave_spinning = 1;
        ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        if (ret > 0) {
                if (metadata) {
                        /*
                         * Skinny key not found; the previous slot may hold a
                         * full extent item for the same bytenr.
                         */
                        if (path->slots[0] > 0) {
                                path->slots[0]--;
                                btrfs_item_key_to_cpu(path->nodes[0], &key,
                                                      path->slots[0]);
                                if (key.objectid == head->bytenr &&
                                    key.type == BTRFS_EXTENT_ITEM_KEY &&
                                    key.offset == head->num_bytes)
                                        ret = 0;
                        }
                        if (ret > 0) {
                                /* Retry the search with the full key. */
                                btrfs_release_path(path);
                                metadata = 0;

                                key.objectid = head->bytenr;
                                key.offset = head->num_bytes;
                                key.type = BTRFS_EXTENT_ITEM_KEY;
                                goto again;
                        }
                } else {
                        /* The extent item must exist; treat missing as corruption. */
                        err = -EIO;
                        goto out;
                }
        }

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        /* Convert old-format (v0) extent items in place before editing them. */
        if (item_size < sizeof(*ei)) {
                ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
        }
#endif
        BUG_ON(item_size < sizeof(*ei));
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        __run_delayed_extent_op(extent_op, leaf, ei);

        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return err;
}
2425
2426 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2427                                 struct btrfs_fs_info *fs_info,
2428                                 struct btrfs_delayed_ref_node *node,
2429                                 struct btrfs_delayed_extent_op *extent_op,
2430                                 int insert_reserved)
2431 {
2432         int ret = 0;
2433         struct btrfs_delayed_tree_ref *ref;
2434         struct btrfs_key ins;
2435         u64 parent = 0;
2436         u64 ref_root = 0;
2437         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
2438
2439         ref = btrfs_delayed_node_to_tree_ref(node);
2440         trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
2441
2442         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2443                 parent = ref->parent;
2444         ref_root = ref->root;
2445
2446         ins.objectid = node->bytenr;
2447         if (skinny_metadata) {
2448                 ins.offset = ref->level;
2449                 ins.type = BTRFS_METADATA_ITEM_KEY;
2450         } else {
2451                 ins.offset = node->num_bytes;
2452                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2453         }
2454
2455         if (node->ref_mod != 1) {
2456                 btrfs_err(fs_info,
2457         "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2458                           node->bytenr, node->ref_mod, node->action, ref_root,
2459                           parent);
2460                 return -EIO;
2461         }
2462         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2463                 BUG_ON(!extent_op || !extent_op->update_flags);
2464                 ret = alloc_reserved_tree_block(trans, fs_info,
2465                                                 parent, ref_root,
2466                                                 extent_op->flags_to_set,
2467                                                 &extent_op->key,
2468                                                 ref->level, &ins);
2469         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2470                 ret = __btrfs_inc_extent_ref(trans, fs_info, node,
2471                                              parent, ref_root,
2472                                              ref->level, 0, 1,
2473                                              extent_op);
2474         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2475                 ret = __btrfs_free_extent(trans, fs_info, node,
2476                                           parent, ref_root,
2477                                           ref->level, 0, 1, extent_op);
2478         } else {
2479                 BUG();
2480         }
2481         return ret;
2482 }
2483
2484 /* helper function to actually process a single delayed ref entry */
2485 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2486                                struct btrfs_fs_info *fs_info,
2487                                struct btrfs_delayed_ref_node *node,
2488                                struct btrfs_delayed_extent_op *extent_op,
2489                                int insert_reserved)
2490 {
2491         int ret = 0;
2492
2493         if (trans->aborted) {
2494                 if (insert_reserved)
2495                         btrfs_pin_extent(fs_info, node->bytenr,
2496                                          node->num_bytes, 1);
2497                 return 0;
2498         }
2499
2500         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2501             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2502                 ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
2503                                            insert_reserved);
2504         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2505                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2506                 ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
2507                                            insert_reserved);
2508         else
2509                 BUG();
2510         return ret;
2511 }
2512
2513 static inline struct btrfs_delayed_ref_node *
2514 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2515 {
2516         struct btrfs_delayed_ref_node *ref;
2517
2518         if (RB_EMPTY_ROOT(&head->ref_tree))
2519                 return NULL;
2520
2521         /*
2522          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2523          * This is to prevent a ref count from going down to zero, which deletes
2524          * the extent item from the extent tree, when there still are references
2525          * to add, which would fail because they would not find the extent item.
2526          */
2527         if (!list_empty(&head->ref_add_list))
2528                 return list_first_entry(&head->ref_add_list,
2529                                 struct btrfs_delayed_ref_node, add_list);
2530
2531         ref = rb_entry(rb_first(&head->ref_tree),
2532                        struct btrfs_delayed_ref_node, ref_node);
2533         ASSERT(list_empty(&ref->add_list));
2534         return ref;
2535 }
2536
/*
 * Hand a ref head back after a failed or deferred run: clear its processing
 * flag so btrfs_select_ref_head() can pick it again, and drop our lock on it.
 */
static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                                      struct btrfs_delayed_ref_head *head)
{
        spin_lock(&delayed_refs->lock);
        head->processing = 0;
        delayed_refs->num_heads_ready++;
        spin_unlock(&delayed_refs->lock);
        btrfs_delayed_ref_unlock(head);
}
2546
/*
 * Run the extent op (flags/key update) attached to a ref head, if any.
 *
 * Returns 0 when there is no op to run (must_insert_reserved heads apply
 * the op's flags when the new extent item is inserted instead), 1 when the
 * op was executed -- note that head->lock has been dropped in that case --
 * or a negative errno from run_delayed_extent_op().
 */
static int cleanup_extent_op(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info,
                             struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_extent_op *extent_op = head->extent_op;
        int ret;

        if (!extent_op)
                return 0;
        head->extent_op = NULL;
        if (head->must_insert_reserved) {
                btrfs_free_delayed_extent_op(extent_op);
                return 0;
        }
        /* Drop the head lock: updating the extent item can block on I/O. */
        spin_unlock(&head->lock);
        ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
        btrfs_free_delayed_extent_op(extent_op);
        return ret ? ret : 1;
}
2566
/*
 * Finish off a ref head whose ref list is empty: run any pending extent op,
 * unlink the head from the delayed ref tree and update the pinned-bytes,
 * pending-csum and qgroup accounting.
 *
 * Called with head->lock held; the lock is released on all return paths.
 * Returns 0 when the head was fully cleaned up, 1 when new refs or an
 * extent op showed up while locks were dropped (caller must loop), or a
 * negative errno.
 */
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        int ret;

        delayed_refs = &trans->transaction->delayed_refs;

        ret = cleanup_extent_op(trans, fs_info, head);
        if (ret < 0) {
                unselect_delayed_ref_head(delayed_refs, head);
                btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
                return ret;
        } else if (ret) {
                /* Extent op ran and dropped head->lock; caller must loop. */
                return ret;
        }

        /*
         * Need to drop our head ref lock and re-acquire the delayed ref lock
         * and then re-check to make sure nobody got added.
         */
        spin_unlock(&head->lock);
        spin_lock(&delayed_refs->lock);
        spin_lock(&head->lock);
        if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
                spin_unlock(&head->lock);
                spin_unlock(&delayed_refs->lock);
                return 1;
        }
        delayed_refs->num_heads--;
        rb_erase(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
        spin_unlock(&head->lock);
        spin_unlock(&delayed_refs->lock);
        atomic_dec(&delayed_refs->num_entries);

        trace_run_delayed_ref_head(fs_info, head, 0);

        /* Net ref loss: back these bytes out of the pinned accounting. */
        if (head->total_ref_mod < 0) {
                struct btrfs_space_info *space_info;
                u64 flags;

                if (head->is_data)
                        flags = BTRFS_BLOCK_GROUP_DATA;
                else if (head->is_system)
                        flags = BTRFS_BLOCK_GROUP_SYSTEM;
                else
                        flags = BTRFS_BLOCK_GROUP_METADATA;
                space_info = __find_space_info(fs_info, flags);
                ASSERT(space_info);
                percpu_counter_add(&space_info->total_bytes_pinned,
                                   -head->num_bytes);

                if (head->is_data) {
                        spin_lock(&delayed_refs->lock);
                        delayed_refs->pending_csums -= head->num_bytes;
                        spin_unlock(&delayed_refs->lock);
                }
        }

        /* Reserved extent with no refs left: pin it and drop its csums. */
        if (head->must_insert_reserved) {
                btrfs_pin_extent(fs_info, head->bytenr,
                                 head->num_bytes, 1);
                if (head->is_data) {
                        ret = btrfs_del_csums(trans, fs_info, head->bytenr,
                                              head->num_bytes);
                }
        }

        /* Also free its reserved qgroup space */
        btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
                                      head->qgroup_reserved);
        btrfs_delayed_ref_unlock(head);
        btrfs_put_delayed_ref_head(head);
        return 0;
}
2644
2645 /*
2646  * Returns 0 on success or if called with an already aborted transaction.
2647  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2648  */
/*
 * Core delayed-ref processing loop: select up to @nr ref heads, lock each
 * one, merge and run its queued refs via run_one_delayed_ref(), then clean
 * the head up with cleanup_ref_head().  Also maintains the running average
 * of per-ref runtime used by btrfs_should_throttle_delayed_refs().
 *
 * Returns 0 on success or a negative errno on the first failed ref.
 */
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                                             unsigned long nr)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_head *locked_ref = NULL;
        struct btrfs_delayed_extent_op *extent_op;
        ktime_t start = ktime_get();
        int ret;
        unsigned long count = 0;
        unsigned long actual_count = 0;
        int must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        while (1) {
                if (!locked_ref) {
                        if (count >= nr)
                                break;

                        spin_lock(&delayed_refs->lock);
                        locked_ref = btrfs_select_ref_head(trans);
                        if (!locked_ref) {
                                spin_unlock(&delayed_refs->lock);
                                break;
                        }

                        /* grab the lock that says we are going to process
                         * all the refs for this head */
                        ret = btrfs_delayed_ref_lock(trans, locked_ref);
                        spin_unlock(&delayed_refs->lock);
                        /*
                         * we may have dropped the spin lock to get the head
                         * mutex lock, and that might have given someone else
                         * time to free the head.  If that's true, it has been
                         * removed from our list and we can move on.
                         */
                        if (ret == -EAGAIN) {
                                locked_ref = NULL;
                                count++;
                                continue;
                        }
                }

                /*
                 * We need to try and merge add/drops of the same ref since we
                 * can run into issues with relocate dropping the implicit ref
                 * and then it being added back again before the drop can
                 * finish.  If we merged anything we need to re-loop so we can
                 * get a good ref.
                 * Or we can get node references of the same type that weren't
                 * merged when created due to bumps in the tree mod seq, and
                 * we need to merge them to prevent adding an inline extent
                 * backref before dropping it (triggering a BUG_ON at
                 * insert_inline_extent_backref()).
                 */
                spin_lock(&locked_ref->lock);
                btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);

                /*
                 * locked_ref is the head node, so we have to go one
                 * node back for any delayed ref updates
                 */
                ref = select_delayed_ref(locked_ref);

                /*
                 * A pending tree mod seq walk still needs this ref; put the
                 * head back and pick another one.
                 */
                if (ref && ref->seq &&
                    btrfs_check_delayed_seq(fs_info, ref->seq)) {
                        spin_unlock(&locked_ref->lock);
                        unselect_delayed_ref_head(delayed_refs, locked_ref);
                        locked_ref = NULL;
                        cond_resched();
                        count++;
                        continue;
                }

                /*
                 * We're done processing refs in this ref_head, clean everything
                 * up and move on to the next ref_head.
                 */
                if (!ref) {
                        ret = cleanup_ref_head(trans, fs_info, locked_ref);
                        if (ret > 0 ) {
                                /* We dropped our lock, we need to loop. */
                                ret = 0;
                                continue;
                        } else if (ret) {
                                return ret;
                        }
                        locked_ref = NULL;
                        count++;
                        continue;
                }

                actual_count++;
                ref->in_tree = 0;
                rb_erase(&ref->ref_node, &locked_ref->ref_tree);
                RB_CLEAR_NODE(&ref->ref_node);
                if (!list_empty(&ref->add_list))
                        list_del(&ref->add_list);
                /*
                 * When we play the delayed ref, also correct the ref_mod on
                 * head
                 */
                switch (ref->action) {
                case BTRFS_ADD_DELAYED_REF:
                case BTRFS_ADD_DELAYED_EXTENT:
                        locked_ref->ref_mod -= ref->ref_mod;
                        break;
                case BTRFS_DROP_DELAYED_REF:
                        locked_ref->ref_mod += ref->ref_mod;
                        break;
                default:
                        WARN_ON(1);
                }
                atomic_dec(&delayed_refs->num_entries);

                /*
                 * Record the must-insert_reserved flag before we drop the spin
                 * lock.
                 */
                must_insert_reserved = locked_ref->must_insert_reserved;
                locked_ref->must_insert_reserved = 0;

                extent_op = locked_ref->extent_op;
                locked_ref->extent_op = NULL;
                spin_unlock(&locked_ref->lock);

                ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
                                          must_insert_reserved);

                btrfs_free_delayed_extent_op(extent_op);
                if (ret) {
                        unselect_delayed_ref_head(delayed_refs, locked_ref);
                        btrfs_put_delayed_ref(ref);
                        btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
                                    ret);
                        return ret;
                }

                btrfs_put_delayed_ref(ref);
                count++;
                cond_resched();
        }

        /*
         * We don't want to include ref heads since we can have empty ref heads
         * and those will drastically skew our runtime down since we just do
         * accounting, no actual extent tree updates.
         */
        if (actual_count > 0) {
                u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
                u64 avg;

                /*
                 * We weigh the current average higher than our current runtime
                 * to avoid large swings in the average.
                 */
                spin_lock(&delayed_refs->lock);
                avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
                fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
                spin_unlock(&delayed_refs->lock);
        }
        return 0;
}
2813
#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning.
 *
 * Walk from the root alternating left/right to land on a bytenr roughly in
 * the middle of the tree.  Returns 0 for an empty tree (previously the
 * return value was uninitialized in that case).
 */
static u64 find_middle(struct rb_root *root)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_delayed_ref_node *entry;
        int alt = 1;
        u64 middle = 0;

        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);

                middle = entry->bytenr;

                if (alt)
                        n = n->rb_left;
                else
                        n = n->rb_right;

                alt = 1 - alt;
        }
        return middle;
}
#endif
2856
2857 static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
2858 {
2859         u64 num_bytes;
2860
2861         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2862                              sizeof(struct btrfs_extent_inline_ref));
2863         if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2864                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2865
2866         /*
2867          * We don't ever fill up leaves all the way so multiply by 2 just to be
2868          * closer to what we're really going to want to use.
2869          */
2870         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
2871 }
2872
2873 /*
2874  * Takes the number of bytes to be csumm'ed and figures out how many leaves it
2875  * would require to store the csums for that many bytes.
2876  */
2877 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
2878 {
2879         u64 csum_size;
2880         u64 num_csums_per_leaf;
2881         u64 num_csums;
2882
2883         csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
2884         num_csums_per_leaf = div64_u64(csum_size,
2885                         (u64)btrfs_super_csum_size(fs_info->super_copy));
2886         num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
2887         num_csums += num_csums_per_leaf - 1;
2888         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2889         return num_csums;
2890 }
2891
/*
 * Check whether the global block reserve covers the worst-case metadata
 * updates for the queued delayed refs, pending csum deletions and dirty
 * block groups.
 *
 * Returns 1 if the reserve looks too small (caller should throttle or
 * flush delayed refs), 0 otherwise.
 */
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
                                       struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *global_rsv;
        u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
        u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
        unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
        u64 num_bytes, num_dirty_bgs_bytes;
        int ret = 0;

        num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
        num_heads = heads_to_leaves(fs_info, num_heads);
        if (num_heads > 1)
                num_bytes += (num_heads - 1) * fs_info->nodesize;
        /* Double for slack: leaves are never filled completely. */
        num_bytes <<= 1;
        num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
                                                        fs_info->nodesize;
        num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
                                                             num_dirty_bgs);
        global_rsv = &fs_info->global_block_rsv;

        /*
         * If we can't allocate any more chunks lets make sure we have _lots_ of
         * wiggle room since running delayed refs can create more delayed refs.
         */
        if (global_rsv->space_info->full) {
                num_dirty_bgs_bytes <<= 1;
                num_bytes <<= 1;
        }

        spin_lock(&global_rsv->lock);
        if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
                ret = 1;
        spin_unlock(&global_rsv->lock);
        return ret;
}
2928
2929 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2930                                        struct btrfs_fs_info *fs_info)
2931 {
2932         u64 num_entries =
2933                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2934         u64 avg_runtime;
2935         u64 val;
2936
2937         smp_mb();
2938         avg_runtime = fs_info->avg_delayed_ref_runtime;
2939         val = num_entries * avg_runtime;
2940         if (val >= NSEC_PER_SEC)
2941                 return 1;
2942         if (val >= NSEC_PER_SEC / 2)
2943                 return 2;
2944
2945         return btrfs_check_space_for_delayed_refs(trans, fs_info);
2946 }
2947
/*
 * Context for flushing delayed refs from a background worker; see
 * btrfs_async_run_delayed_refs() and delayed_ref_async_start().
 */
struct async_delayed_refs {
        struct btrfs_root *root;        /* root used to join the transaction */
        u64 transid;                    /* skip the flush if the transaction moved on */
        int count;                      /* how many refs to run */
        int error;                      /* first error hit by the worker */
        int sync;                       /* caller waits and frees; worker completes */
        struct completion wait;         /* signalled when a sync worker finishes */
        struct btrfs_work work;         /* the queued work item itself */
};
2957
/* Map an embedded work item back to its async_delayed_refs context. */
static inline struct async_delayed_refs *
to_async_delayed_refs(struct btrfs_work *work)
{
        return container_of(work, struct async_delayed_refs, work);
}
2963
/*
 * Worker entry point for btrfs_async_run_delayed_refs(): join the current
 * transaction and run async->count delayed refs, recording the first error
 * in async->error.
 */
static void delayed_ref_async_start(struct btrfs_work *work)
{
        struct async_delayed_refs *async = to_async_delayed_refs(work);
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = async->root->fs_info;
        int ret;

        /* if the commit is already started, we don't need to wait here */
        if (btrfs_transaction_blocked(fs_info))
                goto done;

        trans = btrfs_join_transaction(async->root);
        if (IS_ERR(trans)) {
                async->error = PTR_ERR(trans);
                goto done;
        }

        /*
         * trans->sync means that when we call end_transaction, we won't
         * wait on delayed refs
         */
        trans->sync = true;

        /* Don't bother flushing if we got into a different transaction */
        if (trans->transid > async->transid)
                goto end;

        ret = btrfs_run_delayed_refs(trans, async->count);
        if (ret)
                async->error = ret;
end:
        ret = btrfs_end_transaction(trans);
        if (ret && !async->error)
                async->error = ret;
done:
        /* sync callers wait and free async themselves; async ones don't */
        if (async->sync)
                complete(&async->wait);
        else
                kfree(async);
}
3004
3005 int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
3006                                  unsigned long count, u64 transid, int wait)
3007 {
3008         struct async_delayed_refs *async;
3009         int ret;
3010
3011         async = kmalloc(sizeof(*async), GFP_NOFS);
3012         if (!async)
3013                 return -ENOMEM;
3014
3015         async->root = fs_info->tree_root;
3016         async->count = count;
3017         async->error = 0;
3018         async->transid = transid;
3019         if (wait)
3020                 async->sync = 1;
3021         else
3022                 async->sync = 0;
3023         init_completion(&async->wait);
3024
3025         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
3026                         delayed_ref_async_start, NULL, NULL);
3027
3028         btrfs_queue_work(fs_info->extent_workers, &async->work);
3029
3030         if (wait) {
3031                 wait_for_completion(&async->wait);
3032                 ret = async->error;
3033                 kfree(async);
3034                 return ret;
3035         }
3036         return 0;
3037 }
3038
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.  A count of (unsigned long)-1 means
 * run everything, looping until the ref tree is empty.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           unsigned long count)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct rb_node *node;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        int ret;
        int run_all = count == (unsigned long)-1;
        bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
                return 0;

        if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
                return 0;

        delayed_refs = &trans->transaction->delayed_refs;
        if (count == 0)
                /* Roughly everything queued right now, but not new arrivals */
                count = atomic_read(&delayed_refs->num_entries) * 2;

again:
#ifdef SCRAMBLE_DELAYED_REFS
        delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
        /* Don't flush pending block groups while running refs; restored below */
        trans->can_flush_pending_bgs = false;
        ret = __btrfs_run_delayed_refs(trans, count);
        if (ret < 0) {
                btrfs_abort_transaction(trans, ret);
                return ret;
        }

        if (run_all) {
                if (!list_empty(&trans->new_bgs))
                        btrfs_create_pending_block_groups(trans);

                spin_lock(&delayed_refs->lock);
                node = rb_first(&delayed_refs->href_root);
                if (!node) {
                        /* Tree is empty, we're done */
                        spin_unlock(&delayed_refs->lock);
                        goto out;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
                refcount_inc(&head->refs);
                spin_unlock(&delayed_refs->lock);

                /* Mutex was contended, block until it's released and retry. */
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);

                btrfs_put_delayed_ref_head(head);
                cond_resched();
                goto again;
        }
out:
        trans->can_flush_pending_bgs = can_flush_pending_bgs;
        return 0;
}
3109
3110 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3111                                 struct btrfs_fs_info *fs_info,
3112                                 u64 bytenr, u64 num_bytes, u64 flags,
3113                                 int level, int is_data)
3114 {
3115         struct btrfs_delayed_extent_op *extent_op;
3116         int ret;
3117
3118         extent_op = btrfs_alloc_delayed_extent_op();
3119         if (!extent_op)
3120                 return -ENOMEM;
3121
3122         extent_op->flags_to_set = flags;
3123         extent_op->update_flags = true;
3124         extent_op->update_key = false;
3125         extent_op->is_data = is_data ? true : false;
3126         extent_op->level = level;
3127
3128         ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
3129                                           num_bytes, extent_op);
3130         if (ret)
3131                 btrfs_free_delayed_extent_op(extent_op);
3132         return ret;
3133 }
3134
/*
 * Scan the running transaction's delayed refs for a reference to
 * @bytenr held by anyone other than (@root, @objectid, @offset).
 *
 * Returns 1 if a cross reference exists (any shared ref, or a data ref
 * not matching the caller), 0 if none was found (or no transaction /
 * no delayed head for @bytenr), or -EAGAIN if the head mutex was
 * contended — in that case @path has been released and the caller
 * should retry.
 */
static noinline int check_delayed_ref(struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_data_ref *data_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_transaction *cur_trans;
        struct rb_node *node;
        int ret = 0;

        /* Pin the running transaction so it can't go away under us */
        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans)
                refcount_inc(&cur_trans->use_count);
        spin_unlock(&root->fs_info->trans_lock);
        if (!cur_trans)
                return 0;

        delayed_refs = &cur_trans->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (!head) {
                spin_unlock(&delayed_refs->lock);
                btrfs_put_transaction(cur_trans);
                return 0;
        }

        if (!mutex_trylock(&head->mutex)) {
                refcount_inc(&head->refs);
                spin_unlock(&delayed_refs->lock);

                btrfs_release_path(path);

                /*
                 * Mutex was contended, block until it's released and let
                 * caller try again
                 */
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                btrfs_put_transaction(cur_trans);
                return -EAGAIN;
        }
        spin_unlock(&delayed_refs->lock);

        spin_lock(&head->lock);
        /*
         * XXX: We should replace this with a proper search function in the
         * future.
         */
        for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                /* If it's a shared ref we know a cross reference exists */
                if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
                        ret = 1;
                        break;
                }

                data_ref = btrfs_delayed_node_to_data_ref(ref);

                /*
                 * If our ref doesn't match the one we're currently looking at
                 * then we have a cross reference.
                 */
                if (data_ref->root != root->root_key.objectid ||
                    data_ref->objectid != objectid ||
                    data_ref->offset != offset) {
                        ret = 1;
                        break;
                }
        }
        spin_unlock(&head->lock);
        mutex_unlock(&head->mutex);
        btrfs_put_transaction(cur_trans);
        return ret;
}
3213
/*
 * Check the committed extent tree to see whether (@root, @objectid,
 * @offset) is the sole holder of the data extent at @bytenr.
 *
 * Returns 0 when the extent item carries exactly one inline data ref
 * that matches the caller and its generation is newer than the root's
 * last snapshot; 1 when a cross reference may exist; -ENOENT when no
 * matching extent item is found; or a negative errno from the search.
 */
static noinline int check_committed_ref(struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        u64 objectid, u64 offset, u64 bytenr)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_extent_item *ei;
        struct btrfs_key key;
        u32 item_size;
        int type;
        int ret;

        /* Search for the last possible key, then step back one slot */
        key.objectid = bytenr;
        key.offset = (u64)-1;
        key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0); /* Corruption */

        ret = -ENOENT;
        if (path->slots[0] == 0)
                goto out;

        path->slots[0]--;
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
                goto out;

        ret = 1;
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
                goto out;
        }
#endif
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

        /* Anything beyond one inline data ref means the extent is shared */
        if (item_size != sizeof(*ei) +
            btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
                goto out;

        /* Extent predating the last snapshot may be referenced by it */
        if (btrfs_extent_generation(leaf, ei) <=
            btrfs_root_last_snapshot(&root->root_item))
                goto out;

        iref = (struct btrfs_extent_inline_ref *)(ei + 1);

        type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
        if (type != BTRFS_EXTENT_DATA_REF_KEY)
                goto out;

        /* The single inline ref must exactly match the caller's identity */
        ref = (struct btrfs_extent_data_ref *)(&iref->offset);
        if (btrfs_extent_refs(leaf, ei) !=
            btrfs_extent_data_ref_count(leaf, ref) ||
            btrfs_extent_data_ref_root(leaf, ref) !=
            root->root_key.objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                goto out;

        ret = 0;
out:
        return ret;
}
3286
3287 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
3288                           u64 bytenr)
3289 {
3290         struct btrfs_path *path;
3291         int ret;
3292         int ret2;
3293
3294         path = btrfs_alloc_path();
3295         if (!path)
3296                 return -ENOENT;
3297
3298         do {
3299                 ret = check_committed_ref(root, path, objectid,
3300                                           offset, bytenr);
3301                 if (ret && ret != -ENOENT)
3302                         goto out;
3303
3304                 ret2 = check_delayed_ref(root, path, objectid,
3305                                          offset, bytenr);
3306         } while (ret2 == -EAGAIN);
3307
3308         if (ret2 && ret2 != -ENOENT) {
3309                 ret = ret2;
3310                 goto out;
3311         }
3312
3313         if (ret != -ENOENT || ret2 != -ENOENT)
3314                 ret = 0;
3315 out:
3316         btrfs_free_path(path);
3317         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3318                 WARN_ON(ret > 0);
3319         return ret;
3320 }
3321
3322 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3323                            struct btrfs_root *root,
3324                            struct extent_buffer *buf,
3325                            int full_backref, int inc)
3326 {
3327         struct btrfs_fs_info *fs_info = root->fs_info;
3328         u64 bytenr;
3329         u64 num_bytes;
3330         u64 parent;
3331         u64 ref_root;
3332         u32 nritems;
3333         struct btrfs_key key;
3334         struct btrfs_file_extent_item *fi;
3335         int i;
3336         int level;
3337         int ret = 0;
3338         int (*process_func)(struct btrfs_trans_handle *,
3339                             struct btrfs_root *,
3340                             u64, u64, u64, u64, u64, u64);
3341
3342
3343         if (btrfs_is_testing(fs_info))
3344                 return 0;
3345
3346         ref_root = btrfs_header_owner(buf);
3347         nritems = btrfs_header_nritems(buf);
3348         level = btrfs_header_level(buf);
3349
3350         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3351                 return 0;
3352
3353         if (inc)
3354                 process_func = btrfs_inc_extent_ref;
3355         else
3356                 process_func = btrfs_free_extent;
3357
3358         if (full_backref)
3359                 parent = buf->start;
3360         else
3361                 parent = 0;
3362
3363         for (i = 0; i < nritems; i++) {
3364                 if (level == 0) {
3365                         btrfs_item_key_to_cpu(buf, &key, i);
3366                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3367                                 continue;
3368                         fi = btrfs_item_ptr(buf, i,
3369                                             struct btrfs_file_extent_item);
3370                         if (btrfs_file_extent_type(buf, fi) ==
3371                             BTRFS_FILE_EXTENT_INLINE)
3372                                 continue;
3373                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3374                         if (bytenr == 0)
3375                                 continue;
3376
3377                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3378                         key.offset -= btrfs_file_extent_offset(buf, fi);
3379                         ret = process_func(trans, root, bytenr, num_bytes,
3380                                            parent, ref_root, key.objectid,
3381                                            key.offset);
3382                         if (ret)
3383                                 goto fail;
3384                 } else {
3385                         bytenr = btrfs_node_blockptr(buf, i);
3386                         num_bytes = fs_info->nodesize;
3387                         ret = process_func(trans, root, bytenr, num_bytes,
3388                                            parent, ref_root, level - 1, 0);
3389                         if (ret)
3390                                 goto fail;
3391                 }
3392         }
3393         return 0;
3394 fail:
3395         return ret;
3396 }
3397
/* Add one reference for every extent pointed to by @buf; see __btrfs_mod_ref(). */
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref)
{
        return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}
3403
/* Drop one reference for every extent pointed to by @buf; see __btrfs_mod_ref(). */
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref)
{
        return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
3409
3410 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3411                                  struct btrfs_fs_info *fs_info,
3412                                  struct btrfs_path *path,
3413                                  struct btrfs_block_group_cache *cache)
3414 {
3415         int ret;
3416         struct btrfs_root *extent_root = fs_info->extent_root;
3417         unsigned long bi;
3418         struct extent_buffer *leaf;
3419
3420         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3421         if (ret) {
3422                 if (ret > 0)
3423                         ret = -ENOENT;
3424                 goto fail;
3425         }
3426
3427         leaf = path->nodes[0];
3428         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3429         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3430         btrfs_mark_buffer_dirty(leaf);
3431 fail:
3432         btrfs_release_path(path);
3433         return ret;
3434
3435 }
3436
3437 static struct btrfs_block_group_cache *
3438 next_block_group(struct btrfs_fs_info *fs_info,
3439                  struct btrfs_block_group_cache *cache)
3440 {
3441         struct rb_node *node;
3442
3443         spin_lock(&fs_info->block_group_cache_lock);
3444
3445         /* If our block group was removed, we need a full search. */
3446         if (RB_EMPTY_NODE(&cache->cache_node)) {
3447                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3448
3449                 spin_unlock(&fs_info->block_group_cache_lock);
3450                 btrfs_put_block_group(cache);
3451                 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
3452         }
3453         node = rb_next(&cache->cache_node);
3454         btrfs_put_block_group(cache);
3455         if (node) {
3456                 cache = rb_entry(node, struct btrfs_block_group_cache,
3457                                  cache_node);
3458                 btrfs_get_block_group(cache);
3459         } else
3460                 cache = NULL;
3461         spin_unlock(&fs_info->block_group_cache_lock);
3462         return cache;
3463 }
3464
3465 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3466                             struct btrfs_trans_handle *trans,
3467                             struct btrfs_path *path)
3468 {
3469         struct btrfs_fs_info *fs_info = block_group->fs_info;
3470         struct btrfs_root *root = fs_info->tree_root;
3471         struct inode *inode = NULL;
3472         struct extent_changeset *data_reserved = NULL;
3473         u64 alloc_hint = 0;
3474         int dcs = BTRFS_DC_ERROR;
3475         u64 num_pages = 0;
3476         int retries = 0;
3477         int ret = 0;
3478
3479         /*
3480          * If this block group is smaller than 100 megs don't bother caching the
3481          * block group.
3482          */
3483         if (block_group->key.offset < (100 * SZ_1M)) {
3484                 spin_lock(&block_group->lock);
3485                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3486                 spin_unlock(&block_group->lock);
3487                 return 0;
3488         }
3489
3490         if (trans->aborted)
3491                 return 0;
3492 again:
3493         inode = lookup_free_space_inode(fs_info, block_group, path);
3494         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3495                 ret = PTR_ERR(inode);
3496                 btrfs_release_path(path);
3497                 goto out;
3498         }
3499
3500         if (IS_ERR(inode)) {
3501                 BUG_ON(retries);
3502                 retries++;
3503
3504                 if (block_group->ro)
3505                         goto out_free;
3506
3507                 ret = create_free_space_inode(fs_info, trans, block_group,
3508                                               path);
3509                 if (ret)
3510                         goto out_free;
3511                 goto again;
3512         }
3513
3514         /*
3515          * We want to set the generation to 0, that way if anything goes wrong
3516          * from here on out we know not to trust this cache when we load up next
3517          * time.
3518          */
3519         BTRFS_I(inode)->generation = 0;
3520         ret = btrfs_update_inode(trans, root, inode);
3521         if (ret) {
3522                 /*
3523                  * So theoretically we could recover from this, simply set the
3524                  * super cache generation to 0 so we know to invalidate the
3525                  * cache, but then we'd have to keep track of the block groups
3526                  * that fail this way so we know we _have_ to reset this cache
3527                  * before the next commit or risk reading stale cache.  So to
3528                  * limit our exposure to horrible edge cases lets just abort the
3529                  * transaction, this only happens in really bad situations
3530                  * anyway.
3531                  */
3532                 btrfs_abort_transaction(trans, ret);
3533                 goto out_put;
3534         }
3535         WARN_ON(ret);
3536
3537         /* We've already setup this tr