/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means we only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means we only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op,
                                int no_quota);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

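/*
 * Return non-zero once caching of this block group has finished, either
 * successfully (BTRFS_CACHE_FINISHED) or with an error (BTRFS_CACHE_ERROR).
 * The memory barrier makes sure we see an up-to-date ->cached value.
 */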
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

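/*
 * Exclude the byte ranges of any superblock mirrors that fall inside this
 * block group from free-space accounting, and add their length to
 * cache->bytes_super, so the allocator stays away from the superblock
 * copies.
 */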
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for anything that can't be used
 * yet, because its free space will not be released until the transaction
 * commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

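/*
 * Worker that populates a block group's free space cache the slow way:
 * walk the extent tree via the commit root and record the gaps between
 * allocated extents as free space.  Waiters on caching_ctl->wait are woken
 * periodically as free space is found.
 */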
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

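/*
 * Start caching a block group's free space.  Try the fast path first
 * (loading the on-disk free space cache, when the space_cache mount option
 * allows it); otherwise kick off caching_thread() to rebuild the cache from
 * the extent tree.  With load_cache_only set, only the fast path is tried.
 */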
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but this could happen I think in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

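/*
 * Find the space_info whose flags match the block group type bits in
 * @flags (data, metadata, system), or NULL if none exists yet.
 */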
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = root->nodesize;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs are generic, and
 * can be used in all cases the implicit back refs are used. The major
 * shortcoming of the full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
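/*
 * For illustration only (the numbers below are hypothetical): a data extent
 * at bytenr 136708096 referenced once by inode 257 at file offset 0 in the
 * fs tree (root 5) would yield the keys:
 *
 *   implicit back ref: (136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *                       hash_extent_data_ref(5, 257, 0))
 *   full back ref:     (136708096, BTRFS_SHARED_DATA_REF_KEY,
 *                       bytenr of the leaf holding the file extent item)
 */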

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
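/*
 * Convert a v0 extent item to the current format: re-derive the owner from
 * the old ref items if needed, grow the item, and fill in the new extent
 * flags (plus a tree_block_info for metadata extents).
 */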
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

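/*
 * Hash of (root objectid, inode objectid, file offset), used as the key
 * offset of implicit data back refs.  Note the 31-bit (not 32-bit) shift
 * below: the result is stored on disk as a key offset, so it cannot be
 * changed.
 */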
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

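/*
 * Look up the data back ref item for the given extent.  With a parent this
 * is a single shared ref lookup; otherwise we search the hash bucket and
 * walk forward over neighbouring items to handle hash collisions.
 */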
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

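/*
 * Insert a new data back ref, or bump the count of an existing one.  On a
 * hash collision (-EEXIST with a non-matching ref) the key offset is
 * incremented until a matching or free slot is found.
 */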
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

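/*
 * Drop refs_to_drop references from the data back ref item at @path,
 * deleting the item (and setting *last_ref) when the count reaches zero.
 */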
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop, int *last_ref)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
                *last_ref = 1;
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

1333 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1334                                           struct btrfs_path *path,
1335                                           struct btrfs_extent_inline_ref *iref)
1336 {
1337         struct btrfs_key key;
1338         struct extent_buffer *leaf;
1339         struct btrfs_extent_data_ref *ref1;
1340         struct btrfs_shared_data_ref *ref2;
1341         u32 num_refs = 0;
1342
1343         leaf = path->nodes[0];
1344         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1345         if (iref) {
1346                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1347                     BTRFS_EXTENT_DATA_REF_KEY) {
1348                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1349                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1350                 } else {
1351                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1352                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1353                 }
1354         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1355                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1356                                       struct btrfs_extent_data_ref);
1357                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1358         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1359                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1360                                       struct btrfs_shared_data_ref);
1361                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1362 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1363         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1364                 struct btrfs_extent_ref_v0 *ref0;
1365                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1366                                       struct btrfs_extent_ref_v0);
1367                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1368 #endif
1369         } else {
1370                 WARN_ON(1);
1371         }
1372         return num_refs;
1373 }
1374
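     /*
      * Look up a tree block back ref item: a SHARED_BLOCK_REF keyed on the
      * parent block when one is given, otherwise a TREE_BLOCK_REF keyed on
      * the owning root.  Returns 0 if found and -ENOENT if not, retrying
      * with the V0 key format when the compat code is built in.
      */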
1375 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1376                                           struct btrfs_root *root,
1377                                           struct btrfs_path *path,
1378                                           u64 bytenr, u64 parent,
1379                                           u64 root_objectid)
1380 {
1381         struct btrfs_key key;
1382         int ret;
1383
1384         key.objectid = bytenr;
1385         if (parent) {
1386                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1387                 key.offset = parent;
1388         } else {
1389                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1390                 key.offset = root_objectid;
1391         }
1392
1393         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1394         if (ret > 0)
1395                 ret = -ENOENT;
1396 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1397         if (ret == -ENOENT && parent) {
1398                 btrfs_release_path(path);
1399                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1400                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1401                 if (ret > 0)
1402                         ret = -ENOENT;
1403         }
1404 #endif
1405         return ret;
1406 }
1407
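     /*
      * Insert a tree block back ref item, keyed the same way as in
      * lookup_tree_block_ref().  Tree block refs carry no payload, so an
      * empty (zero size) item is enough.
      */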
1408 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1409                                           struct btrfs_root *root,
1410                                           struct btrfs_path *path,
1411                                           u64 bytenr, u64 parent,
1412                                           u64 root_objectid)
1413 {
1414         struct btrfs_key key;
1415         int ret;
1416
1417         key.objectid = bytenr;
1418         if (parent) {
1419                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1420                 key.offset = parent;
1421         } else {
1422                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1423                 key.offset = root_objectid;
1424         }
1425
1426         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1427         btrfs_release_path(path);
1428         return ret;
1429 }
1430
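     /*
      * Map (parent, owner) to the back ref key type:
      *
      *   metadata (owner < BTRFS_FIRST_FREE_OBJECTID):
      *       parent set -> BTRFS_SHARED_BLOCK_REF_KEY
      *       no parent  -> BTRFS_TREE_BLOCK_REF_KEY
      *   data:
      *       parent set -> BTRFS_SHARED_DATA_REF_KEY
      *       no parent  -> BTRFS_EXTENT_DATA_REF_KEY
      */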
1431 static inline int extent_ref_type(u64 parent, u64 owner)
1432 {
1433         int type;
1434         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1435                 if (parent > 0)
1436                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1437                 else
1438                         type = BTRFS_TREE_BLOCK_REF_KEY;
1439         } else {
1440                 if (parent > 0)
1441                         type = BTRFS_SHARED_DATA_REF_KEY;
1442                 else
1443                         type = BTRFS_EXTENT_DATA_REF_KEY;
1444         }
1445         return type;
1446 }
1447
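     /*
      * Return the key that follows the current path position, looking
      * upwards from the given level.  Returns 0 and fills the key on
      * success, or 1 if the path already points at the last key.
      */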
1448 static int find_next_key(struct btrfs_path *path, int level,
1449                          struct btrfs_key *key)
1451 {
1452         for (; level < BTRFS_MAX_LEVEL; level++) {
1453                 if (!path->nodes[level])
1454                         break;
1455                 if (path->slots[level] + 1 >=
1456                     btrfs_header_nritems(path->nodes[level]))
1457                         continue;
1458                 if (level == 0)
1459                         btrfs_item_key_to_cpu(path->nodes[level], key,
1460                                               path->slots[level] + 1);
1461                 else
1462                         btrfs_node_key_to_cpu(path->nodes[level], key,
1463                                               path->slots[level] + 1);
1464                 return 0;
1465         }
1466         return 1;
1467 }
1468
1469 /*
1470  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1471  * to the address of the inline back ref, and 0 is returned.
1472  *
1473  * If the back ref isn't found, *ref_ret is set to the address where it
1474  * should be inserted, and -ENOENT is returned.
1475  *
1476  * If insert is true and there are too many inline back refs, the path
1477  * points to the extent item, and -EAGAIN is returned.
1478  *
1479  * NOTE: inline back refs are ordered in the same way that back ref
1480  *       items in the tree are ordered.
1481  */
1482 static noinline_for_stack
1483 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1484                                  struct btrfs_root *root,
1485                                  struct btrfs_path *path,
1486                                  struct btrfs_extent_inline_ref **ref_ret,
1487                                  u64 bytenr, u64 num_bytes,
1488                                  u64 parent, u64 root_objectid,
1489                                  u64 owner, u64 offset, int insert)
1490 {
1491         struct btrfs_key key;
1492         struct extent_buffer *leaf;
1493         struct btrfs_extent_item *ei;
1494         struct btrfs_extent_inline_ref *iref;
1495         u64 flags;
1496         u64 item_size;
1497         unsigned long ptr;
1498         unsigned long end;
1499         int extra_size;
1500         int type;
1501         int want;
1502         int ret;
1503         int err = 0;
1504         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1505                                                  SKINNY_METADATA);
1506
1507         key.objectid = bytenr;
1508         key.type = BTRFS_EXTENT_ITEM_KEY;
1509         key.offset = num_bytes;
1510
1511         want = extent_ref_type(parent, owner);
1512         if (insert) {
1513                 extra_size = btrfs_extent_inline_ref_size(want);
1514                 path->keep_locks = 1;
1515         } else
1516                 extra_size = -1;
1517
1518         /*
1519          * Owner is our parent level, so we can just add one to get the level
1520          * for the block we are interested in.
1521          */
1522         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1523                 key.type = BTRFS_METADATA_ITEM_KEY;
1524                 key.offset = owner;
1525         }
1526
1527 again:
1528         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1529         if (ret < 0) {
1530                 err = ret;
1531                 goto out;
1532         }
1533
1534         /*
1535          * We may be a newly converted filesystem that still has the old fat
1536          * extent entries for metadata, so try and see if we have one of those.
1537          */
1538         if (ret > 0 && skinny_metadata) {
1539                 skinny_metadata = false;
1540                 if (path->slots[0]) {
1541                         path->slots[0]--;
1542                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1543                                               path->slots[0]);
1544                         if (key.objectid == bytenr &&
1545                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1546                             key.offset == num_bytes)
1547                                 ret = 0;
1548                 }
1549                 if (ret) {
1550                         key.objectid = bytenr;
1551                         key.type = BTRFS_EXTENT_ITEM_KEY;
1552                         key.offset = num_bytes;
1553                         btrfs_release_path(path);
1554                         goto again;
1555                 }
1556         }
1557
1558         if (ret && !insert) {
1559                 err = -ENOENT;
1560                 goto out;
1561         } else if (WARN_ON(ret)) {
1562                 err = -EIO;
1563                 goto out;
1564         }
1565
1566         leaf = path->nodes[0];
1567         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1568 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1569         if (item_size < sizeof(*ei)) {
1570                 if (!insert) {
1571                         err = -ENOENT;
1572                         goto out;
1573                 }
1574                 ret = convert_extent_item_v0(trans, root, path, owner,
1575                                              extra_size);
1576                 if (ret < 0) {
1577                         err = ret;
1578                         goto out;
1579                 }
1580                 leaf = path->nodes[0];
1581                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1582         }
1583 #endif
1584         BUG_ON(item_size < sizeof(*ei));
1585
1586         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1587         flags = btrfs_extent_flags(leaf, ei);
1588
1589         ptr = (unsigned long)(ei + 1);
1590         end = (unsigned long)ei + item_size;
1591
1592         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1593                 ptr += sizeof(struct btrfs_tree_block_info);
1594                 BUG_ON(ptr > end);
1595         }
1596
1597         err = -ENOENT;
1598         while (1) {
1599                 if (ptr >= end) {
1600                         WARN_ON(ptr > end);
1601                         break;
1602                 }
1603                 iref = (struct btrfs_extent_inline_ref *)ptr;
1604                 type = btrfs_extent_inline_ref_type(leaf, iref);
1605                 if (want < type)
1606                         break;
1607                 if (want > type) {
1608                         ptr += btrfs_extent_inline_ref_size(type);
1609                         continue;
1610                 }
1611
1612                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1613                         struct btrfs_extent_data_ref *dref;
1614                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1615                         if (match_extent_data_ref(leaf, dref, root_objectid,
1616                                                   owner, offset)) {
1617                                 err = 0;
1618                                 break;
1619                         }
1620                         if (hash_extent_data_ref_item(leaf, dref) <
1621                             hash_extent_data_ref(root_objectid, owner, offset))
1622                                 break;
1623                 } else {
1624                         u64 ref_offset;
1625                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1626                         if (parent > 0) {
1627                                 if (parent == ref_offset) {
1628                                         err = 0;
1629                                         break;
1630                                 }
1631                                 if (ref_offset < parent)
1632                                         break;
1633                         } else {
1634                                 if (root_objectid == ref_offset) {
1635                                         err = 0;
1636                                         break;
1637                                 }
1638                                 if (ref_offset < root_objectid)
1639                                         break;
1640                         }
1641                 }
1642                 ptr += btrfs_extent_inline_ref_size(type);
1643         }
1644         if (err == -ENOENT && insert) {
1645                 if (item_size + extra_size >=
1646                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1647                         err = -EAGAIN;
1648                         goto out;
1649                 }
1650                 /*
1651                  * To add a new inline back ref, we have to make sure
1652                  * there is no corresponding back ref item.
1653                  * For simplicity, we just do not add a new inline back
1654                  * ref if there is any kind of item for this block.
1655                  */
1656                 if (find_next_key(path, 0, &key) == 0 &&
1657                     key.objectid == bytenr &&
1658                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1659                         err = -EAGAIN;
1660                         goto out;
1661                 }
1662         }
1663         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1664 out:
1665         if (insert) {
1666                 path->keep_locks = 0;
1667                 btrfs_unlock_up_safe(path, 1);
1668         }
1669         return err;
1670 }
1671
1672 /*
1673  * Helper to add a new inline back ref.
1674  */
1675 static noinline_for_stack
1676 void setup_inline_extent_backref(struct btrfs_root *root,
1677                                  struct btrfs_path *path,
1678                                  struct btrfs_extent_inline_ref *iref,
1679                                  u64 parent, u64 root_objectid,
1680                                  u64 owner, u64 offset, int refs_to_add,
1681                                  struct btrfs_delayed_extent_op *extent_op)
1682 {
1683         struct extent_buffer *leaf;
1684         struct btrfs_extent_item *ei;
1685         unsigned long ptr;
1686         unsigned long end;
1687         unsigned long item_offset;
1688         u64 refs;
1689         int size;
1690         int type;
1691
1692         leaf = path->nodes[0];
1693         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1694         item_offset = (unsigned long)iref - (unsigned long)ei;
1695
1696         type = extent_ref_type(parent, owner);
1697         size = btrfs_extent_inline_ref_size(type);
1698
1699         btrfs_extend_item(root, path, size);
1700
1701         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1702         refs = btrfs_extent_refs(leaf, ei);
1703         refs += refs_to_add;
1704         btrfs_set_extent_refs(leaf, ei, refs);
1705         if (extent_op)
1706                 __run_delayed_extent_op(extent_op, leaf, ei);
1707
1708         ptr = (unsigned long)ei + item_offset;
1709         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1710         if (ptr < end - size)
1711                 memmove_extent_buffer(leaf, ptr + size, ptr,
1712                                       end - size - ptr);
1713
1714         iref = (struct btrfs_extent_inline_ref *)ptr;
1715         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1716         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1717                 struct btrfs_extent_data_ref *dref;
1718                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1719                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1720                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1721                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1722                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1723         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1724                 struct btrfs_shared_data_ref *sref;
1725                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1726                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1727                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1728         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1729                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1730         } else {
1731                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1732         }
1733         btrfs_mark_buffer_dirty(leaf);
1734 }
1735
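     /*
      * Look up a back ref for the given extent: try the inline back ref
      * first, and on -ENOENT release the path and fall back to the
      * standalone back ref item (tree block or data ref, depending on
      * the owner).
      */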
1736 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1737                                  struct btrfs_root *root,
1738                                  struct btrfs_path *path,
1739                                  struct btrfs_extent_inline_ref **ref_ret,
1740                                  u64 bytenr, u64 num_bytes, u64 parent,
1741                                  u64 root_objectid, u64 owner, u64 offset)
1742 {
1743         int ret;
1744
1745         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1746                                            bytenr, num_bytes, parent,
1747                                            root_objectid, owner, offset, 0);
1748         if (ret != -ENOENT)
1749                 return ret;
1750
1751         btrfs_release_path(path);
1752         *ref_ret = NULL;
1753
1754         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1755                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1756                                             root_objectid);
1757         } else {
1758                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1759                                              root_objectid, owner, offset);
1760         }
1761         return ret;
1762 }
1763
1764 /*
1765  * Helper to update or remove an inline back ref.
1766  */
1767 static noinline_for_stack
1768 void update_inline_extent_backref(struct btrfs_root *root,
1769                                   struct btrfs_path *path,
1770                                   struct btrfs_extent_inline_ref *iref,
1771                                   int refs_to_mod,
1772                                   struct btrfs_delayed_extent_op *extent_op,
1773                                   int *last_ref)
1774 {
1775         struct extent_buffer *leaf;
1776         struct btrfs_extent_item *ei;
1777         struct btrfs_extent_data_ref *dref = NULL;
1778         struct btrfs_shared_data_ref *sref = NULL;
1779         unsigned long ptr;
1780         unsigned long end;
1781         u32 item_size;
1782         int size;
1783         int type;
1784         u64 refs;
1785
1786         leaf = path->nodes[0];
1787         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1788         refs = btrfs_extent_refs(leaf, ei);
1789         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1790         refs += refs_to_mod;
1791         btrfs_set_extent_refs(leaf, ei, refs);
1792         if (extent_op)
1793                 __run_delayed_extent_op(extent_op, leaf, ei);
1794
1795         type = btrfs_extent_inline_ref_type(leaf, iref);
1796
1797         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1798                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1799                 refs = btrfs_extent_data_ref_count(leaf, dref);
1800         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1801                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1802                 refs = btrfs_shared_data_ref_count(leaf, sref);
1803         } else {
1804                 refs = 1;
1805                 BUG_ON(refs_to_mod != -1);
1806         }
1807
1808         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1809         refs += refs_to_mod;
1810
1811         if (refs > 0) {
1812                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1813                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1814                 else
1815                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1816         } else {
1817                 *last_ref = 1;
1818                 size =  btrfs_extent_inline_ref_size(type);
1819                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1820                 ptr = (unsigned long)iref;
1821                 end = (unsigned long)ei + item_size;
1822                 if (ptr + size < end)
1823                         memmove_extent_buffer(leaf, ptr, ptr + size,
1824                                               end - ptr - size);
1825                 item_size -= size;
1826                 btrfs_truncate_item(root, path, item_size, 1);
1827         }
1828         btrfs_mark_buffer_dirty(leaf);
1829 }
1830
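     /*
      * Insert a new inline back ref, or bump the count of a matching one
      * that already exists (only legal for data extents, hence the BUG_ON
      * for metadata).  -EAGAIN from the lookup, meaning there is no room
      * for another inline ref, is passed back so the caller can fall back
      * to a standalone back ref item.
      */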
1831 static noinline_for_stack
1832 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1833                                  struct btrfs_root *root,
1834                                  struct btrfs_path *path,
1835                                  u64 bytenr, u64 num_bytes, u64 parent,
1836                                  u64 root_objectid, u64 owner,
1837                                  u64 offset, int refs_to_add,
1838                                  struct btrfs_delayed_extent_op *extent_op)
1839 {
1840         struct btrfs_extent_inline_ref *iref;
1841         int ret;
1842
1843         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1844                                            bytenr, num_bytes, parent,
1845                                            root_objectid, owner, offset, 1);
1846         if (ret == 0) {
1847                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1848                 update_inline_extent_backref(root, path, iref,
1849                                              refs_to_add, extent_op, NULL);
1850         } else if (ret == -ENOENT) {
1851                 setup_inline_extent_backref(root, path, iref, parent,
1852                                             root_objectid, owner, offset,
1853                                             refs_to_add, extent_op);
1854                 ret = 0;
1855         }
1856         return ret;
1857 }
1858
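     /*
      * Insert a standalone back ref item: a tree block ref (always a
      * single reference) for metadata, or a data ref carrying refs_to_add
      * for file extents.
      */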
1859 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1860                                  struct btrfs_root *root,
1861                                  struct btrfs_path *path,
1862                                  u64 bytenr, u64 parent, u64 root_objectid,
1863                                  u64 owner, u64 offset, int refs_to_add)
1864 {
1865         int ret;
1866         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1867                 BUG_ON(refs_to_add != 1);
1868                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1869                                             parent, root_objectid);
1870         } else {
1871                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1872                                              parent, root_objectid,
1873                                              owner, offset, refs_to_add);
1874         }
1875         return ret;
1876 }
1877
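     /*
      * Drop refs_to_drop references from a back ref.  Inline refs and data
      * ref items are updated in place (and deleted once the count hits
      * zero); a tree block ref item is deleted outright.  *last_ref is set
      * when the last reference for the extent goes away.
      */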
1878 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1879                                  struct btrfs_root *root,
1880                                  struct btrfs_path *path,
1881                                  struct btrfs_extent_inline_ref *iref,
1882                                  int refs_to_drop, int is_data, int *last_ref)
1883 {
1884         int ret = 0;
1885
1886         BUG_ON(!is_data && refs_to_drop != 1);
1887         if (iref) {
1888                 update_inline_extent_backref(root, path, iref,
1889                                              -refs_to_drop, NULL, last_ref);
1890         } else if (is_data) {
1891                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1892                                              last_ref);
1893         } else {
1894                 *last_ref = 1;
1895                 ret = btrfs_del_item(trans, root, path);
1896         }
1897         return ret;
1898 }
1899
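     /*
      * Ask the block layer to discard a byte range.  blkdev_issue_discard()
      * takes 512-byte sector counts, hence the >> 9 conversions.
      */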
1900 static int btrfs_issue_discard(struct block_device *bdev,
1901                                 u64 start, u64 len)
1902 {
1903         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1904 }
1905
1906 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1907                                 u64 num_bytes, u64 *actual_bytes)
1908 {
1909         int ret;
1910         u64 discarded_bytes = 0;
1911         struct btrfs_bio *bbio = NULL;
1912
1914         /* Tell the block device(s) that the sectors can be discarded */
1915         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1916                               bytenr, &num_bytes, &bbio, 0);
1917         /* Error condition is -ENOMEM */
1918         if (!ret) {
1919                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1920                 int i;
1921
1923                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1924                         if (!stripe->dev->can_discard)
1925                                 continue;
1926
1927                         ret = btrfs_issue_discard(stripe->dev->bdev,
1928                                                   stripe->physical,
1929                                                   stripe->length);
1930                         if (!ret)
1931                                 discarded_bytes += stripe->length;
1932                         else if (ret != -EOPNOTSUPP)
1933                                 break; /* Logic errors or -ENOMEM; -EIO is unexpected here */
1934
1935                         /*
1936                          * Just in case we get back EOPNOTSUPP for some reason,
1937                          * ignore the return value so we don't screw up
1938                          * callers of discard_extent.
1939                          */
1940                         ret = 0;
1941                 }
1942                 kfree(bbio);
1943         }
1944
1945         if (actual_bytes)
1946                 *actual_bytes = discarded_bytes;
1947
1949         if (ret == -EOPNOTSUPP)
1950                 ret = 0;
1951         return ret;
1952 }
1953
1954 /* Can return -ENOMEM */
1955 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1956                          struct btrfs_root *root,
1957                          u64 bytenr, u64 num_bytes, u64 parent,
1958                          u64 root_objectid, u64 owner, u64 offset,
1959                          int no_quota)
1960 {
1961         int ret;
1962         struct btrfs_fs_info *fs_info = root->fs_info;
1963
1964         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1965                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1966
1967         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1968                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1969                                         num_bytes,
1970                                         parent, root_objectid, (int)owner,
1971                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1972         } else {
1973                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1974                                         num_bytes,
1975                                         parent, root_objectid, owner, offset,
1976                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1977         }
1978         return ret;
1979 }
1980
1981 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1982                                   struct btrfs_root *root,
1983                                   u64 bytenr, u64 num_bytes,
1984                                   u64 parent, u64 root_objectid,
1985                                   u64 owner, u64 offset, int refs_to_add,
1986                                   int no_quota,
1987                                   struct btrfs_delayed_extent_op *extent_op)
1988 {
1989         struct btrfs_fs_info *fs_info = root->fs_info;
1990         struct btrfs_path *path;
1991         struct extent_buffer *leaf;
1992         struct btrfs_extent_item *item;
1993         struct btrfs_key key;
1994         u64 refs;
1995         int ret;
1996         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1997
1998         path = btrfs_alloc_path();
1999         if (!path)
2000                 return -ENOMEM;
2001
2002         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2003                 no_quota = 1;
2004
2005         path->reada = 1;
2006         path->leave_spinning = 1;
2007         /* This will set up the path even if it fails to insert the back ref */
2008         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2009                                            bytenr, num_bytes, parent,
2010                                            root_objectid, owner, offset,
2011                                            refs_to_add, extent_op);
2012         if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
2013                 goto out;
2014         /*
2015          * Ok, we were able to insert an inline extent ref and it appears to
2016          * be a new reference, so deal with the qgroup accounting.
2017          */
2018         if (!ret && !no_quota) {
2019                 ASSERT(root->fs_info->quota_enabled);
2020                 leaf = path->nodes[0];
2021                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2022                 item = btrfs_item_ptr(leaf, path->slots[0],
2023                                       struct btrfs_extent_item);
2024                 if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2025                         type = BTRFS_QGROUP_OPER_ADD_SHARED;
2026                 btrfs_release_path(path);
2027
2028                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2029                                               bytenr, num_bytes, type, 0);
2030                 goto out;
2031         }
2032
2033         /*
2034          * Ok, we got -EAGAIN, which means we didn't have space to insert an
2035          * inline extent ref, so just update the reference count and add a
2036          * normal backref.
2037          */
2038         leaf = path->nodes[0];
2039         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2040         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2041         refs = btrfs_extent_refs(leaf, item);
2042         if (refs)
2043                 type = BTRFS_QGROUP_OPER_ADD_SHARED;
2044         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2045         if (extent_op)
2046                 __run_delayed_extent_op(extent_op, leaf, item);
2047
2048         btrfs_mark_buffer_dirty(leaf);
2049         btrfs_release_path(path);
2050
2051         if (!no_quota) {
2052                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2053                                               bytenr, num_bytes, type, 0);
2054                 if (ret)
2055                         goto out;
2056         }
2057
2058         path->reada = 1;
2059         path->leave_spinning = 1;
2060         /* now insert the actual backref */
2061         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2062                                     path, bytenr, parent, root_objectid,
2063                                     owner, offset, refs_to_add);
2064         if (ret)
2065                 btrfs_abort_transaction(trans, root, ret);
2066 out:
2067         btrfs_free_path(path);
2068         return ret;
2069 }
2070
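     /*
      * Process a single delayed data ref: allocate the reserved extent and
      * insert its first back ref when this is a freshly allocated extent,
      * otherwise add or drop references on an existing extent according to
      * node->action.
      */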
2071 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2072                                 struct btrfs_root *root,
2073                                 struct btrfs_delayed_ref_node *node,
2074                                 struct btrfs_delayed_extent_op *extent_op,
2075                                 int insert_reserved)
2076 {
2077         int ret = 0;
2078         struct btrfs_delayed_data_ref *ref;
2079         struct btrfs_key ins;
2080         u64 parent = 0;
2081         u64 ref_root = 0;
2082         u64 flags = 0;
2083
2084         ins.objectid = node->bytenr;
2085         ins.offset = node->num_bytes;
2086         ins.type = BTRFS_EXTENT_ITEM_KEY;
2087
2088         ref = btrfs_delayed_node_to_data_ref(node);
2089         trace_run_delayed_data_ref(node, ref, node->action);
2090
2091         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2092                 parent = ref->parent;
2093         ref_root = ref->root;
2094
2095         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2096                 if (extent_op)
2097                         flags |= extent_op->flags_to_set;
2098                 ret = alloc_reserved_file_extent(trans, root,
2099                                                  parent, ref_root, flags,
2100                                                  ref->objectid, ref->offset,
2101                                                  &ins, node->ref_mod);
2102         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2103                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2104                                              node->num_bytes, parent,
2105                                              ref_root, ref->objectid,
2106                                              ref->offset, node->ref_mod,
2107                                              node->no_quota, extent_op);
2108         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2109                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2110                                           node->num_bytes, parent,
2111                                           ref_root, ref->objectid,
2112                                           ref->offset, node->ref_mod,
2113                                           extent_op, node->no_quota);
2114         } else {
2115                 BUG();
2116         }
2117         return ret;
2118 }
2119
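     /*
      * Apply a delayed extent op to an extent item in place: OR in the
      * requested flags and/or rewrite the first key recorded in the tree
      * block info.
      */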
2120 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2121                                     struct extent_buffer *leaf,
2122                                     struct btrfs_extent_item *ei)
2123 {
2124         u64 flags = btrfs_extent_flags(leaf, ei);
2125         if (extent_op->update_flags) {
2126                 flags |= extent_op->flags_to_set;
2127                 btrfs_set_extent_flags(leaf, ei, flags);
2128         }
2129
2130         if (extent_op->update_key) {
2131                 struct btrfs_tree_block_info *bi;
2132                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2133                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2134                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2135         }
2136 }
2137
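     /*
      * Find the extent item a delayed extent op refers to, trying the
      * skinny metadata key first on filesystems that support it and
      * falling back to the old extent item key, then apply the op to it.
      */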
2138 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2139                                  struct btrfs_root *root,
2140                                  struct btrfs_delayed_ref_node *node,
2141                                  struct btrfs_delayed_extent_op *extent_op)
2142 {
2143         struct btrfs_key key;
2144         struct btrfs_path *path;
2145         struct btrfs_extent_item *ei;
2146         struct extent_buffer *leaf;
2147         u32 item_size;
2148         int ret;
2149         int err = 0;
2150         int metadata = !extent_op->is_data;
2151
2152         if (trans->aborted)
2153                 return 0;
2154
2155         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2156                 metadata = 0;
2157
2158         path = btrfs_alloc_path();
2159         if (!path)
2160                 return -ENOMEM;
2161
2162         key.objectid = node->bytenr;
2163
2164         if (metadata) {
2165                 key.type = BTRFS_METADATA_ITEM_KEY;
2166                 key.offset = extent_op->level;
2167         } else {
2168                 key.type = BTRFS_EXTENT_ITEM_KEY;
2169                 key.offset = node->num_bytes;
2170         }
2171
2172 again:
2173         path->reada = 1;
2174         path->leave_spinning = 1;
2175         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2176                                 path, 0, 1);
2177         if (ret < 0) {
2178                 err = ret;
2179                 goto out;
2180         }
2181         if (ret > 0) {
2182                 if (metadata) {
2183                         if (path->slots[0] > 0) {
2184                                 path->slots[0]--;
2185                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2186                                                       path->slots[0]);
2187                                 if (key.objectid == node->bytenr &&
2188                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2189                                     key.offset == node->num_bytes)
2190                                         ret = 0;
2191                         }
2192                         if (ret > 0) {
2193                                 btrfs_release_path(path);
2194                                 metadata = 0;
2195
2196                                 key.objectid = node->bytenr;
2197                                 key.offset = node->num_bytes;
2198                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2199                                 goto again;
2200                         }
2201                 } else {
2202                         err = -EIO;
2203                         goto out;
2204                 }
2205         }
2206
2207         leaf = path->nodes[0];
2208         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2209 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2210         if (item_size < sizeof(*ei)) {
2211                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2212                                              path, (u64)-1, 0);
2213                 if (ret < 0) {
2214                         err = ret;
2215                         goto out;
2216                 }
2217                 leaf = path->nodes[0];
2218                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2219         }
2220 #endif
2221         BUG_ON(item_size < sizeof(*ei));
2222         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2223         __run_delayed_extent_op(extent_op, leaf, ei);
2224
2225         btrfs_mark_buffer_dirty(leaf);
2226 out:
2227         btrfs_free_path(path);
2228         return err;
2229 }
2230
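     /*
      * Process a single delayed tree block ref.  This mirrors
      * run_delayed_data_ref(), except that tree block refs always have a
      * ref_mod of 1 and skinny metadata keys store the block level in the
      * key offset instead of the byte size.
      */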
2231 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2232                                 struct btrfs_root *root,
2233                                 struct btrfs_delayed_ref_node *node,
2234                                 struct btrfs_delayed_extent_op *extent_op,
2235                                 int insert_reserved)
2236 {
2237         int ret = 0;
2238         struct btrfs_delayed_tree_ref *ref;
2239         struct btrfs_key ins;
2240         u64 parent = 0;
2241         u64 ref_root = 0;
2242         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2243                                                  SKINNY_METADATA);
2244
2245         ref = btrfs_delayed_node_to_tree_ref(node);
2246         trace_run_delayed_tree_ref(node, ref, node->action);
2247
2248         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2249                 parent = ref->parent;
2250         ref_root = ref->root;
2251
2252         ins.objectid = node->bytenr;
2253         if (skinny_metadata) {
2254                 ins.offset = ref->level;
2255                 ins.type = BTRFS_METADATA_ITEM_KEY;
2256         } else {
2257                 ins.offset = node->num_bytes;
2258                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2259         }
2260
2261         BUG_ON(node->ref_mod != 1);
2262         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2263                 BUG_ON(!extent_op || !extent_op->update_flags);
2264                 ret = alloc_reserved_tree_block(trans, root,
2265                                                 parent, ref_root,
2266                                                 extent_op->flags_to_set,
2267                                                 &extent_op->key,
2268                                                 ref->level, &ins,
2269                                                 node->no_quota);
2270         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2271                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2272                                              node->num_bytes, parent, ref_root,
2273                                              ref->level, 0, 1, node->no_quota,
2274                                              extent_op);
2275         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2276                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2277                                           node->num_bytes, parent, ref_root,
2278                                           ref->level, 0, 1, extent_op,
2279                                           node->no_quota);
2280         } else {
2281                 BUG();
2282         }
2283         return ret;
2284 }
2285
2286 /* helper function to actually process a single delayed ref entry */
2287 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2288                                struct btrfs_root *root,
2289                                struct btrfs_delayed_ref_node *node,
2290                                struct btrfs_delayed_extent_op *extent_op,
2291                                int insert_reserved)
2292 {
2293         int ret = 0;
2294
2295         if (trans->aborted) {
2296                 if (insert_reserved)
2297                         btrfs_pin_extent(root, node->bytenr,
2298                                          node->num_bytes, 1);
2299                 return 0;
2300         }
2301
2302         if (btrfs_delayed_ref_is_head(node)) {
2303                 struct btrfs_delayed_ref_head *head;
2304                 /*
2305                  * We've hit the end of the chain and we were supposed
2306                  * to insert this extent into the tree.  But it got
2307                  * deleted before we ever needed to insert it, so all
2308                  * we have to do is clean up the accounting.
2309                  */
2310                 BUG_ON(extent_op);
2311                 head = btrfs_delayed_node_to_head(node);
2312                 trace_run_delayed_ref_head(node, head, node->action);
2313
2314                 if (insert_reserved) {
2315                         btrfs_pin_extent(root, node->bytenr,
2316                                          node->num_bytes, 1);
2317                         if (head->is_data) {
2318                                 ret = btrfs_del_csums(trans, root,
2319                                                       node->bytenr,
2320                                                       node->num_bytes);
2321                         }
2322                 }
2323                 return ret;
2324         }
2325
2326         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2327             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2328                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2329                                            insert_reserved);
2330         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2331                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2332                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2333                                            insert_reserved);
2334         else
2335                 BUG();
2336         return ret;
2337 }
2338
2339 static noinline struct btrfs_delayed_ref_node *
2340 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2341 {
2342         struct rb_node *node;
2343         struct btrfs_delayed_ref_node *ref, *last = NULL;;
2344         struct btrfs_delayed_ref_node *ref, *last = NULL;
2345         /*
2346          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2347          * This prevents the ref count from going down to zero while
2348          * there are still pending delayed refs.
2349          */
2350         node = rb_first(&head->ref_root);
2351         while (node) {
2352                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2353                                 rb_node);
2354                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2355                         return ref;
2356                 else if (last == NULL)
2357                         last = ref;
2358                 node = rb_next(node);
2359         }
2360         return last;
2361 }
2362
2363 /*
2364  * Returns 0 on success or if called with an already aborted transaction.
2365  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2366  */
2367 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2368                                              struct btrfs_root *root,
2369                                              unsigned long nr)
2370 {
2371         struct btrfs_delayed_ref_root *delayed_refs;
2372         struct btrfs_delayed_ref_node *ref;
2373         struct btrfs_delayed_ref_head *locked_ref = NULL;
2374         struct btrfs_delayed_extent_op *extent_op;
2375         struct btrfs_fs_info *fs_info = root->fs_info;
2376         ktime_t start = ktime_get();
2377         int ret;
2378         unsigned long count = 0;
2379         unsigned long actual_count = 0;
2380         int must_insert_reserved = 0;
2381
2382         delayed_refs = &trans->transaction->delayed_refs;
2383         while (1) {
2384                 if (!locked_ref) {
2385                         if (count >= nr)
2386                                 break;
2387
2388                         spin_lock(&delayed_refs->lock);
2389                         locked_ref = btrfs_select_ref_head(trans);
2390                         if (!locked_ref) {
2391                                 spin_unlock(&delayed_refs->lock);
2392                                 break;
2393                         }
2394
2395                         /* grab the lock that says we are going to process
2396                          * all the refs for this head */
2397                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2398                         spin_unlock(&delayed_refs->lock);
2399                         /*
2400                          * we may have dropped the spin lock to get the head
2401                          * mutex lock, and that might have given someone else
2402                          * time to free the head.  If that's true, it has been
2403                          * removed from our list and we can move on.
2404                          */
2405                         if (ret == -EAGAIN) {
2406                                 locked_ref = NULL;
2407                                 count++;
2408                                 continue;
2409                         }
2410                 }
2411
2412                 /*
2413                  * We need to try and merge add/drops of the same ref since we
2414                  * can run into issues with relocate dropping the implicit ref
2415                  * and then it being added back again before the drop can
2416                  * finish.  If we merged anything we need to re-loop so we can
2417                  * get a good ref.
2418                  */
2419                 spin_lock(&locked_ref->lock);
2420                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2421                                          locked_ref);
2422
2423                 /*
2424                  * locked_ref is the head node, so we have to go one
2425                  * node back for any delayed ref updates
2426                  */
2427                 ref = select_delayed_ref(locked_ref);
2428
2429                 if (ref && ref->seq &&
2430                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2431                         spin_unlock(&locked_ref->lock);
2432                         btrfs_delayed_ref_unlock(locked_ref);
2433                         spin_lock(&delayed_refs->lock);
2434                         locked_ref->processing = 0;
2435                         delayed_refs->num_heads_ready++;
2436                         spin_unlock(&delayed_refs->lock);
2437                         locked_ref = NULL;
2438                         cond_resched();
2439                         count++;
2440                         continue;
2441                 }
2442
2443                 /*
2444                  * record the must insert reserved flag before we
2445                  * drop the spin lock.
2446                  */
2447                 must_insert_reserved = locked_ref->must_insert_reserved;
2448                 locked_ref->must_insert_reserved = 0;
2449
2450                 extent_op = locked_ref->extent_op;
2451                 locked_ref->extent_op = NULL;
2452
2453                 if (!ref) {
2456                         /* All delayed refs have been processed. Go ahead
2457                          * and send the head node to run_one_delayed_ref,
2458                          * so that any accounting fixes can happen.
2459                          */
2460                         ref = &locked_ref->node;
2461
2462                         if (extent_op && must_insert_reserved) {
2463                                 btrfs_free_delayed_extent_op(extent_op);
2464                                 extent_op = NULL;
2465                         }
2466
2467                         if (extent_op) {
2468                                 spin_unlock(&locked_ref->lock);
2469                                 ret = run_delayed_extent_op(trans, root,
2470                                                             ref, extent_op);
2471                                 btrfs_free_delayed_extent_op(extent_op);
2472
2473                                 if (ret) {
2474                                         /*
2475                                          * Need to reset must_insert_reserved if
2476                                          * there was an error so the abort stuff
2477                                          * can cleanup the reserved space
2478                                          * properly.
2479                                          */
2480                                         if (must_insert_reserved)
2481                                                 locked_ref->must_insert_reserved = 1;
2482                                         locked_ref->processing = 0;
2483                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2484                                         btrfs_delayed_ref_unlock(locked_ref);
2485                                         return ret;
2486                                 }
2487                                 continue;
2488                         }
2489
2490                         /*
2491                          * Need to drop our head ref lock and re-acquire the
2492                          * delayed ref lock and then re-check to make sure
2493                          * nobody got added.
2494                          */
2495                         spin_unlock(&locked_ref->lock);
2496                         spin_lock(&delayed_refs->lock);
2497                         spin_lock(&locked_ref->lock);
2498                         if (rb_first(&locked_ref->ref_root) ||
2499                             locked_ref->extent_op) {
2500                                 spin_unlock(&locked_ref->lock);
2501                                 spin_unlock(&delayed_refs->lock);
2502                                 continue;
2503                         }
2504                         ref->in_tree = 0;
2505                         delayed_refs->num_heads--;
2506                         rb_erase(&locked_ref->href_node,
2507                                  &delayed_refs->href_root);
2508                         spin_unlock(&delayed_refs->lock);
2509                 } else {
2510                         actual_count++;
2511                         ref->in_tree = 0;
2512                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2513                 }
2514                 atomic_dec(&delayed_refs->num_entries);
2515
2516                 if (!btrfs_delayed_ref_is_head(ref)) {
2517                         /*
2518                          * when we play the delayed ref, also correct the
2519                          * ref_mod on head
2520                          */
2521                         switch (ref->action) {
2522                         case BTRFS_ADD_DELAYED_REF:
2523                         case BTRFS_ADD_DELAYED_EXTENT:
2524                                 locked_ref->node.ref_mod -= ref->ref_mod;
2525                                 break;
2526                         case BTRFS_DROP_DELAYED_REF:
2527                                 locked_ref->node.ref_mod += ref->ref_mod;
2528                                 break;
2529                         default:
2530                                 WARN_ON(1);
2531                         }
2532                 }
2533                 spin_unlock(&locked_ref->lock);
2534
2535                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2536                                           must_insert_reserved);
2537
2538                 btrfs_free_delayed_extent_op(extent_op);
2539                 if (ret) {
2540                         locked_ref->processing = 0;
2541                         btrfs_delayed_ref_unlock(locked_ref);
2542                         btrfs_put_delayed_ref(ref);
2543                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2544                         return ret;
2545                 }
2546
2547                 /*
2548                  * If this node is a head, that means all the refs in this head
2549                  * have been dealt with, and we will pick the next head to deal
2550                  * with, so we must unlock the head and drop it from the cluster
2551                  * list before we release it.
2552                  */
2553                 if (btrfs_delayed_ref_is_head(ref)) {
2554                         btrfs_delayed_ref_unlock(locked_ref);
2555                         locked_ref = NULL;
2556                 }
2557                 btrfs_put_delayed_ref(ref);
2558                 count++;
2559                 cond_resched();
2560         }
2561
2562         /*
2563          * We don't want to include ref heads, since we can have empty ones
2564          * that would drastically skew our runtime down because we only do
2565          * accounting for them, no actual extent tree updates.
2566          */
2567         if (actual_count > 0) {
2568                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2569                 u64 avg;
2570
2571                 /*
2572                  * We weigh the current average higher than our current runtime
2573                  * to avoid large swings in the average.
2574                  */
2575                 spin_lock(&delayed_refs->lock);
2576                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2577                 avg = div64_u64(avg, 4);
2578                 fs_info->avg_delayed_ref_runtime = avg;
2579                 spin_unlock(&delayed_refs->lock);
2580         }
2581         return 0;
2582 }
2583
2584 #ifdef SCRAMBLE_DELAYED_REFS
2585 /*
2586  * Normally delayed refs get processed in ascending bytenr order. This
2587  * correlates in most cases to the order added. To expose dependencies on this
2588  * order, we start to process the tree in the middle instead of the beginning.
2589  */
2590 static u64 find_middle(struct rb_root *root)
2591 {
2592         struct rb_node *n = root->rb_node;
2593         struct btrfs_delayed_ref_node *entry;
2594         int alt = 1;
2595         u64 middle;
2596         u64 first = 0, last = 0;
2597
2598         n = rb_first(root);
2599         if (n) {
2600                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2601                 first = entry->bytenr;
2602         }
2603         n = rb_last(root);
2604         if (n) {
2605                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2606                 last = entry->bytenr;
2607         }
2608         n = root->rb_node;
2609
2610         while (n) {
2611                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2612                 WARN_ON(!entry->in_tree);
2613
2614                 middle = entry->bytenr;
2615
2616                 if (alt)
2617                         n = n->rb_left;
2618                 else
2619                         n = n->rb_right;
2620
2621                 alt = 1 - alt;
2622         }
2623         return middle;
2624 }
2625 #endif
2626
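     /*
      * Estimate how many extent tree leaves the given number of delayed ref
      * heads may touch, based on the worst case size of an extent item plus
      * one inline ref (plus the tree block info on non-skinny filesystems).
      */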
2627 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2628 {
2629         u64 num_bytes;
2630
2631         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2632                              sizeof(struct btrfs_extent_inline_ref));
2633         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2634                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2635
2636         /*
2637          * We don't ever fill leaves all the way, so the caller doubles the
2638          * result to get closer to what we're really going to want to use.
2639          */
2640         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2641 }
2642
2643 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2644                                        struct btrfs_root *root)
2645 {
2646         struct btrfs_block_rsv *global_rsv;
2647         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2648         u64 num_bytes;
2649         int ret = 0;
2650
2651         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2652         num_heads = heads_to_leaves(root, num_heads);
2653         if (num_heads > 1)
2654                 num_bytes += (num_heads - 1) * root->nodesize;
2655         num_bytes <<= 1;
2656         global_rsv = &root->fs_info->global_block_rsv;
2657
2658         /*
2659          * If we can't allocate any more chunks lets make sure we have _lots_ of
2660          * wiggle room since running delayed refs can create more delayed refs.
2661          */
2662         if (global_rsv->space_info->full)
2663                 num_bytes <<= 1;
2664
2665         spin_lock(&global_rsv->lock);
2666         if (global_rsv->reserved <= num_bytes)
2667                 ret = 1;
2668         spin_unlock(&global_rsv->lock);
2669         return ret;
2670 }
2671
2672 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2673                                        struct btrfs_root *root)
2674 {
2675         struct btrfs_fs_info *fs_info = root->fs_info;
2676         u64 num_entries =
2677                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2678         u64 avg_runtime;
2679         u64 val;
2680
2681         smp_mb();
2682         avg_runtime = fs_info->avg_delayed_ref_runtime;
2683         val = num_entries * avg_runtime;
2684         if (val >= NSEC_PER_SEC)
2685                 return 1;
2686         if (val >= NSEC_PER_SEC / 2)
2687                 return 2;
2688
2689         return btrfs_check_space_for_delayed_refs(trans, root);
2690 }
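/*
 * Putting numbers on the thresholds above: with an average of 10us per
 * ref, ~100,000 pending entries estimate a 1s flush (return 1), and
 * ~50,000 entries estimate 0.5s (return 2); below that we fall back to
 * the global reserve space check.
 */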
2691
2692 struct async_delayed_refs {
2693         struct btrfs_root *root;
2694         int count;
2695         int error;
2696         int sync;
2697         struct completion wait;
2698         struct btrfs_work work;
2699 };
2700
2701 static void delayed_ref_async_start(struct btrfs_work *work)
2702 {
2703         struct async_delayed_refs *async;
2704         struct btrfs_trans_handle *trans;
2705         int ret;
2706
2707         async = container_of(work, struct async_delayed_refs, work);
2708
2709         trans = btrfs_join_transaction(async->root);
2710         if (IS_ERR(trans)) {
2711                 async->error = PTR_ERR(trans);
2712                 goto done;
2713         }
2714
2715         /*
2716          * trans->sync means that when we call end_transaction, we won't
2717          * wait on delayed refs
2718          */
2719         trans->sync = true;
2720         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2721         if (ret)
2722                 async->error = ret;
2723
2724         ret = btrfs_end_transaction(trans, async->root);
2725         if (ret && !async->error)
2726                 async->error = ret;
2727 done:
2728         if (async->sync)
2729                 complete(&async->wait);
2730         else
2731                 kfree(async);
2732 }
2733
2734 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2735                                  unsigned long count, int wait)
2736 {
2737         struct async_delayed_refs *async;
2738         int ret;
2739
2740         async = kmalloc(sizeof(*async), GFP_NOFS);
2741         if (!async)
2742                 return -ENOMEM;
2743
2744         async->root = root->fs_info->tree_root;
2745         async->count = count;
2746         async->error = 0;
2747         if (wait)
2748                 async->sync = 1;
2749         else
2750                 async->sync = 0;
2751         init_completion(&async->wait);
2752
2753         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2754                         delayed_ref_async_start, NULL, NULL);
2755
2756         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2757
2758         if (wait) {
2759                 wait_for_completion(&async->wait);
2760                 ret = async->error;
2761                 kfree(async);
2762                 return ret;
2763         }
2764         return 0;
2765 }
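/*
 * Illustrative usage (a sketch, not a caller from this file): kick off
 * an async run of ~64 delayed refs and wait for the result:
 *
 *	ret = btrfs_async_run_delayed_refs(root, 64, 1);
 *	if (ret)
 *		btrfs_abort_transaction(trans, root, ret);
 *
 * With wait == 0 the worker frees the async_delayed_refs struct itself
 * and any error from the run is silently dropped.
 */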
2766
2767 /*
2768  * this starts processing the delayed reference count updates and
2769  * extent insertions we have queued up so far.  count can be
2770  * 0, which means to process everything in the tree at the start
2771  * of the run (but not newly added entries), or it can be some target
2772  * number you'd like to process.
2773  *
2774  * Returns 0 on success or if called with an aborted transaction
2775  * Returns <0 on error and aborts the transaction
2776  */
2777 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2778                            struct btrfs_root *root, unsigned long count)
2779 {
2780         struct rb_node *node;
2781         struct btrfs_delayed_ref_root *delayed_refs;
2782         struct btrfs_delayed_ref_head *head;
2783         int ret;
2784         int run_all = count == (unsigned long)-1;
2786
2787         /* We'll clean this up in btrfs_cleanup_transaction */
2788         if (trans->aborted)
2789                 return 0;
2790
2791         if (root == root->fs_info->extent_root)
2792                 root = root->fs_info->tree_root;
2793
2794         delayed_refs = &trans->transaction->delayed_refs;
2795         if (count == 0)
2796                 count = atomic_read(&delayed_refs->num_entries) * 2;
2799
2800 again:
2801 #ifdef SCRAMBLE_DELAYED_REFS
2802         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2803 #endif
2804         ret = __btrfs_run_delayed_refs(trans, root, count);
2805         if (ret < 0) {
2806                 btrfs_abort_transaction(trans, root, ret);
2807                 return ret;
2808         }
2809
2810         if (run_all) {
2811                 if (!list_empty(&trans->new_bgs))
2812                         btrfs_create_pending_block_groups(trans, root);
2813
2814                 spin_lock(&delayed_refs->lock);
2815                 node = rb_first(&delayed_refs->href_root);
2816                 if (!node) {
2817                         spin_unlock(&delayed_refs->lock);
2818                         goto out;
2819                 }
2820                 count = (unsigned long)-1;
2821
2822                 while (node) {
2823                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2824                                         href_node);
2825                         if (btrfs_delayed_ref_is_head(&head->node)) {
2826                                 struct btrfs_delayed_ref_node *ref;
2827
2828                                 ref = &head->node;
2829                                 atomic_inc(&ref->refs);
2830
2831                                 spin_unlock(&delayed_refs->lock);
2832                                 /*
2833                                  * Mutex was contended, block until it's
2834                                  * released and try again
2835                                  */
2836                                 mutex_lock(&head->mutex);
2837                                 mutex_unlock(&head->mutex);
2838
2839                                 btrfs_put_delayed_ref(ref);
2840                                 cond_resched();
2841                                 goto again;
2842                         } else {
2843                                 WARN_ON(1);
2844                         }
2845                         node = rb_next(node);
2846                 }
2847                 spin_unlock(&delayed_refs->lock);
2848                 cond_resched();
2849                 goto again;
2850         }
2851 out:
2852         ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2853         if (ret)
2854                 return ret;
2855         assert_qgroups_uptodate(trans);
2856         return 0;
2857 }
2858
2859 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2860                                 struct btrfs_root *root,
2861                                 u64 bytenr, u64 num_bytes, u64 flags,
2862                                 int level, int is_data)
2863 {
2864         struct btrfs_delayed_extent_op *extent_op;
2865         int ret;
2866
2867         extent_op = btrfs_alloc_delayed_extent_op();
2868         if (!extent_op)
2869                 return -ENOMEM;
2870
2871         extent_op->flags_to_set = flags;
2872         extent_op->update_flags = 1;
2873         extent_op->update_key = 0;
2874         extent_op->is_data = is_data ? 1 : 0;
2875         extent_op->level = level;
2876
2877         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2878                                           num_bytes, extent_op);
2879         if (ret)
2880                 btrfs_free_delayed_extent_op(extent_op);
2881         return ret;
2882 }
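/*
 * Illustrative use (a sketch of the typical COW-path caller, not code
 * from this file): mark a tree block as carrying full backrefs:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start,
 *					  buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *					  btrfs_header_level(buf), 0);
 *
 * The flag change is queued as a delayed extent op and applied when the
 * delayed refs for this extent are run.
 */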
2883
2884 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2885                                       struct btrfs_root *root,
2886                                       struct btrfs_path *path,
2887                                       u64 objectid, u64 offset, u64 bytenr)
2888 {
2889         struct btrfs_delayed_ref_head *head;
2890         struct btrfs_delayed_ref_node *ref;
2891         struct btrfs_delayed_data_ref *data_ref;
2892         struct btrfs_delayed_ref_root *delayed_refs;
2893         struct rb_node *node;
2894         int ret = 0;
2895
2896         delayed_refs = &trans->transaction->delayed_refs;
2897         spin_lock(&delayed_refs->lock);
2898         head = btrfs_find_delayed_ref_head(trans, bytenr);
2899         if (!head) {
2900                 spin_unlock(&delayed_refs->lock);
2901                 return 0;
2902         }
2903
2904         if (!mutex_trylock(&head->mutex)) {
2905                 atomic_inc(&head->node.refs);
2906                 spin_unlock(&delayed_refs->lock);
2907
2908                 btrfs_release_path(path);
2909
2910                 /*
2911                  * Mutex was contended, block until it's released and let
2912                  * caller try again
2913                  */
2914                 mutex_lock(&head->mutex);
2915                 mutex_unlock(&head->mutex);
2916                 btrfs_put_delayed_ref(&head->node);
2917                 return -EAGAIN;
2918         }
2919         spin_unlock(&delayed_refs->lock);
2920
2921         spin_lock(&head->lock);
2922         node = rb_first(&head->ref_root);
2923         while (node) {
2924                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2925                 node = rb_next(node);
2926
2927                 /* If it's a shared ref we know a cross reference exists */
2928                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2929                         ret = 1;
2930                         break;
2931                 }
2932
2933                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2934
2935                 /*
2936                  * If our ref doesn't match the one we're currently looking at
2937                  * then we have a cross reference.
2938                  */
2939                 if (data_ref->root != root->root_key.objectid ||
2940                     data_ref->objectid != objectid ||
2941                     data_ref->offset != offset) {
2942                         ret = 1;
2943                         break;
2944                 }
2945         }
2946         spin_unlock(&head->lock);
2947         mutex_unlock(&head->mutex);
2948         return ret;
2949 }
2950
2951 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2952                                         struct btrfs_root *root,
2953                                         struct btrfs_path *path,
2954                                         u64 objectid, u64 offset, u64 bytenr)
2955 {
2956         struct btrfs_root *extent_root = root->fs_info->extent_root;
2957         struct extent_buffer *leaf;
2958         struct btrfs_extent_data_ref *ref;
2959         struct btrfs_extent_inline_ref *iref;
2960         struct btrfs_extent_item *ei;
2961         struct btrfs_key key;
2962         u32 item_size;
2963         int ret;
2964
2965         key.objectid = bytenr;
2966         key.offset = (u64)-1;
2967         key.type = BTRFS_EXTENT_ITEM_KEY;
2968
2969         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2970         if (ret < 0)
2971                 goto out;
2972         BUG_ON(ret == 0); /* Corruption */
2973
2974         ret = -ENOENT;
2975         if (path->slots[0] == 0)
2976                 goto out;
2977
2978         path->slots[0]--;
2979         leaf = path->nodes[0];
2980         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2981
2982         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2983                 goto out;
2984
2985         ret = 1;
2986         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2987 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2988         if (item_size < sizeof(*ei)) {
2989                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2990                 goto out;
2991         }
2992 #endif
2993         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2994
2995         if (item_size != sizeof(*ei) +
2996             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2997                 goto out;
2998
2999         if (btrfs_extent_generation(leaf, ei) <=
3000             btrfs_root_last_snapshot(&root->root_item))
3001                 goto out;
3002
3003         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3004         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3005             BTRFS_EXTENT_DATA_REF_KEY)
3006                 goto out;
3007
3008         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3009         if (btrfs_extent_refs(leaf, ei) !=
3010             btrfs_extent_data_ref_count(leaf, ref) ||
3011             btrfs_extent_data_ref_root(leaf, ref) !=
3012             root->root_key.objectid ||
3013             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3014             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3015                 goto out;
3016
3017         ret = 0;
3018 out:
3019         return ret;
3020 }
3021
3022 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3023                           struct btrfs_root *root,
3024                           u64 objectid, u64 offset, u64 bytenr)
3025 {
3026         struct btrfs_path *path;
3027         int ret;
3028         int ret2;
3029
3030         path = btrfs_alloc_path();
3031         if (!path)
3032                 return -ENOMEM;
3033
3034         do {
3035                 ret = check_committed_ref(trans, root, path, objectid,
3036                                           offset, bytenr);
3037                 if (ret && ret != -ENOENT)
3038                         goto out;
3039
3040                 ret2 = check_delayed_ref(trans, root, path, objectid,
3041                                          offset, bytenr);
3042         } while (ret2 == -EAGAIN);
3043
3044         if (ret2 && ret2 != -ENOENT) {
3045                 ret = ret2;
3046                 goto out;
3047         }
3048
3049         if (ret != -ENOENT || ret2 != -ENOENT)
3050                 ret = 0;
3051 out:
3052         btrfs_free_path(path);
3053         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3054                 WARN_ON(ret > 0);
3055         return ret;
3056 }
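/*
 * Illustrative use (a sketch of the nocow write checks, not code from
 * this file): before overwriting an extent in place, make sure nobody
 * else references it:
 *
 *	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
 *				  file_offset, disk_bytenr))
 *		fall back to a COW write;
 *
 * A return of 0 means the extent is only referenced by this
 * root/inode/offset in both the committed tree and the delayed refs.
 */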
3057
3058 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3059                            struct btrfs_root *root,
3060                            struct extent_buffer *buf,
3061                            int full_backref, int inc)
3062 {
3063         u64 bytenr;
3064         u64 num_bytes;
3065         u64 parent;
3066         u64 ref_root;
3067         u32 nritems;
3068         struct btrfs_key key;
3069         struct btrfs_file_extent_item *fi;
3070         int i;
3071         int level;
3072         int ret = 0;
3073         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3074                             u64, u64, u64, u64, u64, u64, int);
3075
3077         if (btrfs_test_is_dummy_root(root))
3078                 return 0;
3079
3080         ref_root = btrfs_header_owner(buf);
3081         nritems = btrfs_header_nritems(buf);
3082         level = btrfs_header_level(buf);
3083
3084         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3085                 return 0;
3086
3087         if (inc)
3088                 process_func = btrfs_inc_extent_ref;
3089         else
3090                 process_func = btrfs_free_extent;
3091
3092         if (full_backref)
3093                 parent = buf->start;
3094         else
3095                 parent = 0;
3096
3097         for (i = 0; i < nritems; i++) {
3098                 if (level == 0) {
3099                         btrfs_item_key_to_cpu(buf, &key, i);
3100                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3101                                 continue;
3102                         fi = btrfs_item_ptr(buf, i,
3103                                             struct btrfs_file_extent_item);
3104                         if (btrfs_file_extent_type(buf, fi) ==
3105                             BTRFS_FILE_EXTENT_INLINE)
3106                                 continue;
3107                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3108                         if (bytenr == 0)
3109                                 continue;
3110
3111                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3112                         key.offset -= btrfs_file_extent_offset(buf, fi);
3113                         ret = process_func(trans, root, bytenr, num_bytes,
3114                                            parent, ref_root, key.objectid,
3115                                            key.offset, 1);
3116                         if (ret)
3117                                 goto fail;
3118                 } else {
3119                         bytenr = btrfs_node_blockptr(buf, i);
3120                         num_bytes = root->nodesize;
3121                         ret = process_func(trans, root, bytenr, num_bytes,
3122                                            parent, ref_root, level - 1, 0,
3123                                            1);
3124                         if (ret)
3125                                 goto fail;
3126                 }
3127         }
3128         return 0;
3129 fail:
3130         return ret;
3131 }
3132
3133 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3134                   struct extent_buffer *buf, int full_backref)
3135 {
3136         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3137 }
3138
3139 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3140                   struct extent_buffer *buf, int full_backref)
3141 {
3142         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3143 }
3144
3145 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3146                                  struct btrfs_root *root,
3147                                  struct btrfs_path *path,
3148                                  struct btrfs_block_group_cache *cache)
3149 {
3150         int ret;
3151         struct btrfs_root *extent_root = root->fs_info->extent_root;
3152         unsigned long bi;
3153         struct extent_buffer *leaf;
3154
3155         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3156         if (ret < 0)
3157                 goto fail;
3158         BUG_ON(ret); /* Corruption */
3159
3160         leaf = path->nodes[0];
3161         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3162         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3163         btrfs_mark_buffer_dirty(leaf);
3164         btrfs_release_path(path);
3165 fail:
3166         if (ret) {
3167                 btrfs_abort_transaction(trans, root, ret);
3168                 return ret;
3169         }
3170         return 0;
3171 }
3173
3174 static struct btrfs_block_group_cache *
3175 next_block_group(struct btrfs_root *root,
3176                  struct btrfs_block_group_cache *cache)
3177 {
3178         struct rb_node *node;
3179         spin_lock(&root->fs_info->block_group_cache_lock);
3180         node = rb_next(&cache->cache_node);
3181         btrfs_put_block_group(cache);
3182         if (node) {
3183                 cache = rb_entry(node, struct btrfs_block_group_cache,
3184                                  cache_node);
3185                 btrfs_get_block_group(cache);
3186         } else {
3187                 cache = NULL;
3188         }
3188         spin_unlock(&root->fs_info->block_group_cache_lock);
3189         return cache;
3190 }
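/*
 * next_block_group() drops the reference on @cache and returns the next
 * group with a reference already held, so the usual iteration (used by
 * the writers below) is:
 *
 *	cache = btrfs_lookup_first_block_group(root->fs_info, 0);
 *	while (cache) {
 *		...
 *		cache = next_block_group(root, cache);
 *	}
 */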
3191
3192 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3193                             struct btrfs_trans_handle *trans,
3194                             struct btrfs_path *path)
3195 {
3196         struct btrfs_root *root = block_group->fs_info->tree_root;
3197         struct inode *inode = NULL;
3198         u64 alloc_hint = 0;
3199         int dcs = BTRFS_DC_ERROR;
3200         int num_pages = 0;
3201         int retries = 0;
3202         int ret = 0;
3203
3204         /*
3205          * If this block group is smaller than 100 megs, don't bother
3206          * caching it.
3207          */
3208         if (block_group->key.offset < (100 * 1024 * 1024)) {
3209                 spin_lock(&block_group->lock);
3210                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3211                 spin_unlock(&block_group->lock);
3212                 return 0;
3213         }
3214
3215 again:
3216         inode = lookup_free_space_inode(root, block_group, path);
3217         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3218                 ret = PTR_ERR(inode);
3219                 btrfs_release_path(path);
3220                 goto out;
3221         }
3222
3223         if (IS_ERR(inode)) {
3224                 BUG_ON(retries);
3225                 retries++;
3226
3227                 if (block_group->ro)
3228                         goto out_free;
3229
3230                 ret = create_free_space_inode(root, trans, block_group, path);
3231                 if (ret)
3232                         goto out_free;
3233                 goto again;
3234         }
3235
3236         /* We've already set up this transaction, go ahead and exit */
3237         if (block_group->cache_generation == trans->transid &&
3238             i_size_read(inode)) {
3239                 dcs = BTRFS_DC_SETUP;
3240                 goto out_put;
3241         }
3242
3243         /*
3244          * We want to set the generation to 0, that way if anything goes wrong
3245          * from here on out we know not to trust this cache when we load up next
3246          * time.
3247          */
3248         BTRFS_I(inode)->generation = 0;
3249         ret = btrfs_update_inode(trans, root, inode);
3250         WARN_ON(ret);
3251
3252         if (i_size_read(inode) > 0) {
3253                 ret = btrfs_check_trunc_cache_free_space(root,
3254                                         &root->fs_info->global_block_rsv);
3255                 if (ret)
3256                         goto out_put;
3257
3258                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3259                 if (ret)
3260                         goto out_put;
3261         }
3262
3263         spin_lock(&block_group->lock);
3264         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3265             !btrfs_test_opt(root, SPACE_CACHE) ||
3266             block_group->delalloc_bytes) {
3267                 /*
3268                  * don't bother trying to write stuff out _if_
3269                  * a) we're not cached,
3270                  * b) we're mounted with the nospace_cache option, or
3271                  * c) the block group still has delalloc bytes outstanding.
3272                  */
3272                 dcs = BTRFS_DC_WRITTEN;
3273                 spin_unlock(&block_group->lock);
3274                 goto out_put;
3275         }
3276         spin_unlock(&block_group->lock);
3277
3278         /*
3279          * Try to preallocate enough space based on how big the block group is.
3280          * Keep in mind this has to include any pinned space which could end up
3281          * taking up quite a bit since it's not folded into the other space
3282          * cache.
3283          */
3284         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3285         if (!num_pages)
3286                 num_pages = 1;
3287
3288         num_pages *= 16;
3289         num_pages *= PAGE_CACHE_SIZE;
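        /*
         * Worked example: a 1GiB block group gives 1GiB / 256MiB = 4,
         * times 16 is 64 pages, i.e. 256KiB (with 4KiB pages) of
         * preallocated space cache.  Note num_pages is in bytes from
         * here on.
         */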
3290
3291         ret = btrfs_check_data_free_space(inode, num_pages);
3292         if (ret)
3293                 goto out_put;
3294
3295         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3296                                               num_pages, num_pages,
3297                                               &alloc_hint);
3298         if (!ret)
3299                 dcs = BTRFS_DC_SETUP;
3300         btrfs_free_reserved_data_space(inode, num_pages);
3301
3302 out_put:
3303         iput(inode);
3304 out_free:
3305         btrfs_release_path(path);
3306 out:
3307         spin_lock(&block_group->lock);
3308         if (!ret && dcs == BTRFS_DC_SETUP)
3309                 block_group->cache_generation = trans->transid;
3310         block_group->disk_cache_state = dcs;
3311         spin_unlock(&block_group->lock);
3312
3313         return ret;
3314 }
3315
3316 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3317                                    struct btrfs_root *root)
3318 {
3319         struct btrfs_block_group_cache *cache;
3320         int err = 0;
3321         struct btrfs_path *path;
3322         u64 last = 0;
3323
3324         path = btrfs_alloc_path();
3325         if (!path)
3326                 return -ENOMEM;
3327
3328 again:
3329         while (1) {
3330                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3331                 while (cache) {
3332                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3333                                 break;
3334                         cache = next_block_group(root, cache);
3335                 }
3336                 if (!cache) {
3337                         if (last == 0)
3338                                 break;
3339                         last = 0;
3340                         continue;
3341                 }
3342                 err = cache_save_setup(cache, trans, path);
3343                 last = cache->key.objectid + cache->key.offset;
3344                 btrfs_put_block_group(cache);
3345         }
3346
3347         while (1) {
3348                 if (last == 0) {
3349                         err = btrfs_run_delayed_refs(trans, root,
3350                                                      (unsigned long)-1);
3351                         if (err) /* File system offline */
3352                                 goto out;
3353                 }
3354
3355                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3356                 while (cache) {
3357                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3358                                 btrfs_put_block_group(cache);
3359                                 goto again;
3360                         }
3361
3362                         if (cache->dirty)
3363                                 break;
3364                         cache = next_block_group(root, cache);
3365                 }
3366                 if (!cache) {
3367                         if (last == 0)
3368                                 break;
3369                         last = 0;
3370                         continue;
3371                 }
3372
3373                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3374                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3375                 cache->dirty = 0;
3376                 last = cache->key.objectid + cache->key.offset;
3377
3378                 err = write_one_cache_group(trans, root, path, cache);
3379                 btrfs_put_block_group(cache);
3380                 if (err) /* File system offline */
3381                         goto out;
3382         }
3383
3384         while (1) {
3385                 /*
3386                  * I don't think this is needed since we're just marking our
3387                  * preallocated extent as written, but just in case, it
3388                  * can't hurt.
3389                  */
3390                 if (last == 0) {
3391                         err = btrfs_run_delayed_refs(trans, root,
3392                                                      (unsigned long)-1);
3393                         if (err) /* File system offline */
3394                                 goto out;
3395                 }
3396
3397                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3398                 while (cache) {
3399                         /*
3400                          * Really this shouldn't happen, but it could if we
3401                          * couldn't write the entire preallocated extent and
3402                          * splitting the extent resulted in a new block.
3403                          */
3404                         if (cache->dirty) {
3405                                 btrfs_put_block_group(cache);
3406                                 goto again;
3407                         }
3408                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3409                                 break;
3410                         cache = next_block_group(root, cache);
3411                 }
3412                 if (!cache) {
3413                         if (last == 0)
3414                                 break;
3415                         last = 0;
3416                         continue;
3417                 }
3418
3419                 err = btrfs_write_out_cache(root, trans, cache, path);
3420
3421                 /*
3422                  * If we didn't have an error then the cache state is still
3423                  * NEED_WRITE, so we can set it to WRITTEN.
3424                  */
3425                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3426                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3427                 last = cache->key.objectid + cache->key.offset;
3428                 btrfs_put_block_group(cache);
3429         }
3430 out:
3431
3432         btrfs_free_path(path);
3433         return err;
3434 }
3435
3436 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3437 {
3438         struct btrfs_block_group_cache *block_group;
3439         int readonly = 0;
3440
3441         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3442         if (!block_group || block_group->ro)
3443                 readonly = 1;
3444         if (block_group)
3445                 btrfs_put_block_group(block_group);
3446         return readonly;
3447 }
3448
3449 static const char *alloc_name(u64 flags)
3450 {
3451         switch (flags) {
3452         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3453                 return "mixed";
3454         case BTRFS_BLOCK_GROUP_METADATA:
3455                 return "metadata";
3456         case BTRFS_BLOCK_GROUP_DATA:
3457                 return "data";
3458         case BTRFS_BLOCK_GROUP_SYSTEM:
3459                 return "system";
3460         default:
3461                 WARN_ON(1);
3462                 return "invalid-combination";
3463         }
3464 }
3465
3466 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3467                              u64 total_bytes, u64 bytes_used,
3468                              struct btrfs_space_info **space_info)
3469 {
3470         struct btrfs_space_info *found;
3471         int i;
3472         int factor;
3473         int ret;
3474
3475         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3476                      BTRFS_BLOCK_GROUP_RAID10))
3477                 factor = 2;
3478         else
3479                 factor = 1;
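        /*
         * factor converts logical bytes into raw disk usage: e.g. 1GiB
         * of data in a DUP or RAID1 group occupies 2GiB on disk, which
         * is what disk_total and disk_used below account for.
         */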
3480
3481         found = __find_space_info(info, flags);
3482         if (found) {
3483                 spin_lock(&found->lock);
3484                 found->total_bytes += total_bytes;
3485                 found->disk_total += total_bytes * factor;
3486                 found->bytes_used += bytes_used;
3487                 found->disk_used += bytes_used * factor;
3488                 found->full = 0;
3489                 spin_unlock(&found->lock);
3490                 *space_info = found;
3491                 return 0;
3492         }
3493         found = kzalloc(sizeof(*found), GFP_NOFS);
3494         if (!found)
3495                 return -ENOMEM;
3496
3497         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3498         if (ret) {
3499                 kfree(found);
3500                 return ret;
3501         }
3502
3503         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3504                 INIT_LIST_HEAD(&found->block_groups[i]);
3505         init_rwsem(&found->groups_sem);
3506         spin_lock_init(&found->lock);
3507         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3508         found->total_bytes = total_bytes;
3509         found->disk_total = total_bytes * factor;
3510         found->bytes_used = bytes_used;
3511         found->disk_used = bytes_used * factor;
3512         found->bytes_pinned = 0;
3513         found->bytes_reserved = 0;
3514         found->bytes_readonly = 0;
3515         found->bytes_may_use = 0;
3516         found->full = 0;
3517         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3518         found->chunk_alloc = 0;
3519         found->flush = 0;
3520         init_waitqueue_head(&found->wait);
3521
3522         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3523                                     info->space_info_kobj, "%s",
3524                                     alloc_name(found->flags));
3525         if (ret) {
3526                 percpu_counter_destroy(&found->total_bytes_pinned);
3527                 kfree(found);
3527                 return ret;
3528         }
3529
3530         *space_info = found;
3531         list_add_rcu(&found->list, &info->space_info);
3532         if (flags & BTRFS_BLOCK_GROUP_DATA)
3533                 info->data_sinfo = found;
3534
3535         return ret;
3536 }
3537
3538 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3539 {
3540         u64 extra_flags = chunk_to_extended(flags) &
3541                                 BTRFS_EXTENDED_PROFILE_MASK;
3542
3543         write_seqlock(&fs_info->profiles_lock);
3544         if (flags & BTRFS_BLOCK_GROUP_DATA)
3545                 fs_info->avail_data_alloc_bits |= extra_flags;
3546         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3547                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3548         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3549                 fs_info->avail_system_alloc_bits |= extra_flags;
3550         write_sequnlock(&fs_info->profiles_lock);
3551 }
3552
3553 /*
3554  * returns target flags in extended format or 0 if restripe for this
3555  * chunk_type is not in progress
3556  *
3557  * should be called with either volume_mutex or balance_lock held
3558  */
3559 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3560 {
3561         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3562         u64 target = 0;
3563
3564         if (!bctl)
3565                 return 0;
3566
3567         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3568             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3569                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3570         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3571                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3572                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3573         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3574                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3575                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3576         }
3577
3578         return target;
3579 }
3580
3581 /*
3582  * @flags: available profiles in extended format (see ctree.h)
3583  *
3584  * Returns reduced profile in chunk format.  If profile changing is in
3585  * progress (either running or paused) picks the target profile (if it's
3586  * already available), otherwise falls back to plain reducing.
3587  */
3588 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3589 {
3590         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3591         u64 target;
3592         u64 tmp;
3593
3594         /*
3595          * see if a restripe for this chunk_type is in progress; if so,
3596          * try to reduce to the target profile
3597          */
3598         spin_lock(&root->fs_info->balance_lock);
3599         target = get_restripe_target(root->fs_info, flags);
3600         if (target) {
3601                 /* pick target profile only if it's already available */
3602                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3603                         spin_unlock(&root->fs_info->balance_lock);
3604                         return extended_to_chunk(target);
3605                 }
3606         }
3607         spin_unlock(&root->fs_info->balance_lock);
3608
3609         /* First, mask out the RAID levels which aren't possible */
3610         if (num_devices == 1)
3611                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3612                            BTRFS_BLOCK_GROUP_RAID5);
3613         if (num_devices < 3)
3614                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3615         if (num_devices < 4)
3616                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3617
3618         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3619                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3620                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3621         flags &= ~tmp;
3622
3623         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3624                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3625         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3626                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3627         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3628                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3629         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3630                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3631         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3632                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3633
3634         return extended_to_chunk(flags | tmp);
3635 }
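/*
 * Example of the reduction above: on a two-device filesystem with
 * flags = DATA | RAID10 | RAID1, RAID10 is masked out first (it needs
 * four devices), RAID1 is then the highest-priority profile left, and
 * the result is DATA | RAID1 in chunk format.
 */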
3636
3637 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3638 {
3639         unsigned seq;
3640         u64 flags;
3641
3642         do {
3643                 flags = orig_flags;
3644                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3645
3646                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3647                         flags |= root->fs_info->avail_data_alloc_bits;
3648                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3649                         flags |= root->fs_info->avail_system_alloc_bits;
3650                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3651                         flags |= root->fs_info->avail_metadata_alloc_bits;
3652         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3653
3654         return btrfs_reduce_alloc_profile(root, flags);
3655 }
3656
3657 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3658 {
3659         u64 flags;
3660         u64 ret;
3661
3662         if (data)
3663                 flags = BTRFS_BLOCK_GROUP_DATA;
3664         else if (root == root->fs_info->chunk_root)
3665                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3666         else
3667                 flags = BTRFS_BLOCK_GROUP_METADATA;
3668
3669         ret = get_alloc_profile(root, flags);
3670         return ret;
3671 }
3672
3673 /*
3674  * This will check the space that the inode allocates from to make sure we have
3675  * enough space for bytes.
3676  */
3677 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3678 {
3679         struct btrfs_space_info *data_sinfo;
3680         struct btrfs_root *root = BTRFS_I(inode)->root;
3681         struct btrfs_fs_info *fs_info = root->fs_info;
3682         u64 used;
3683         int ret = 0, committed = 0, alloc_chunk = 1;
3684
3685         /* make sure bytes are sectorsize aligned */
3686         bytes = ALIGN(bytes, root->sectorsize);
3687
3688         if (btrfs_is_free_space_inode(inode)) {
3689                 committed = 1;
3690                 ASSERT(current->journal_info);
3691         }
3692
3693         data_sinfo = fs_info->data_sinfo;
3694         if (!data_sinfo)
3695                 goto alloc;
3696
3697 again:
3698         /* make sure we have enough space to handle the data first */
3699         spin_lock(&data_sinfo->lock);
3700         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3701                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3702                 data_sinfo->bytes_may_use;
3703
3704         if (used + bytes > data_sinfo->total_bytes) {
3705                 struct btrfs_trans_handle *trans;
3706
3707                 /*
3708                  * if we don't have enough free bytes in this space then we need
3709                  * to alloc a new chunk.
3710                  */
3711                 if (!data_sinfo->full && alloc_chunk) {
3712                         u64 alloc_target;
3713
3714                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3715                         spin_unlock(&data_sinfo->lock);
3716 alloc:
3717                         alloc_target = btrfs_get_alloc_profile(root, 1);
3718                         /*
3719                          * It is ugly that we don't call a nolock join
3720                          * transaction for the free space inode case here.
3721                          * But it is safe because we only do the data space
3722                          * reservation for the free space cache in the
3723                          * transaction context; the common join transaction
3724                          * just increases the use count of the current
3725                          * transaction handle and doesn't try to acquire the
3726                          * trans_lock of the fs.
3727                          */
3728                         trans = btrfs_join_transaction(root);
3729                         if (IS_ERR(trans))
3730                                 return PTR_ERR(trans);
3731
3732                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3733                                              alloc_target,
3734                                              CHUNK_ALLOC_NO_FORCE);
3735                         btrfs_end_transaction(trans, root);
3736                         if (ret < 0) {
3737                                 if (ret != -ENOSPC)
3738                                         return ret;
3739                                 goto commit_trans;
3741                         }
3742
3743                         if (!data_sinfo)
3744                                 data_sinfo = fs_info->data_sinfo;
3745
3746                         goto again;
3747                 }
3748
3749                 /*
3750                  * If we don't have enough pinned space to deal with this
3751                  * allocation don't bother committing the transaction.
3752                  */
3753                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3754                                            bytes) < 0)
3755                         committed = 1;
3756                 spin_unlock(&data_sinfo->lock);
3757
3758                 /* commit the current transaction and try again */
3759 commit_trans:
3760                 if (!committed &&
3761                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3762                         committed = 1;
3763
3764                         trans = btrfs_join_transaction(root);
3765                         if (IS_ERR(trans))
3766                                 return PTR_ERR(trans);
3767                         ret = btrfs_commit_transaction(trans, root);
3768                         if (ret)
3769                                 return ret;
3770                         goto again;
3771                 }
3772
3773                 trace_btrfs_space_reservation(root->fs_info,
3774                                               "space_info:enospc",
3775                                               data_sinfo->flags, bytes, 1);
3776                 return -ENOSPC;
3777         }
3778         data_sinfo->bytes_may_use += bytes;
3779         trace_btrfs_space_reservation(root->fs_info, "space_info",
3780                                       data_sinfo->flags, bytes, 1);
3781         spin_unlock(&data_sinfo->lock);
3782
3783         return 0;
3784 }
3785
3786 /*
3787  * Called if we need to clear a data reservation for this inode.
3788  */
3789 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3790 {
3791         struct btrfs_root *root = BTRFS_I(inode)->root;
3792         struct btrfs_space_info *data_sinfo;
3793
3794         /* make sure bytes are sectorsize aligned */
3795         bytes = ALIGN(bytes, root->sectorsize);
3796
3797         data_sinfo = root->fs_info->data_sinfo;
3798         spin_lock(&data_sinfo->lock);
3799         WARN_ON(data_sinfo->bytes_may_use < bytes);
3800         data_sinfo->bytes_may_use -= bytes;
3801         trace_btrfs_space_reservation(root->fs_info, "space_info",
3802                                       data_sinfo->flags, bytes, 0);
3803         spin_unlock(&data_sinfo->lock);
3804 }
3805
3806 static void force_metadata_allocation(struct btrfs_fs_info *info)
3807 {
3808         struct list_head *head = &info->space_info;
3809         struct btrfs_space_info *found;
3810
3811         rcu_read_lock();
3812         list_for_each_entry_rcu(found, head, list) {
3813                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3814                         found->force_alloc = CHUNK_ALLOC_FORCE;
3815         }
3816         rcu_read_unlock();
3817 }
3818
3819 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3820 {
3821         return (global->size << 1);
3822 }
3823
3824 static int should_alloc_chunk(struct btrfs_root *root,
3825                               struct btrfs_space_info *sinfo, int force)
3826 {
3827         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3828         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3829         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3830         u64 thresh;
3831
3832         if (force == CHUNK_ALLOC_FORCE)
3833                 return 1;
3834
3835         /*
3836          * We need to take into account the global rsv because for all intents
3837          * and purposes it's used space.  Don't worry about locking the
3838          * global_rsv, it doesn't change except when the transaction commits.
3839          */
3840         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3841                 num_allocated += calc_global_rsv_need_space(global_rsv);
3842
3843         /*
3844          * in limited mode, allocate if free space drops below about 1% of
3845          * the FS size (but at least 64MB).
3846          */
3847         if (force == CHUNK_ALLOC_LIMITED) {
3848                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3849                 thresh = max_t(u64, 64 * 1024 * 1024,
3850                                div_factor_fine(thresh, 1));
3851
3852                 if (num_bytes - num_allocated < thresh)
3853                         return 1;
3854         }
3855
3856         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3857                 return 0;
3858         return 1;
3859 }
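/*
 * Putting numbers on the checks above: on a 1TiB filesystem in
 * CHUNK_ALLOC_LIMITED mode the threshold is max(64MiB, 1% of 1TiB),
 * i.e. ~10GiB of free space in this space_info; in the default case a
 * new chunk is allocated only once allocated bytes (plus 2MiB slack)
 * reach 80% of the non-readonly total.
 */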
3860
3861 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3862 {
3863         u64 num_dev;
3864
3865         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3866                     BTRFS_BLOCK_GROUP_RAID0 |
3867                     BTRFS_BLOCK_GROUP_RAID5 |
3868                     BTRFS_BLOCK_GROUP_RAID6))
3869                 num_dev = root->fs_info->fs_devices->rw_devices;
3870         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3871                 num_dev = 2;
3872         else
3873                 num_dev = 1;    /* DUP or single */
3874
3875         /* metadata for updating devices and the chunk tree */
3876         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3877 }
3878
3879 static void check_system_chunk(struct btrfs_trans_handle *trans,
3880                                struct btrfs_root *root, u64 type)
3881 {
3882         struct btrfs_space_info *info;
3883         u64 left;
3884         u64 thresh;
3885
3886         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3887         spin_lock(&info->lock);
3888         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3889                 info->bytes_reserved - info->bytes_readonly;
3890         spin_unlock(&info->lock);
3891
3892         thresh = get_system_chunk_thresh(root, type);
3893         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3894                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3895                         left, thresh, type);
3896                 dump_space_info(info, 0, 0);
3897         }
3898
3899         if (left < thresh) {
3900                 u64 flags;
3901
3902                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3903                 btrfs_alloc_chunk(trans, root, flags);
3904         }
3905 }
3906
3907 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3908                           struct btrfs_root *extent_root, u64 flags, int force)
3909 {
3910         struct btrfs_space_info *space_info;
3911         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3912         int wait_for_alloc = 0;
3913         int ret = 0;
3914
3915         /* Don't re-enter if we're already allocating a chunk */
3916         if (trans->allocating_chunk)
3917                 return -ENOSPC;
3918
3919         space_info = __find_space_info(extent_root->fs_info, flags);
3920         if (!space_info) {
3921                 ret = update_space_info(extent_root->fs_info, flags,
3922                                         0, 0, &space_info);
3923                 BUG_ON(ret); /* -ENOMEM */
3924         }
3925         BUG_ON(!space_info); /* Logic error */
3926
3927 again:
3928         spin_lock(&space_info->lock);
3929         if (force < space_info->force_alloc)
3930                 force = space_info->force_alloc;
3931         if (space_info->full) {
3932                 if (should_alloc_chunk(extent_root, space_info, force))
3933                         ret = -ENOSPC;
3934                 else
3935                         ret = 0;
3936                 spin_unlock(&space_info->lock);
3937                 return ret;
3938         }
3939
3940         if (!should_alloc_chunk(extent_root, space_info, force)) {
3941                 spin_unlock(&space_info->lock);
3942                 return 0;
3943         } else if (space_info->chunk_alloc) {
3944                 wait_for_alloc = 1;
3945         } else {
3946                 space_info->chunk_alloc = 1;
3947         }
3948
3949         spin_unlock(&space_info->lock);
3950
3951         mutex_lock(&fs_info->chunk_mutex);
3952
3953         /*
3954          * The chunk_mutex is held throughout the entirety of a chunk
3955          * allocation, so once we've acquired the chunk_mutex we know that the
3956          * other guy is done and we need to recheck and see if we should
3957          * allocate.
3958          */
3959         if (wait_for_alloc) {
3960                 mutex_unlock(&fs_info->chunk_mutex);
3961                 wait_for_alloc = 0;
3962                 goto again;
3963         }
3964
3965         trans->allocating_chunk = true;
3966
3967         /*
3968          * If we have mixed data/metadata chunks we want to make sure we keep
3969          * allocating mixed chunks instead of individual chunks.
3970          */
3971         if (btrfs_mixed_space_info(space_info))
3972                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3973
3974         /*
3975          * if we're doing a data chunk, go ahead and make sure that
3976          * we keep a reasonable number of metadata chunks allocated in the
3977          * FS as well.
3978          */
3979         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3980                 fs_info->data_chunk_allocations++;
3981                 if (!(fs_info->data_chunk_allocations %
3982                       fs_info->metadata_ratio))
3983                         force_metadata_allocation(fs_info);
3984         }
3985
3986         /*
3987          * Check if we have enough space in SYSTEM chunk because we may need
3988          * to update devices.
3989          */
3990         check_system_chunk(trans, extent_root, flags);
3991
3992         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3993         trans->allocating_chunk = false;
3994
3995         spin_lock(&space_info->lock);
3996         if (ret < 0 && ret != -ENOSPC)
3997                 goto out;
3998         if (ret)
3999                 space_info->full = 1;
4000         else
4001                 ret = 1;
4002
4003         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4004 out:
4005         space_info->chunk_alloc = 0;
4006         spin_unlock(&space_info->lock);
4007         mutex_unlock(&fs_info->chunk_mutex);
4008         return ret;
4009 }
4010
4011 static int can_overcommit(struct btrfs_root *root,
4012                           struct btrfs_space_info *space_info, u64 bytes,
4013                           enum btrfs_reserve_flush_enum flush)
4014 {
4015         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4016         u64 profile = btrfs_get_alloc_profile(root, 0);
4017         u64 space_size;
4018         u64 avail;
4019         u64 used;
4020
4021         used = space_info->bytes_used + space_info->bytes_reserved +
4022                 space_info->bytes_pinned + space_info->bytes_readonly;
4023
4024         /*
4025          * We only want to allow over committing if we have lots of actual space
4026          * free, but if we don't have enough space to handle the global reserve
4027          * space then we could end up having a real enospc problem when trying
4028          * to allocate a chunk or some other such important allocation.
4029          */
4030         spin_lock(&global_rsv->lock);
4031         space_size = calc_global_rsv_need_space(global_rsv);
4032         spin_unlock(&global_rsv->lock);
4033         if (used + space_size >= space_info->total_bytes)
4034                 return 0;
4035
4036         used += space_info->bytes_may_use;
4037
4038         spin_lock(&root->fs_info->free_chunk_lock);
4039         avail = root->fs_info->free_chunk_space;
4040         spin_unlock(&root->fs_info->free_chunk_lock);
4041
4042         /*
4043          * If we have dup, raid1 or raid10 then only half of the free
4044          * space is actually usable.  For raid56, the space info used
4045          * doesn't include the parity drive, so we don't have to
4046          * change the math
4047          */
4048         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4049                        BTRFS_BLOCK_GROUP_RAID1 |
4050                        BTRFS_BLOCK_GROUP_RAID10))
4051                 avail >>= 1;
4052
4053         /*
4054          * If we can flush everything (BTRFS_RESERVE_FLUSH_ALL), be
4055          * conservative and only allow overcommitting up to 1/8 of the free
4056          * space.  Otherwise allow overcommitting up to 1/2 of it.
4057          */
4058         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4059                 avail >>= 3;
4060         else
4061                 avail >>= 1;
4062
4063         if (used + bytes < space_info->total_bytes + avail)
4064                 return 1;
4065         return 0;
4066 }
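
/*
 * Editor's sketch: a worked example of the overcommit math above, using
 * assumed numbers (not taken from this file).  Suppose a metadata space_info
 * with total_bytes = 1GiB, used = 900MiB (including bytes_may_use), a RAID1
 * profile, and free_chunk_space = 4GiB:
 *
 *	avail = 4GiB >> 1 = 2GiB	(RAID1 halves the usable free space)
 *	avail = 2GiB >> 3 = 256MiB	(BTRFS_RESERVE_FLUSH_ALL allows 1/8)
 *
 * A reservation of `bytes` then succeeds iff
 *	900MiB + bytes < 1GiB + 256MiB, i.e. bytes < 380MiB.
 */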
4067
4068 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4069                                          unsigned long nr_pages, int nr_items)
4070 {
4071         struct super_block *sb = root->fs_info->sb;
4072
4073         if (down_read_trylock(&sb->s_umount)) {
4074                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4075                 up_read(&sb->s_umount);
4076         } else {
4077                 /*
4078                  * We needn't worry about the filesystem going from r/w to r/o
4079                  * even though we don't acquire the ->s_umount mutex, because
4080                  * the filesystem should guarantee that the delalloc inode list
4081                  * is empty after the filesystem becomes read-only (all dirty
4082                  * pages have been written to disk).
4083                  */
4084                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4085                 if (!current->journal_info)
4086                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4087         }
4088 }
4089
4090 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4091 {
4092         u64 bytes;
4093         int nr;
4094
4095         bytes = btrfs_calc_trans_metadata_size(root, 1);
4096         nr = (int)div64_u64(to_reclaim, bytes);
4097         if (!nr)
4098                 nr = 1;
4099         return nr;
4100 }
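
/*
 * Editor's note, a hypothetical worked example: in kernels of this vintage
 * btrfs_calc_trans_metadata_size(root, 1) evaluates to
 * nodesize * BTRFS_MAX_LEVEL * 2, so with a 16KiB nodesize one item costs
 * 16KiB * 8 * 2 = 256KiB.  Reclaiming to_reclaim = 1MiB therefore yields
 * nr = 1MiB / 256KiB = 4 items (and never less than 1).
 */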
4101
4102 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4103
4104 /*
4105  * shrink metadata reservation for delalloc
4106  */
4107 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4108                             bool wait_ordered)
4109 {
4110         struct btrfs_block_rsv *block_rsv;
4111         struct btrfs_space_info *space_info;
4112         struct btrfs_trans_handle *trans;
4113         u64 delalloc_bytes;
4114         u64 max_reclaim;
4115         long time_left;
4116         unsigned long nr_pages;
4117         int loops;
4118         int items;
4119         enum btrfs_reserve_flush_enum flush;
4120
4121         /* Calculate the number of items we need to flush for this reservation */
4122         items = calc_reclaim_items_nr(root, to_reclaim);
4123         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4124
4125         trans = (struct btrfs_trans_handle *)current->journal_info;
4126         block_rsv = &root->fs_info->delalloc_block_rsv;
4127         space_info = block_rsv->space_info;
4128
4129         delalloc_bytes = percpu_counter_sum_positive(
4130                                                 &root->fs_info->delalloc_bytes);
4131         if (delalloc_bytes == 0) {
4132                 if (trans)
4133                         return;
4134                 if (wait_ordered)
4135                         btrfs_wait_ordered_roots(root->fs_info, items);
4136                 return;
4137         }
4138
4139         loops = 0;
4140         while (delalloc_bytes && loops < 3) {
4141                 max_reclaim = min(delalloc_bytes, to_reclaim);
4142                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4143                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4144                 /*
4145                  * We need to wait for the async pages to actually start before
4146                  * we do anything.
4147                  */
4148                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4149                 if (!max_reclaim)
4150                         goto skip_async;
4151
4152                 if (max_reclaim <= nr_pages)
4153                         max_reclaim = 0;
4154                 else
4155                         max_reclaim -= nr_pages;
4156
4157                 wait_event(root->fs_info->async_submit_wait,
4158                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4159                            (int)max_reclaim);
4160 skip_async:
4161                 if (!trans)
4162                         flush = BTRFS_RESERVE_FLUSH_ALL;
4163                 else
4164                         flush = BTRFS_RESERVE_NO_FLUSH;
4165                 spin_lock(&space_info->lock);
4166                 if (can_overcommit(root, space_info, orig, flush)) {
4167                         spin_unlock(&space_info->lock);
4168                         break;
4169                 }
4170                 spin_unlock(&space_info->lock);
4171
4172                 loops++;
4173                 if (wait_ordered && !trans) {
4174                         btrfs_wait_ordered_roots(root->fs_info, items);
4175                 } else {
4176                         time_left = schedule_timeout_killable(1);
4177                         if (time_left)
4178                                 break;
4179                 }
4180                 delalloc_bytes = percpu_counter_sum_positive(
4181                                                 &root->fs_info->delalloc_bytes);
4182         }
4183 }
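
/*
 * Editor's sketch with assumed numbers: to shrink 1MiB of delalloc
 * reservation with a 16KiB nodesize, calc_reclaim_items_nr() above gives
 * items = 4, so to_reclaim becomes 4 * EXTENT_SIZE_PER_ITEM = 1MiB and each
 * loop iteration kicks off writeback for up to
 * nr_pages = 1MiB >> PAGE_CACHE_SHIFT = 256 pages (assuming 4KiB pages).
 */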
4184
4185 /**
4186  * may_commit_transaction - possibly commit the transaction if it's ok to
4187  * @root - the root we're allocating for
4188  * @bytes - the number of bytes we want to reserve
4189  * @force - force the commit
4190  *
4191  * This will check to make sure that committing the transaction will actually
4192  * get us somewhere and then commit the transaction if it does.  Otherwise it
4193  * will return -ENOSPC.
4194  */
4195 static int may_commit_transaction(struct btrfs_root *root,
4196                                   struct btrfs_space_info *space_info,
4197                                   u64 bytes, int force)
4198 {
4199         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4200         struct btrfs_trans_handle *trans;
4201
4202         trans = (struct btrfs_trans_handle *)current->journal_info;
4203         if (trans)
4204                 return -EAGAIN;
4205
4206         if (force)
4207                 goto commit;
4208
4209         /* See if there is enough pinned space to make this reservation */
4210         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4211                                    bytes) >= 0)
4212                 goto commit;
4213
4214         /*
4215          * See if there is some space in the delayed insertion reservation for
4216          * this reservation.
4217          */
4218         if (space_info != delayed_rsv->space_info)
4219                 return -ENOSPC;
4220
4221         spin_lock(&delayed_rsv->lock);
4222         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4223                                    bytes - delayed_rsv->size) >= 0) {
4224                 spin_unlock(&delayed_rsv->lock);
4225                 return -ENOSPC;
4226         }
4227         spin_unlock(&delayed_rsv->lock);
4228
4229 commit:
4230         trans = btrfs_join_transaction(root);
4231         if (IS_ERR(trans))
4232                 return -ENOSPC;
4233
4234         return btrfs_commit_transaction(trans, root);
4235 }
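
/*
 * Editor's note, illustrating the decision above with made-up numbers: if
 * the caller needs bytes = 8MiB and space_info->total_bytes_pinned is 10MiB,
 * committing the transaction will unpin at least as much space as we need,
 * so the commit is worth its cost and we jump to commit.  If only 2MiB is
 * pinned (and the delayed rsv can't make up the difference) we return
 * -ENOSPC instead of paying for a commit that cannot help.
 */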
4236
4237 enum flush_state {
4238         FLUSH_DELAYED_ITEMS_NR  =       1,
4239         FLUSH_DELAYED_ITEMS     =       2,
4240         FLUSH_DELALLOC          =       3,
4241         FLUSH_DELALLOC_WAIT     =       4,
4242         ALLOC_CHUNK             =       5,
4243         COMMIT_TRANS            =       6,
4244 };
4245
4246 static int flush_space(struct btrfs_root *root,
4247                        struct btrfs_space_info *space_info, u64 num_bytes,
4248                        u64 orig_bytes, int state)
4249 {
4250         struct btrfs_trans_handle *trans;
4251         int nr;
4252         int ret = 0;
4253
4254         switch (state) {
4255         case FLUSH_DELAYED_ITEMS_NR:
4256         case FLUSH_DELAYED_ITEMS:
4257                 if (state == FLUSH_DELAYED_ITEMS_NR)
4258                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4259                 else
4260                         nr = -1;
4261
4262                 trans = btrfs_join_transaction(root);
4263                 if (IS_ERR(trans)) {
4264                         ret = PTR_ERR(trans);
4265                         break;
4266                 }
4267                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4268                 btrfs_end_transaction(trans, root);
4269                 break;
4270         case FLUSH_DELALLOC:
4271         case FLUSH_DELALLOC_WAIT:
4272                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4273                                 state == FLUSH_DELALLOC_WAIT);
4274                 break;
4275         case ALLOC_CHUNK:
4276                 trans = btrfs_join_transaction(root);
4277                 if (IS_ERR(trans)) {
4278                         ret = PTR_ERR(trans);
4279                         break;
4280                 }
4281                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4282                                      btrfs_get_alloc_profile(root, 0),
4283                                      CHUNK_ALLOC_NO_FORCE);
4284                 btrfs_end_transaction(trans, root);
4285                 if (ret == -ENOSPC)
4286                         ret = 0;
4287                 break;
4288         case COMMIT_TRANS:
4289                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4290                 break;
4291         default:
4292                 ret = -ENOSPC;
4293                 break;
4294         }
4295
4296         return ret;
4297 }
4298
4299 static inline u64
4300 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4301                                  struct btrfs_space_info *space_info)
4302 {
4303         u64 used;
4304         u64 expected;
4305         u64 to_reclaim;
4306
4307         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4308                                 16 * 1024 * 1024);
4309         spin_lock(&space_info->lock);
4310         if (can_overcommit(root, space_info, to_reclaim,
4311                            BTRFS_RESERVE_FLUSH_ALL)) {
4312                 to_reclaim = 0;
4313                 goto out;
4314         }
4315
4316         used = space_info->bytes_used + space_info->bytes_reserved +
4317                space_info->bytes_pinned + space_info->bytes_readonly +
4318                space_info->bytes_may_use;
4319         if (can_overcommit(root, space_info, 1024 * 1024,
4320                            BTRFS_RESERVE_FLUSH_ALL))
4321                 expected = div_factor_fine(space_info->total_bytes, 95);
4322         else
4323                 expected = div_factor_fine(space_info->total_bytes, 90);
4324
4325         if (used > expected)
4326                 to_reclaim = used - expected;
4327         else
4328                 to_reclaim = 0;
4329         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4330                                      space_info->bytes_reserved);
4331 out:
4332         spin_unlock(&space_info->lock);
4333
4334         return to_reclaim;
4335 }
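
/*
 * Editor's note with made-up numbers: the initial probe is
 * min(num_online_cpus() * 1MiB, 16MiB); on an 8-CPU box, if 8MiB can still
 * be overcommitted there is nothing to reclaim.  Otherwise, with
 * total_bytes = 10GiB and used = 9.7GiB, and assuming a 1MiB overcommit
 * would still fit, expected = 95% of 10GiB = 9.5GiB, so
 * to_reclaim = 0.2GiB, clamped to bytes_may_use + bytes_reserved.
 */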
4336
4337 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4338                                         struct btrfs_fs_info *fs_info, u64 used)
4339 {
4340         return (used >= div_factor_fine(space_info->total_bytes, 98) &&
4341                 !btrfs_fs_closing(fs_info) &&
4342                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4343 }
4344
4345 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4346                                        struct btrfs_fs_info *fs_info,
4347                                        int flush_state)
4348 {
4349         u64 used;
4350
4351         spin_lock(&space_info->lock);
4352         /*
4353          * We've run out of space and flush_space has not freed any, so don't
4354          * bother doing async reclaim.
4355          */
4356         if (flush_state > COMMIT_TRANS && space_info->full) {
4357                 spin_unlock(&space_info->lock);
4358                 return 0;
4359         }
4360
4361         used = space_info->bytes_used + space_info->bytes_reserved +
4362                space_info->bytes_pinned + space_info->bytes_readonly +
4363                space_info->bytes_may_use;
4364         if (need_do_async_reclaim(space_info, fs_info, used)) {
4365                 spin_unlock(&space_info->lock);
4366                 return 1;
4367         }
4368         spin_unlock(&space_info->lock);
4369
4370         return 0;
4371 }
4372
4373 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4374 {
4375         struct btrfs_fs_info *fs_info;
4376         struct btrfs_space_info *space_info;
4377         u64 to_reclaim;
4378         int flush_state;
4379
4380         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4381         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4382
4383         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4384                                                       space_info);
4385         if (!to_reclaim)
4386                 return;
4387
4388         flush_state = FLUSH_DELAYED_ITEMS_NR;
4389         do {
4390                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4391                             to_reclaim, flush_state);
4392                 flush_state++;
4393                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4394                                                  flush_state))
4395                         return;
4396         } while (flush_state <= COMMIT_TRANS);
4397
4398         if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
4399                 queue_work(system_unbound_wq, work);
4400 }
4401
4402 void btrfs_init_async_reclaim_work(struct work_struct *work)
4403 {
4404         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4405 }
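
/*
 * Editor's sketch (hypothetical caller, not part of this file): the mount
 * path is expected to wire the work item up once, roughly like so, before
 * reserve_metadata_bytes() can queue it on system_unbound_wq:
 *
 *	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
 */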
4406
4407 /**
4408  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4409  * @root - the root we're allocating for
4410  * @block_rsv - the block_rsv we're allocating for
4411  * @orig_bytes - the number of bytes we want
4412  * @flush - whether or not we can flush to make our reservation
4413  *
4414  * This will reserve orig_bytes number of bytes from the space info associated
4415  * with the block_rsv.  If there is not enough space it will make an attempt to
4416  * flush out space to make room.  It will do this by flushing delalloc if
4417  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4418  * then no attempts to regain reservations will be made and this will fail if
4419  * there is not enough space already.
4420  */
4421 static int reserve_metadata_bytes(struct btrfs_root *root,
4422                                   struct btrfs_block_rsv *block_rsv,
4423                                   u64 orig_bytes,
4424                                   enum btrfs_reserve_flush_enum flush)
4425 {
4426         struct btrfs_space_info *space_info = block_rsv->space_info;
4427         u64 used;
4428         u64 num_bytes = orig_bytes;
4429         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4430         int ret = 0;
4431         bool flushing = false;
4432
4433 again:
4434         ret = 0;
4435         spin_lock(&space_info->lock);
4436         /*
4437          * We only want to wait if somebody other than us is flushing and we
4438          * are actually allowed to flush all things.
4439          */
4440         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4441                space_info->flush) {
4442                 spin_unlock(&space_info->lock);
4443                 /*
4444                  * If we have a trans handle we can't wait because the flusher
4445                  * may have to commit the transaction, which would mean we would
4446                  * deadlock since we are waiting for the flusher to finish, but
4447                  * hold the current transaction open.
4448                  */
4449                 if (current->journal_info)
4450                         return -EAGAIN;
4451                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4452                 /* Must have been killed, return */
4453                 if (ret)
4454                         return -EINTR;
4455
4456                 spin_lock(&space_info->lock);
4457         }
4458
4459         ret = -ENOSPC;
4460         used = space_info->bytes_used + space_info->bytes_reserved +
4461                 space_info->bytes_pinned + space_info->bytes_readonly +
4462                 space_info->bytes_may_use;
4463
4464         /*
4465          * The idea here is that if we've not already over-reserved the block
4466          * group then we can go ahead and save our reservation first and then
4467          * start flushing if we need to.  Otherwise if we've already overcommitted
4468          * let's start flushing stuff first and then come back and try to make
4469          * our reservation.
4470          */
4471         if (used <= space_info->total_bytes) {
4472                 if (used + orig_bytes <= space_info->total_bytes) {
4473                         space_info->bytes_may_use += orig_bytes;
4474                         trace_btrfs_space_reservation(root->fs_info,
4475                                 "space_info", space_info->flags, orig_bytes, 1);
4476                         ret = 0;
4477                 } else {
4478                         /*
4479                          * Ok set num_bytes to orig_bytes since we aren't
4480                          * overcommitted, this way we only try and reclaim what
4481                          * we need.
4482                          */
4483                         num_bytes = orig_bytes;
4484                 }
4485         } else {
4486                 /*
4487                  * Ok we're over committed, set num_bytes to the overcommitted
4488                  * amount plus the amount of bytes that we need for this
4489                  * reservation.
4490                  */
4491                 num_bytes = used - space_info->total_bytes +
4492                         (orig_bytes * 2);
4493         }
4494
4495         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4496                 space_info->bytes_may_use += orig_bytes;
4497                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4498                                               space_info->flags, orig_bytes,
4499                                               1);
4500                 ret = 0;
4501         }
4502
4503         /*
4504          * Couldn't make our reservation, save our place so while we're trying
4505          * to reclaim space we can actually use it instead of somebody else
4506          * stealing it from us.
4507          *
4508          * We make the other tasks wait for the flush only when we can flush
4509          * all things.
4510          */
4511         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4512                 flushing = true;
4513                 space_info->flush = 1;
4514         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4515                 used += orig_bytes;
4516                 /*
4517                  * We will do the space reservation dance during log replay,
4518                  * which means we won't have fs_info->fs_root set, so don't do
4519                  * the async reclaim as we will panic.
4520                  */
4521                 if (!root->fs_info->log_root_recovering &&
4522                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4523                     !work_busy(&root->fs_info->async_reclaim_work))
4524                         queue_work(system_unbound_wq,
4525                                    &root->fs_info->async_reclaim_work);
4526         }
4527         spin_unlock(&space_info->lock);
4528
4529         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4530                 goto out;
4531
4532         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4533                           flush_state);
4534         flush_state++;
4535
4536         /*
4537          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4538          * could happen.  So skip the delalloc flush.
4539          */
4540         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4541             (flush_state == FLUSH_DELALLOC ||
4542              flush_state == FLUSH_DELALLOC_WAIT))
4543                 flush_state = ALLOC_CHUNK;
4544
4545         if (!ret)
4546                 goto again;
4547         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4548                  flush_state < COMMIT_TRANS)
4549                 goto again;
4550         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4551                  flush_state <= COMMIT_TRANS)
4552                 goto again;
4553
4554 out:
4555         if (ret == -ENOSPC &&
4556             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4557                 struct btrfs_block_rsv *global_rsv =
4558                         &root->fs_info->global_block_rsv;
4559
4560                 if (block_rsv != global_rsv &&
4561                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4562                         ret = 0;
4563         }
4564         if (ret == -ENOSPC)
4565                 trace_btrfs_space_reservation(root->fs_info,
4566                                               "space_info:enospc",
4567                                               space_info->flags, orig_bytes, 1);
4568         if (flushing) {
4569                 spin_lock(&space_info->lock);
4570                 space_info->flush = 0;
4571                 wake_up_all(&space_info->wait);
4572                 spin_unlock(&space_info->lock);
4573         }
4574         return ret;
4575 }
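
/*
 * Editor's note: for BTRFS_RESERVE_FLUSH_ALL the retry loop above walks the
 * flush_state ladder in order -- FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS,
 * FLUSH_DELALLOC, FLUSH_DELALLOC_WAIT, ALLOC_CHUNK, COMMIT_TRANS -- retrying
 * the reservation after each step.  BTRFS_RESERVE_FLUSH_LIMIT skips the two
 * delalloc states (deadlock risk) and stops before COMMIT_TRANS.
 */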
4576
4577 static struct btrfs_block_rsv *get_block_rsv(
4578                                         const struct btrfs_trans_handle *trans,
4579                                         const struct btrfs_root *root)
4580 {
4581         struct btrfs_block_rsv *block_rsv = NULL;
4582
4583         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4584                 block_rsv = trans->block_rsv;
4585
4586         if (root == root->fs_info->csum_root && trans->adding_csums)
4587                 block_rsv = trans->block_rsv;
4588
4589         if (root == root->fs_info->uuid_root)
4590                 block_rsv = trans->block_rsv;
4591
4592         if (!block_rsv)
4593                 block_rsv = root->block_rsv;
4594
4595         if (!block_rsv)
4596                 block_rsv = &root->fs_info->empty_block_rsv;
4597
4598         return block_rsv;
4599 }
4600
4601 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4602                                u64 num_bytes)
4603 {
4604         int ret = -ENOSPC;
4605         spin_lock(&block_rsv->lock);
4606         if (block_rsv->reserved >= num_bytes) {
4607                 block_rsv->reserved -= num_bytes;
4608                 if (block_rsv->reserved < block_rsv->size)
4609                         block_rsv->full = 0;
4610                 ret = 0;
4611         }
4612         spin_unlock(&block_rsv->lock);
4613         return ret;
4614 }
4615
4616 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4617                                 u64 num_bytes, int update_size)
4618 {
4619         spin_lock(&block_rsv->lock);
4620         block_rsv->reserved += num_bytes;
4621         if (update_size)
4622                 block_rsv->size += num_bytes;
4623         else if (block_rsv->reserved >= block_rsv->size)
4624                 block_rsv->full = 1;
4625         spin_unlock(&block_rsv->lock);
4626 }
4627
4628 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4629                              struct btrfs_block_rsv *dest, u64 num_bytes,
4630                              int min_factor)
4631 {
4632         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4633         u64 min_bytes;
4634
4635         if (global_rsv->space_info != dest->space_info)
4636                 return -ENOSPC;
4637
4638         spin_lock(&global_rsv->lock);
4639         min_bytes = div_factor(global_rsv->size, min_factor);
4640         if (global_rsv->reserved < min_bytes + num_bytes) {
4641                 spin_unlock(&global_rsv->lock);
4642                 return -ENOSPC;
4643         }
4644         global_rsv->reserved -= num_bytes;
4645         if (global_rsv->reserved < global_rsv->size)
4646                 global_rsv->full = 0;
4647         spin_unlock(&global_rsv->lock);
4648
4649         block_rsv_add_bytes(dest, num_bytes, 1);
4650         return 0;
4651 }
4652
4653 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4654                                     struct btrfs_block_rsv *block_rsv,
4655                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4656 {
4657         struct btrfs_space_info *space_info = block_rsv->space_info;
4658
4659         spin_lock(&block_rsv->lock);
4660         if (num_bytes == (u64)-1)
4661                 num_bytes = block_rsv->size;
4662         block_rsv->size -= num_bytes;
4663         if (block_rsv->reserved >= block_rsv->size) {
4664                 num_bytes = block_rsv->reserved - block_rsv->size;
4665                 block_rsv->reserved = block_rsv->size;
4666                 block_rsv->full = 1;
4667         } else {
4668                 num_bytes = 0;
4669         }
4670         spin_unlock(&block_rsv->lock);
4671
4672         if (num_bytes > 0) {
4673                 if (dest) {
4674                         spin_lock(&dest->lock);
4675                         if (!dest->full) {
4676                                 u64 bytes_to_add;
4677
4678                                 bytes_to_add = dest->size - dest->reserved;
4679                                 bytes_to_add = min(num_bytes, bytes_to_add);
4680                                 dest->reserved += bytes_to_add;
4681                                 if (dest->reserved >= dest->size)
4682                                         dest->full = 1;
4683                                 num_bytes -= bytes_to_add;
4684                         }
4685                         spin_unlock(&dest->lock);
4686                 }
4687                 if (num_bytes) {
4688                         spin_lock(&space_info->lock);
4689                         space_info->bytes_may_use -= num_bytes;
4690                         trace_btrfs_space_reservation(fs_info, "space_info",
4691                                         space_info->flags, num_bytes, 0);
4692                         spin_unlock(&space_info->lock);
4693                 }
4694         }
4695 }
4696
4697 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4698                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4699 {
4700         int ret;
4701
4702         ret = block_rsv_use_bytes(src, num_bytes);
4703         if (ret)
4704                 return ret;
4705
4706         block_rsv_add_bytes(dst, num_bytes, 1);
4707         return 0;
4708 }
4709
4710 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4711 {
4712         memset(rsv, 0, sizeof(*rsv));
4713         spin_lock_init(&rsv->lock);
4714         rsv->type = type;
4715 }
4716
4717 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4718                                               unsigned short type)
4719 {
4720         struct btrfs_block_rsv *block_rsv;
4721         struct btrfs_fs_info *fs_info = root->fs_info;
4722
4723         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4724         if (!block_rsv)
4725                 return NULL;
4726
4727         btrfs_init_block_rsv(block_rsv, type);
4728         block_rsv->space_info = __find_space_info(fs_info,
4729                                                   BTRFS_BLOCK_GROUP_METADATA);
4730         return block_rsv;
4731 }
4732
4733 void btrfs_free_block_rsv(struct btrfs_root *root,
4734                           struct btrfs_block_rsv *rsv)
4735 {
4736         if (!rsv)
4737                 return;
4738         btrfs_block_rsv_release(root, rsv, (u64)-1);
4739         kfree(rsv);
4740 }
4741
4742 int btrfs_block_rsv_add(struct btrfs_root *root,
4743                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4744                         enum btrfs_reserve_flush_enum flush)
4745 {
4746         int ret;
4747
4748         if (num_bytes == 0)
4749                 return 0;
4750
4751         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4752         if (!ret) {
4753                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4754                 return 0;
4755         }
4756
4757         return ret;
4758 }
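
/*
 * Editor's sketch (hypothetical helper, not part of this file): the typical
 * lifetime of a temporary reservation using the helpers above.  The
 * BTRFS_BLOCK_RSV_TEMP type and the error handling mirror common callers.
 */
static int example_temp_reservation(struct btrfs_root *root, u64 num_bytes)
{
        struct btrfs_block_rsv *rsv;
        int ret;

        rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv)
                return -ENOMEM;

        /* Reserve from the metadata space_info, flushing if needed. */
        ret = btrfs_block_rsv_add(root, rsv, num_bytes,
                                  BTRFS_RESERVE_FLUSH_ALL);
        if (!ret) {
                /* ... consume the reservation ... */
        }

        /* Releases any remaining bytes and frees the rsv itself. */
        btrfs_free_block_rsv(root, rsv);
        return ret;
}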
4759
4760 int btrfs_block_rsv_check(struct btrfs_root *root,
4761                           struct btrfs_block_rsv *block_rsv, int min_factor)
4762 {
4763         u64 num_bytes = 0;
4764         int ret = -ENOSPC;
4765
4766         if (!block_rsv)
4767                 return 0;
4768
4769         spin_lock(&block_rsv->lock);
4770         num_bytes = div_factor(block_rsv->size, min_factor);
4771         if (block_rsv->reserved >= num_bytes)
4772                 ret = 0;
4773         spin_unlock(&block_rsv->lock);
4774
4775         return ret;
4776 }
4777
4778 int btrfs_block_rsv_refill(struct btrfs_root *root,
4779                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4780                            enum btrfs_reserve_flush_enum flush)
4781 {
4782         u64 num_bytes = 0;
4783         int ret = -ENOSPC;
4784
4785         if (!block_rsv)
4786                 return 0;
4787
4788         spin_lock(&block_rsv->lock);
4789         num_bytes = min_reserved;
4790         if (block_rsv->reserved >= num_bytes)
4791                 ret = 0;
4792         else
4793                 num_bytes -= block_rsv->reserved;
4794         spin_unlock(&block_rsv->lock);
4795
4796         if (!ret)
4797                 return 0;
4798
4799         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4800         if (!ret) {
4801                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4802                 return 0;
4803         }
4804
4805         return ret;
4806 }
4807
4808 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4809                             struct btrfs_block_rsv *dst_rsv,
4810                             u64 num_bytes)
4811 {
4812         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4813 }
4814
4815 void btrfs_block_rsv_release(struct btrfs_root *root,
4816                              struct btrfs_block_rsv *block_rsv,
4817                              u64 num_bytes)
4818 {
4819         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4820         if (global_rsv == block_rsv ||
4821             block_rsv->space_info != global_rsv->space_info)
4822                 global_rsv = NULL;
4823         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4824                                 num_bytes);
4825 }
4826
4827 /*
4828  * helper to calculate size of global block reservation.
4829  * the desired value is sum of space used by extent tree,
4830  * checksum tree and root tree
4831  */
4832 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4833 {
4834         struct btrfs_space_info *sinfo;
4835         u64 num_bytes;
4836         u64 meta_used;
4837         u64 data_used;
4838         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4839
4840         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4841         spin_lock(&sinfo->lock);
4842         data_used = sinfo->bytes_used;
4843         spin_unlock(&sinfo->lock);
4844
4845         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4846         spin_lock(&sinfo->lock);
4847         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4848                 data_used = 0;
4849         meta_used = sinfo->bytes_used;
4850         spin_unlock(&sinfo->lock);
4851
4852         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4853                     csum_size * 2;
4854         num_bytes += div64_u64(data_used + meta_used, 50);
4855
4856         if (num_bytes * 3 > meta_used)
4857                 num_bytes = div64_u64(meta_used, 3);
4858
4859         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
4860 }
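
/*
 * Editor's note, a worked example with assumed numbers: data_used = 100GiB,
 * meta_used = 1GiB, 4KiB blocks, 4-byte crc32c csums.  The csum term is
 * (100GiB >> 12) * 4 * 2 = 200MiB, plus (101GiB / 50) ~= 2GiB of slack,
 * giving ~2.2GiB; that exceeds meta_used / 3 ~= 341MiB, so the result is
 * clamped to ~341MiB and aligned up to nodesize << 10.
 */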
4861
4862 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4863 {
4864         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4865         struct btrfs_space_info *sinfo = block_rsv->space_info;
4866         u64 num_bytes;
4867
4868         num_bytes = calc_global_metadata_size(fs_info);
4869
4870         spin_lock(&sinfo->lock);
4871         spin_lock(&block_rsv->lock);
4872
4873         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4874
4875         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4876                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4877                     sinfo->bytes_may_use;
4878
4879         if (sinfo->total_bytes > num_bytes) {
4880                 num_bytes = sinfo->total_bytes - num_bytes;
4881                 block_rsv->reserved += num_bytes;
4882                 sinfo->bytes_may_use += num_bytes;
4883                 trace_btrfs_space_reservation(fs_info, "space_info",
4884                                       sinfo->flags, num_bytes, 1);
4885         }
4886
4887         if (block_rsv->reserved >= block_rsv->size) {
4888                 num_bytes = block_rsv->reserved - block_rsv->size;
4889                 sinfo->bytes_may_use -= num_bytes;
4890                 trace_btrfs_space_reservation(fs_info, "space_info",
4891                                       sinfo->flags, num_bytes, 0);
4892                 block_rsv->reserved = block_rsv->size;
4893                 block_rsv->full = 1;
4894         }
4895
4896         spin_unlock(&block_rsv->lock);
4897         spin_unlock(&sinfo->lock);
4898 }
4899
4900 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4901 {
4902         struct btrfs_space_info *space_info;
4903
4904         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4905         fs_info->chunk_block_rsv.space_info = space_info;
4906
4907         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4908         fs_info->global_block_rsv.space_info = space_info;
4909         fs_info->delalloc_block_rsv.space_info = space_info;
4910         fs_info->trans_block_rsv.space_info = space_info;
4911         fs_info->empty_block_rsv.space_info = space_info;
4912         fs_info->delayed_block_rsv.space_info = space_info;
4913
4914         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4915         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4916         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4917         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4918         if (fs_info->quota_root)
4919                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4920         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4921
4922         update_global_block_rsv(fs_info);
4923 }
4924
4925 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4926 {
4927         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4928                                 (u64)-1);
4929         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4930         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4931         WARN_ON(fs_info->trans_block_rsv.size > 0);
4932         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4933         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4934         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4935         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4936         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4937 }
4938
4939 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4940                                   struct btrfs_root *root)
4941 {
4942         if (!trans->block_rsv)
4943                 return;
4944
4945         if (!trans->bytes_reserved)
4946                 return;
4947
4948         trace_btrfs_space_reservation(root->fs_info, "transaction",
4949                                       trans->transid, trans->bytes_reserved, 0);
4950         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4951         trans->bytes_reserved = 0;
4952 }
4953
4954 /* Can only return 0 or -ENOSPC */
4955 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4956                                   struct inode *inode)
4957 {
4958         struct btrfs_root *root = BTRFS_I(inode)->root;
4959         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4960         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4961
4962         /*
4963          * We need to hold space in order to delete our orphan item once we've
4964          * added it, so this takes the reservation now so that we can release it
4965          * when we are truly done with the orphan item.
4966          */
4967         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4968         trace_btrfs_space_reservation(root->fs_info, "orphan",
4969                                       btrfs_ino(inode), num_bytes, 1);
4970         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4971 }
4972
4973 void btrfs_orphan_release_metadata(struct inode *inode)
4974 {
4975         struct btrfs_root *root = BTRFS_I(inode)->root;
4976         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4977         trace_btrfs_space_reservation(root->fs_info, "orphan",
4978                                       btrfs_ino(inode), num_bytes, 0);
4979         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4980 }
4981
4982 /*
4983  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4984  * root: the root of the parent directory
4985  * rsv: block reservation
4986  * items: the number of items that we need to reserve space for
4987  * qgroup_reserved: used to return the reserved size in qgroup
4988  *
4989  * This function is used to reserve the space for snapshot/subvolume
4990  * creation and deletion.  Those operations differ from the common
4991  * file/directory operations in that they change two fs/file trees
4992  * and the root tree, and the number of items that the qgroup reserves
4993  * differs from the free space reservation.  So we can not use
4994  * the space reservation mechanism in start_transaction().
4995  */
4996 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4997                                      struct btrfs_block_rsv *rsv,
4998                                      int items,
4999                                      u64 *qgroup_reserved,
5000                                      bool use_global_rsv)
5001 {
5002         u64 num_bytes;
5003         int ret;
5004         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5005
5006         if (root->fs_info->quota_enabled) {
5007                 /* One for parent inode, two for dir entries */
5008                 num_bytes = 3 * root->nodesize;
5009                 ret = btrfs_qgroup_reserve(root, num_bytes);
5010                 if (ret)
5011                         return ret;
5012         } else {
5013                 num_bytes = 0;
5014         }
5015
5016         *qgroup_reserved = num_bytes;
5017
5018         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5019         rsv->space_info = __find_space_info(root->fs_info,
5020                                             BTRFS_BLOCK_GROUP_METADATA);
5021         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5022                                   BTRFS_RESERVE_FLUSH_ALL);
5023
5024         if (ret == -ENOSPC && use_global_rsv)
5025                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5026
5027         if (ret) {
5028                 if (*qgroup_reserved)
5029                         btrfs_qgroup_free(root, *qgroup_reserved);
5030         }
5031
5032         return ret;
5033 }
5034
5035 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5036                                       struct btrfs_block_rsv *rsv,
5037                                       u64 qgroup_reserved)
5038 {
5039         btrfs_block_rsv_release(root, rsv, (u64)-1);
5040         if (qgroup_reserved)
5041                 btrfs_qgroup_free(root, qgroup_reserved);
5042 }
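
/*
 * Editor's sketch (hypothetical caller): how the subvolume reservation pair
 * above is meant to bracket a snapshot creation.  The item count and error
 * paths are illustrative only.
 */
static int example_snapshot_reservation(struct btrfs_root *parent_root,
                                        struct btrfs_block_rsv *rsv)
{
        u64 qgroup_reserved = 0;
        int ret;

        /* Roughly: inode item, dir items, root item, and so on. */
        ret = btrfs_subvolume_reserve_metadata(parent_root, rsv, 8,
                                               &qgroup_reserved, false);
        if (ret)
                return ret;

        /* ... create the snapshot, consuming space from rsv ... */

        btrfs_subvolume_release_metadata(parent_root, rsv, qgroup_reserved);
        return 0;
}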
5043
5044 /**
5045  * drop_outstanding_extent - drop an outstanding extent
5046  * @inode: the inode we're dropping the extent for
5047  *
5048  * This is called when we are freeing up an outstanding extent, either
5049  * after an error or after an extent is written.  This will return the number of
5050  * reserved extents that need to be freed.  This must be called with
5051  * BTRFS_I(inode)->lock held.
5052  */
5053 static unsigned drop_outstanding_extent(struct inode *inode)
5054 {
5055         unsigned drop_inode_space = 0;
5056         unsigned dropped_extents = 0;
5057
5058         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
5059         BTRFS_I(inode)->outstanding_extents--;
5060
5061         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5062             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5063                                &BTRFS_I(inode)->runtime_flags))
5064                 drop_inode_space = 1;
5065
5066         /*
5067          * If we have at least as many outstanding extents as we have
5068          * reserved then we need to leave the reserved extents count alone.
5069          */
5070         if (BTRFS_I(inode)->outstanding_extents >=
5071             BTRFS_I(inode)->reserved_extents)
5072                 return drop_inode_space;
5073
5074         dropped_extents = BTRFS_I(inode)->reserved_extents -
5075                 BTRFS_I(inode)->outstanding_extents;
5076         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5077         return dropped_extents + drop_inode_space;
5078 }
5079
5080 /**
5081  * calc_csum_metadata_size - return the amount of metadata space that must be
5082  *      reserved/free'd for the given bytes.
5083  * @inode: the inode we're manipulating
5084  * @num_bytes: the number of bytes in question
5085  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5086  *
5087  * This adjusts the number of csum_bytes in the inode and then returns the
5088  * correct amount of metadata that must either be reserved or freed.  We
5089  * calculate how many checksums we can fit into one leaf and then divide the
5090  * number of bytes that will need to be checksummed by this value to figure out
5091  * how many checksums will be required.  If we are adding bytes then the number
5092  * may go up and we will return the number of additional bytes that must be
5093  * reserved.  If it is going down we will return the number of bytes that must
5094  * be freed.
5095  *
5096  * This must be called with BTRFS_I(inode)->lock held.
5097  */
5098 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5099                                    int reserve)
5100 {
5101         struct btrfs_root *root = BTRFS_I(inode)->root;
5102         u64 csum_size;
5103         int num_csums_per_leaf;
5104         int num_csums;
5105         int old_csums;
5106
5107         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5108             BTRFS_I(inode)->csum_bytes == 0)
5109                 return 0;
5110
5111         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5112         if (reserve)
5113                 BTRFS_I(inode)->csum_bytes += num_bytes;
5114         else
5115                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5116         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
5117         num_csums_per_leaf = (int)div64_u64(csum_size,
5118                                             sizeof(struct btrfs_csum_item) +
5119                                             sizeof(struct btrfs_disk_key));
5120         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5121         num_csums = num_csums + num_csums_per_leaf - 1;
5122         num_csums = num_csums / num_csums_per_leaf;
5123
5124         old_csums = old_csums + num_csums_per_leaf - 1;
5125         old_csums = old_csums / num_csums_per_leaf;
5126
5127         /* No change, no need to reserve more */
5128         if (old_csums == num_csums)
5129                 return 0;
5130
5131         if (reserve)
5132                 return btrfs_calc_trans_metadata_size(root,
5133                                                       num_csums - old_csums);
5134
5135         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5136 }
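
/*
 * Editor's note with made-up numbers: suppose csum_bytes grows from 1MiB to
 * 2MiB with 4KiB sectors, so the csum count goes from 256 to 512.  If one
 * leaf holds, say, 400 of these entries, old_csums rounds up to 1 leaf and
 * num_csums to 2, so we reserve btrfs_calc_trans_metadata_size(root, 1)
 * more; if both round up to the same leaf count, nothing changes.
 */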
5137
5138 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5139 {
5140         struct btrfs_root *root = BTRFS_I(inode)->root;
5141         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5142         u64 to_reserve = 0;
5143         u64 csum_bytes;
5144         unsigned nr_extents = 0;
5145         int extra_reserve = 0;
5146         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5147         int ret = 0;
5148         bool delalloc_lock = true;
5149         u64 to_free = 0;
5150         unsigned dropped;
5151
5152         /* If we are a free space inode we need to not flush since we will be in
5153          * the middle of a transaction commit.  We also don't need the delalloc
5154          * mutex since we won't race with anybody.  We need this mostly to make
5155          * lockdep shut its filthy mouth.
5156          */
5157         if (btrfs_is_free_space_inode(inode)) {
5158                 flush = BTRFS_RESERVE_NO_FLUSH;
5159                 delalloc_lock = false;
5160         }
5161
5162         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5163             btrfs_transaction_in_commit(root->fs_info))
5164                 schedule_timeout(1);
5165
5166         if (delalloc_lock)
5167                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5168
5169         num_bytes = ALIGN(num_bytes, root->sectorsize);
5170
5171         spin_lock(&BTRFS_I(inode)->lock);
5172         BTRFS_I(inode)->outstanding_extents++;
5173
5174         if (BTRFS_I(inode)->outstanding_extents >
5175             BTRFS_I(inode)->reserved_extents)
5176                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5177                         BTRFS_I(inode)->reserved_extents;
5178
5179         /*
5180          * Add an item to reserve for updating the inode when we complete the
5181          * delalloc io.
5182          */
5183         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5184                       &BTRFS_I(inode)->runtime_flags)) {
5185                 nr_extents++;
5186                 extra_reserve = 1;
5187         }
5188
5189         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5190         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5191         csum_bytes = BTRFS_I(inode)->csum_bytes;
5192         spin_unlock(&BTRFS_I(inode)->lock);
5193
5194         if (root->fs_info->quota_enabled) {
5195                 ret = btrfs_qgroup_reserve(root, num_bytes +
5196                                            nr_extents * root->nodesize);
5197                 if (ret)
5198                         goto out_fail;
5199         }
5200
5201         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5202         if (unlikely(ret)) {
5203                 if (root->fs_info->quota_enabled)
5204                         btrfs_qgroup_free(root, num_bytes +
5205                                                 nr_extents * root->nodesize);
5206                 goto out_fail;
5207         }
5208
5209         spin_lock(&BTRFS_I(inode)->lock);
5210         if (extra_reserve) {
5211                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5212                         &BTRFS_I(inode)->runtime_flags);
5213                 nr_extents--;
5214         }
5215         BTRFS_I(inode)->reserved_extents += nr_extents;
5216         spin_unlock(&BTRFS_I(inode)->lock);
5217
5218         if (delalloc_lock)
5219                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5220
5221         if (to_reserve)
5222                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5223                                               btrfs_ino(inode), to_reserve, 1);
5224         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5225
5226         return 0;
5227
5228 out_fail:
5229         spin_lock(&BTRFS_I(inode)->lock);
5230         dropped = drop_outstanding_extent(inode);
5231         /*
5232          * If the inode's csum_bytes is the same as the original
5233          * csum_bytes then we know we haven't raced with any free()ers
5234          * so we can just reduce our inode's csum bytes and carry on.
5235          */
5236         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5237                 calc_csum_metadata_size(inode, num_bytes, 0);
5238         } else {
5239                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5240                 u64 bytes;
5241
5242                 /*
5243                  * This is tricky, but first we need to figure out how much we
5244                  * free'd from any free-ers that occurred during this
5245                  * reservation, so we reset ->csum_bytes to the csum_bytes
5246                  * before we dropped our lock, and then call the free for the
5247                  * number of bytes that were freed while we were trying our
5248                  * reservation.
5249                  */
5250                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5251                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5252                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5253
5254
5255                 /*
5256                  * Now we need to see how much we would have freed had we not
5257                  * been making this reservation and our ->csum_bytes were not
5258                  * artificially inflated.
5259                  */
5260                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5261                 bytes = csum_bytes - orig_csum_bytes;
5262                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5263
5264                 /*
5265                  * Now reset ->csum_bytes to what it should be.  If bytes is
5266                  * more than to_free then we would have free'd more space had we
5267                  * not had an artificially high ->csum_bytes, so we need to free
5268                  * the remainder.  If bytes is the same or less then we don't
5269                  * need to do anything, the other free-ers did the correct
5270                  * thing.
5271                  */
5272                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5273                 if (bytes > to_free)
5274                         to_free = bytes - to_free;
5275                 else
5276                         to_free = 0;
5277         }
5278         spin_unlock(&BTRFS_I(inode)->lock);
5279         if (dropped)
5280                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5281
5282         if (to_free) {
5283                 btrfs_block_rsv_release(root, block_rsv, to_free);
5284                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5285                                               btrfs_ino(inode), to_free, 0);
5286         }
5287         if (delalloc_lock)
5288                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5289         return ret;
5290 }
5291
5292 /**
5293  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5294  * @inode: the inode to release the reservation for
5295  * @num_bytes: the number of bytes we're releasing
5296  *
5297  * This will release the metadata reservation for an inode.  This can be called
5298  * once we complete IO for a given set of bytes to release their metadata
5299  * reservations.
5300  */
5301 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5302 {
5303         struct btrfs_root *root = BTRFS_I(inode)->root;
5304         u64 to_free = 0;
5305         unsigned dropped;
5306
5307         num_bytes = ALIGN(num_bytes, root->sectorsize);
5308         spin_lock(&BTRFS_I(inode)->lock);
5309         dropped = drop_outstanding_extent(inode);
5310
5311         if (num_bytes)
5312                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5313         spin_unlock(&BTRFS_I(inode)->lock);
5314         if (dropped > 0)
5315                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5316
5317         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5318                                       btrfs_ino(inode), to_free, 0);
5319         if (root->fs_info->quota_enabled) {
5320                 btrfs_qgroup_free(root, num_bytes +
5321                                         dropped * root->nodesize);
5322         }
5323
5324         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5325                                 to_free);
5326 }
5327
5328 /**
5329  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5330  * @inode: inode we're writing to
5331  * @num_bytes: the number of bytes we want to allocate
5332  *
5333  * This will do the following things
5334  *
5335  * o reserve space in the data space info for num_bytes
5336  * o reserve space in the metadata space info based on number of outstanding
5337  *   extents and how much csums will be needed
5338  * o add to the inode's ->delalloc_bytes
5339  * o add it to the fs_info's delalloc inodes list.
5340  *
5341  * This will return 0 for success and -ENOSPC if there is no space left.
5342  */
5343 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5344 {
5345         int ret;
5346
5347         ret = btrfs_check_data_free_space(inode, num_bytes);
5348         if (ret)
5349                 return ret;
5350
5351         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5352         if (ret) {
5353                 btrfs_free_reserved_data_space(inode, num_bytes);
5354                 return ret;
5355         }
5356
5357         return 0;
5358 }
5359
5360 /**
5361  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5362  * @inode: inode we're releasing space for
5363  * @num_bytes: the number of bytes we want to free up
5364  *
5365  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5366  * called in the case that we don't need the metadata AND data reservations
5367  * anymore, such as when there is an error or we insert an inline extent.
5368  *
5369  * This function will release the metadata space that was not used and will
5370  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5371  * list if there are no delalloc bytes left.
5372  */
5373 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5374 {
5375         btrfs_delalloc_release_metadata(inode, num_bytes);
5376         btrfs_free_reserved_data_space(inode, num_bytes);
5377 }
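
/*
 * Editor's sketch (hypothetical write-path caller, not part of this file):
 * the reserve/release pair above is meant to bracket a buffered write that
 * may bail out early.  The page-dirtying step is elided.
 */
static int example_write_begin(struct inode *inode, u64 len)
{
        int ret;

        /* Reserve data space and delalloc metadata in one call. */
        ret = btrfs_delalloc_reserve_space(inode, len);
        if (ret)
                return ret;

        ret = 0;        /* result of the (elided) page-dirtying step */
        if (ret) {
                /* Failed before IO: drop both reservations again. */
                btrfs_delalloc_release_space(inode, len);
                return ret;
        }
        return 0;
}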
5378
5379 static int update_block_group(struct btrfs_root *root,
5380                               u64 bytenr, u64 num_bytes, int alloc)
5381 {
5382         struct btrfs_block_group_cache *cache = NULL;
5383         struct btrfs_fs_info *info = root->fs_info;
5384         u64 total = num_bytes;
5385         u64 old_val;
5386         u64 byte_in_group;
5387         int factor;
5388
5389         /* block accounting for super block */
5390         spin_lock(&info->delalloc_root_lock);
5391         old_val = btrfs_super_bytes_used(info->super_copy);
5392         if (alloc)
5393                 old_val += num_bytes;
5394         else
5395                 old_val -= num_bytes;
5396         btrfs_set_super_bytes_used(info->super_copy, old_val);
5397         spin_unlock(&info->delalloc_root_lock);
5398
5399         while (total) {
5400                 cache = btrfs_lookup_block_group(info, bytenr);
5401                 if (!cache)
5402                         return -ENOENT;
5403                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5404                                     BTRFS_BLOCK_GROUP_RAID1 |
5405                                     BTRFS_BLOCK_GROUP_RAID10))
5406                         factor = 2;
5407                 else
5408                         factor = 1;
5409                 /*
5410                  * If this block group has free space cache written out, we
5411                  * need to make sure to load it if we are removing space.  This
5412                  * is because we need the unpinning stage to actually add the
5413                  * space back to the block group, otherwise we will leak space.
5414                  */
5415                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5416                         cache_block_group(cache, 1);
5417
5418                 byte_in_group = bytenr - cache->key.objectid;
5419                 WARN_ON(byte_in_group > cache->key.offset);
5420
5421                 spin_lock(&cache->space_info->lock);
5422                 spin_lock(&cache->lock);
5423
5424                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5425                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5426                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5427
5428                 cache->dirty = 1;
5429                 old_val = btrfs_block_group_used(&cache->item);
5430                 num_bytes = min(total, cache->key.offset - byte_in_group);
5431                 if (alloc) {
5432                         old_val += num_bytes;
5433                         btrfs_set_block_group_used(&cache->item, old_val);
5434                         cache->reserved -= num_bytes;
5435                         cache->space_info->bytes_reserved -= num_bytes;
5436                         cache->space_info->bytes_used += num_bytes;
5437                         cache->space_info->disk_used += num_bytes * factor;
5438                         spin_unlock(&cache->lock);
5439                         spin_unlock(&cache->space_info->lock);
5440                 } else {
5441                         old_val -= num_bytes;
5442
5443                         /*
5444                          * No longer have used bytes in this block group, queue
5445                          * it for deletion.
5446                          */
5447                         if (old_val == 0) {
5448                                 spin_lock(&info->unused_bgs_lock);
5449                                 if (list_empty(&cache->bg_list)) {
5450                                         btrfs_get_block_group(cache);
5451                                         list_add_tail(&cache->bg_list,
5452                                                       &info->unused_bgs);
5453                                 }
5454                                 spin_unlock(&info->unused_bgs_lock);
5455                         }
5456                         btrfs_set_block_group_used(&cache->item, old_val);
5457                         cache->pinned += num_bytes;
5458                         cache->space_info->bytes_pinned += num_bytes;
5459                         cache->space_info->bytes_used -= num_bytes;
5460                         cache->space_info->disk_used -= num_bytes * factor;
5461                         spin_unlock(&cache->lock);
5462                         spin_unlock(&cache->space_info->lock);
5463
5464                         set_extent_dirty(info->pinned_extents,
5465                                          bytenr, bytenr + num_bytes - 1,
5466                                          GFP_NOFS | __GFP_NOFAIL);
5467                 }
5468                 btrfs_put_block_group(cache);
5469                 total -= num_bytes;
5470                 bytenr += num_bytes;
5471         }
5472         return 0;
5473 }
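/*
 * The "factor" in update_block_group() reflects on-disk copies per logical
 * byte: DUP, RAID1 and RAID10 keep two copies, so disk_used moves twice as
 * fast as bytes_used.  A standalone sketch of the same mapping (not a
 * helper used by this file):
 *
 *	static int copies_for_flags(u64 flags)
 *	{
 *		if (flags & (BTRFS_BLOCK_GROUP_DUP |
 *			     BTRFS_BLOCK_GROUP_RAID1 |
 *			     BTRFS_BLOCK_GROUP_RAID10))
 *			return 2;
 *		return 1;
 *	}
 */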
5474
5475 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5476 {
5477         struct btrfs_block_group_cache *cache;
5478         u64 bytenr;
5479
5480         spin_lock(&root->fs_info->block_group_cache_lock);
5481         bytenr = root->fs_info->first_logical_byte;
5482         spin_unlock(&root->fs_info->block_group_cache_lock);
5483
5484         if (bytenr < (u64)-1)
5485                 return bytenr;
5486
5487         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5488         if (!cache)
5489                 return 0;
5490
5491         bytenr = cache->key.objectid;
5492         btrfs_put_block_group(cache);
5493
5494         return bytenr;
5495 }
5496
5497 static int pin_down_extent(struct btrfs_root *root,
5498                            struct btrfs_block_group_cache *cache,
5499                            u64 bytenr, u64 num_bytes, int reserved)
5500 {
5501         spin_lock(&cache->space_info->lock);
5502         spin_lock(&cache->lock);
5503         cache->pinned += num_bytes;
5504         cache->space_info->bytes_pinned += num_bytes;
5505         if (reserved) {
5506                 cache->reserved -= num_bytes;
5507                 cache->space_info->bytes_reserved -= num_bytes;
5508         }
5509         spin_unlock(&cache->lock);
5510         spin_unlock(&cache->space_info->lock);
5511
5512         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5513                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5514         if (reserved)
5515                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5516         return 0;
5517 }
5518
5519 /*
5520  * this function must be called within a transaction
5521  */
5522 int btrfs_pin_extent(struct btrfs_root *root,
5523                      u64 bytenr, u64 num_bytes, int reserved)
5524 {
5525         struct btrfs_block_group_cache *cache;
5526
5527         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5528         BUG_ON(!cache); /* Logic error */
5529
5530         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5531
5532         btrfs_put_block_group(cache);
5533         return 0;
5534 }
5535
5536 /*
5537  * this function must be called within a transaction
5538  */
5539 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5540                                     u64 bytenr, u64 num_bytes)
5541 {
5542         struct btrfs_block_group_cache *cache;
5543         int ret;
5544
5545         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5546         if (!cache)
5547                 return -EINVAL;
5548
5549         /*
5550          * pull in the free space cache (if any) so that our pin
5551          * removes the free space from the cache.  We set load_cache_only
5552          * to one because the slow code that reads in the free extents
5553          * already checks the pinned extents.
5554          */
5555         cache_block_group(cache, 1);
5556
5557         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5558
5559         /* remove us from the free space cache (if we're there at all) */
5560         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5561         btrfs_put_block_group(cache);
5562         return ret;
5563 }
5564
5565 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5566 {
5567         int ret;
5568         struct btrfs_block_group_cache *block_group;
5569         struct btrfs_caching_control *caching_ctl;
5570
5571         block_group = btrfs_lookup_block_group(root->fs_info, start);
5572         if (!block_group)
5573                 return -EINVAL;
5574
5575         cache_block_group(block_group, 0);
5576         caching_ctl = get_caching_control(block_group);
5577
5578         if (!caching_ctl) {
5579                 /* Logic error */
5580                 BUG_ON(!block_group_cache_done(block_group));
5581                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5582         } else {
5583                 mutex_lock(&caching_ctl->mutex);
5584
5585                 if (start >= caching_ctl->progress) {
5586                         ret = add_excluded_extent(root, start, num_bytes);
5587                 } else if (start + num_bytes <= caching_ctl->progress) {
5588                         ret = btrfs_remove_free_space(block_group,
5589                                                       start, num_bytes);
5590                 } else {
5591                         num_bytes = caching_ctl->progress - start;
5592                         ret = btrfs_remove_free_space(block_group,
5593                                                       start, num_bytes);
5594                         if (ret)
5595                                 goto out_lock;
5596
5597                         num_bytes = (start + num_bytes) -
5598                                 caching_ctl->progress;
5599                         start = caching_ctl->progress;
5600                         ret = add_excluded_extent(root, start, num_bytes);
5601                 }
5602 out_lock:
5603                 mutex_unlock(&caching_ctl->mutex);
5604                 put_caching_control(caching_ctl);
5605         }
5606         btrfs_put_block_group(block_group);
5607         return ret;
5608 }
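/*
 * The caching_ctl branch above partitions [start, start + num_bytes)
 * around caching_ctl->progress: what the caching thread has already
 * scanned must be removed from the free space cache, what it has not
 * scanned yet is excluded so the scanner skips it.  Schematically:
 *
 *	progress <= start          -> exclude the whole range
 *	progress >= start + len    -> remove the whole range from free space
 *	otherwise                  -> remove [start, progress) and
 *	                              exclude [progress, start + len)
 */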
5609
5610 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5611                                  struct extent_buffer *eb)
5612 {
5613         struct btrfs_file_extent_item *item;
5614         struct btrfs_key key;
5615         int found_type;
5616         int i;
5617
5618         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5619                 return 0;
5620
5621         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5622                 btrfs_item_key_to_cpu(eb, &key, i);
5623                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5624                         continue;
5625                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5626                 found_type = btrfs_file_extent_type(eb, item);
5627                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5628                         continue;
5629                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5630                         continue;
5631                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5632                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5633                 __exclude_logged_extent(log, key.objectid, key.offset);
5634         }
5635
5636         return 0;
5637 }
5638
5639 /**
5640  * btrfs_update_reserved_bytes - update the block_group and space info counters
5641  * @cache:      The cache we are manipulating
5642  * @num_bytes:  The number of bytes in question
5643  * @reserve:    One of the reservation enums
5644  * @delalloc:   The blocks are allocated for the delalloc write
5645  *
5646  * This is called by the allocator when it reserves space, or by somebody who is
5647  * freeing space that was never actually used on disk.  For example if you
5648  * reserve some space for a new leaf in transaction A and before transaction A
5649  * commits you free that leaf, you call this with reserve set to RESERVE_FREE
5650  * in order to clear the reservation.
5651  *
5652  * Metadata reservations should be made with RESERVE_ALLOC so we do the proper
5653  * ENOSPC accounting.  For data we handle the reservation through clearing the
5654  * delalloc bits in the io_tree.  We have to do this since we could end up
5655  * allocating less disk space for the amount of data we have reserved in the
5656  * case of compression.
5657  *
5658  * If this is a reservation and the block group has become read only we cannot
5659  * make the reservation and return -EAGAIN, otherwise this function always
5660  * succeeds.
5661  */
5662 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5663                                        u64 num_bytes, int reserve, int delalloc)
5664 {
5665         struct btrfs_space_info *space_info = cache->space_info;
5666         int ret = 0;
5667
5668         spin_lock(&space_info->lock);
5669         spin_lock(&cache->lock);
5670         if (reserve != RESERVE_FREE) {
5671                 if (cache->ro) {
5672                         ret = -EAGAIN;
5673                 } else {
5674                         cache->reserved += num_bytes;
5675                         space_info->bytes_reserved += num_bytes;
5676                         if (reserve == RESERVE_ALLOC) {
5677                                 trace_btrfs_space_reservation(cache->fs_info,
5678                                                 "space_info", space_info->flags,
5679                                                 num_bytes, 0);
5680                                 space_info->bytes_may_use -= num_bytes;
5681                         }
5682
5683                         if (delalloc)
5684                                 cache->delalloc_bytes += num_bytes;
5685                 }
5686         } else {
5687                 if (cache->ro)
5688                         space_info->bytes_readonly += num_bytes;
5689                 cache->reserved -= num_bytes;
5690                 space_info->bytes_reserved -= num_bytes;
5691
5692                 if (delalloc)
5693                         cache->delalloc_bytes -= num_bytes;
5694         }
5695         spin_unlock(&cache->lock);
5696         spin_unlock(&space_info->lock);
5697         return ret;
5698 }
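/*
 * Calling sketch for btrfs_update_reserved_bytes() (hypothetical values):
 * the allocator converts bytes_may_use into bytes_reserved with
 * RESERVE_ALLOC, and an unused reservation is dropped again with
 * RESERVE_FREE:
 *
 *	ret = btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC, 0);
 *	if (ret == -EAGAIN) {
 *		... the block group went read only, look elsewhere ...
 *	}
 *	...
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, 0);
 */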
5699
5700 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5701                                 struct btrfs_root *root)
5702 {
5703         struct btrfs_fs_info *fs_info = root->fs_info;
5704         struct btrfs_caching_control *next;
5705         struct btrfs_caching_control *caching_ctl;
5706         struct btrfs_block_group_cache *cache;
5707
5708         down_write(&fs_info->commit_root_sem);
5709
5710         list_for_each_entry_safe(caching_ctl, next,
5711                                  &fs_info->caching_block_groups, list) {
5712                 cache = caching_ctl->block_group;
5713                 if (block_group_cache_done(cache)) {
5714                         cache->last_byte_to_unpin = (u64)-1;
5715                         list_del_init(&caching_ctl->list);
5716                         put_caching_control(caching_ctl);
5717                 } else {
5718                         cache->last_byte_to_unpin = caching_ctl->progress;
5719                 }
5720         }
5721
5722         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5723                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5724         else
5725                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5726
5727         up_write(&fs_info->commit_root_sem);
5728
5729         update_global_block_rsv(fs_info);
5730 }
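/*
 * The swap above double-buffers pinned extents: extents freed while the
 * current transaction commits accumulate in one of the freed_extents
 * trees while btrfs_finish_extent_commit() drains the other, so the two
 * stages never step on each other.  Informally, per commit:
 *
 *	pinned_extents == &freed_extents[i]	(i alternates 0, 1)
 *	unpin tree     == &freed_extents[1 - i]
 */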
5731
5732 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5733 {
5734         struct btrfs_fs_info *fs_info = root->fs_info;
5735         struct btrfs_block_group_cache *cache = NULL;
5736         struct btrfs_space_info *space_info;
5737         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5738         u64 len;
5739         bool readonly;
5740
5741         while (start <= end) {
5742                 readonly = false;
5743                 if (!cache ||
5744                     start >= cache->key.objectid + cache->key.offset) {
5745                         if (cache)
5746                                 btrfs_put_block_group(cache);
5747                         cache = btrfs_lookup_block_group(fs_info, start);
5748                         BUG_ON(!cache); /* Logic error */
5749                 }
5750
5751                 len = cache->key.objectid + cache->key.offset - start;
5752                 len = min(len, end + 1 - start);
5753
5754                 if (start < cache->last_byte_to_unpin) {
5755                         len = min(len, cache->last_byte_to_unpin - start);
5756                         btrfs_add_free_space(cache, start, len);
5757                 }
5758
5759                 start += len;
5760                 space_info = cache->space_info;
5761
5762                 spin_lock(&space_info->lock);
5763                 spin_lock(&cache->lock);
5764                 cache->pinned -= len;
5765                 space_info->bytes_pinned -= len;
5766                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
5767                 if (cache->ro) {
5768                         space_info->bytes_readonly += len;
5769                         readonly = true;
5770                 }
5771                 spin_unlock(&cache->lock);
5772                 if (!readonly && global_rsv->space_info == space_info) {
5773                         spin_lock(&global_rsv->lock);
5774                         if (!global_rsv->full) {
5775                                 len = min(len, global_rsv->size -
5776                                           global_rsv->reserved);
5777                                 global_rsv->reserved += len;
5778                                 space_info->bytes_may_use += len;
5779                                 if (global_rsv->reserved >= global_rsv->size)
5780                                         global_rsv->full = 1;
5781                         }
5782                         spin_unlock(&global_rsv->lock);
5783                 }
5784                 spin_unlock(&space_info->lock);
5785         }
5786
5787         if (cache)
5788                 btrfs_put_block_group(cache);
5789         return 0;
5790 }
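/*
 * Refill note for unpin_extent_range(): freed bytes are offered to the
 * global block reserve before they become generally available, which is
 * why bytes_may_use grows by exactly the amount the reserve absorbs.
 */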
5791
5792 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5793                                struct btrfs_root *root)
5794 {
5795         struct btrfs_fs_info *fs_info = root->fs_info;
5796         struct extent_io_tree *unpin;
5797         u64 start;
5798         u64 end;
5799         int ret;
5800
5801         if (trans->aborted)
5802                 return 0;
5803
5804         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5805                 unpin = &fs_info->freed_extents[1];
5806         else
5807                 unpin = &fs_info->freed_extents[0];
5808
5809         while (1) {
5810                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5811                                             EXTENT_DIRTY, NULL);
5812                 if (ret)
5813                         break;
5814
5815                 if (btrfs_test_opt(root, DISCARD))
5816                         ret = btrfs_discard_extent(root, start,
5817                                                    end + 1 - start, NULL);
5818
5819                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5820                 unpin_extent_range(root, start, end);
5821                 cond_resched();
5822         }
5823
5824         return 0;
5825 }
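/*
 * The loop above is the usual drain pattern for an extent_io_tree: find
 * the first EXTENT_DIRTY range, act on it, clear the bit, repeat until
 * the lookup fails.  A minimal sketch of the same pattern:
 *
 *	while (!find_first_extent_bit(tree, 0, &start, &end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... use [start, end] ...
 *		clear_extent_dirty(tree, start, end, GFP_NOFS);
 *	}
 */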
5826
5827 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5828                              u64 owner, u64 root_objectid)
5829 {
5830         struct btrfs_space_info *space_info;
5831         u64 flags;
5832
5833         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5834                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5835                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5836                 else
5837                         flags = BTRFS_BLOCK_GROUP_METADATA;
5838         } else {
5839                 flags = BTRFS_BLOCK_GROUP_DATA;
5840         }
5841
5842         space_info = __find_space_info(fs_info, flags);
5843         BUG_ON(!space_info); /* Logic bug */
5844         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5845 }
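/*
 * Classification used above, in short: refs with an owner below
 * BTRFS_FIRST_FREE_OBJECTID come from tree blocks, and of those only the
 * chunk tree lives in SYSTEM block groups; everything else is file data:
 *
 *	owner <  FIRST_FREE && root == CHUNK_TREE  -> SYSTEM
 *	owner <  FIRST_FREE                        -> METADATA
 *	owner >= FIRST_FREE                        -> DATA
 */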
5846
5847
5848 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5849                                 struct btrfs_root *root,
5850                                 u64 bytenr, u64 num_bytes, u64 parent,
5851                                 u64 root_objectid, u64 owner_objectid,
5852                                 u64 owner_offset, int refs_to_drop,
5853                                 struct btrfs_delayed_extent_op *extent_op,
5854                                 int no_quota)
5855 {
5856         struct btrfs_key key;
5857         struct btrfs_path *path;
5858         struct btrfs_fs_info *info = root->fs_info;
5859         struct btrfs_root *extent_root = info->extent_root;
5860         struct extent_buffer *leaf;
5861         struct btrfs_extent_item *ei;
5862         struct btrfs_extent_inline_ref *iref;
5863         int ret;
5864         int is_data;
5865         int extent_slot = 0;
5866         int found_extent = 0;
5867         int num_to_del = 1;
5868         u32 item_size;
5869         u64 refs;
5870         int last_ref = 0;
5871         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
5872         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5873                                                  SKINNY_METADATA);
5874
5875         if (!info->quota_enabled || !is_fstree(root_objectid))
5876                 no_quota = 1;
5877
5878         path = btrfs_alloc_path();
5879         if (!path)
5880                 return -ENOMEM;
5881
5882         path->reada = 1;
5883         path->leave_spinning = 1;
5884
5885         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5886         BUG_ON(!is_data && refs_to_drop != 1);
5887
5888         if (is_data)
5889                 skinny_metadata = 0;
5890
5891         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5892                                     bytenr, num_bytes, parent,
5893                                     root_objectid, owner_objectid,
5894                                     owner_offset);
5895         if (ret == 0) {
5896                 extent_slot = path->slots[0];
5897                 while (extent_slot >= 0) {
5898                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5899                                               extent_slot);
5900                         if (key.objectid != bytenr)
5901                                 break;
5902                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5903                             key.offset == num_bytes) {
5904                                 found_extent = 1;
5905                                 break;
5906                         }
5907                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5908                             key.offset == owner_objectid) {
5909                                 found_extent = 1;
5910                                 break;
5911                         }
5912                         if (path->slots[0] - extent_slot > 5)
5913                                 break;
5914                         extent_slot--;
5915                 }
5916 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5917                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5918                 if (found_extent && item_size < sizeof(*ei))
5919                         found_extent = 0;
5920 #endif
5921                 if (!found_extent) {
5922                         BUG_ON(iref);
5923                         ret = remove_extent_backref(trans, extent_root, path,
5924                                                     NULL, refs_to_drop,
5925                                                     is_data, &last_ref);
5926                         if (ret) {
5927                                 btrfs_abort_transaction(trans, extent_root, ret);
5928                                 goto out;
5929                         }
5930                         btrfs_release_path(path);
5931                         path->leave_spinning = 1;
5932
5933                         key.objectid = bytenr;
5934                         key.type = BTRFS_EXTENT_ITEM_KEY;
5935                         key.offset = num_bytes;
5936
5937                         if (!is_data && skinny_metadata) {
5938                                 key.type = BTRFS_METADATA_ITEM_KEY;
5939                                 key.offset = owner_objectid;
5940                         }
5941
5942                         ret = btrfs_search_slot(trans, extent_root,
5943                                                 &key, path, -1, 1);
5944                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5945                                 /*
5946                                  * Couldn't find our skinny metadata item,
5947                                  * see if we have ye olde extent item.
5948                                  */
5949                                 path->slots[0]--;
5950                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5951                                                       path->slots[0]);
5952                                 if (key.objectid == bytenr &&
5953                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5954                                     key.offset == num_bytes)
5955                                         ret = 0;
5956                         }
5957
5958                         if (ret > 0 && skinny_metadata) {
5959                                 skinny_metadata = false;
5960                                 key.objectid = bytenr;
5961                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5962                                 key.offset = num_bytes;
5963                                 btrfs_release_path(path);
5964                                 ret = btrfs_search_slot(trans, extent_root,
5965                                                         &key, path, -1, 1);
5966                         }
5967
5968                         if (ret) {
5969                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5970                                         ret, bytenr);
5971                                 if (ret > 0)
5972                                         btrfs_print_leaf(extent_root,
5973                                                          path->nodes[0]);
5974                         }
5975                         if (ret < 0) {
5976                                 btrfs_abort_transaction(trans, extent_root, ret);
5977                                 goto out;
5978                         }
5979                         extent_slot = path->slots[0];
5980                 }
5981         } else if (WARN_ON(ret == -ENOENT)) {
5982                 btrfs_print_leaf(extent_root, path->nodes[0]);
5983                 btrfs_err(info,
5984                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5985                         bytenr, parent, root_objectid, owner_objectid,
5986                         owner_offset);
5987                 btrfs_abort_transaction(trans, extent_root, ret);
5988                 goto out;
5989         } else {
5990                 btrfs_abort_transaction(trans, extent_root, ret);
5991                 goto out;
5992         }
5993
5994         leaf = path->nodes[0];
5995         item_size = btrfs_item_size_nr(leaf, extent_slot);
5996 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5997         if (item_size < sizeof(*ei)) {
5998                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5999                 ret = convert_extent_item_v0(trans, extent_root, path,
6000                                              owner_objectid, 0);
6001                 if (ret < 0) {
6002                         btrfs_abort_transaction(trans, extent_root, ret);
6003                         goto out;
6004                 }
6005
6006                 btrfs_release_path(path);
6007                 path->leave_spinning = 1;
6008
6009                 key.objectid = bytenr;
6010                 key.type = BTRFS_EXTENT_ITEM_KEY;
6011                 key.offset = num_bytes;
6012
6013                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6014                                         -1, 1);
6015                 if (ret) {
6016                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6017                                 ret, bytenr);
6018                         btrfs_print_leaf(extent_root, path->nodes[0]);
6019                 }
6020                 if (ret < 0) {
6021                         btrfs_abort_transaction(trans, extent_root, ret);
6022                         goto out;
6023                 }
6024
6025                 extent_slot = path->slots[0];
6026                 leaf = path->nodes[0];
6027                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6028         }
6029 #endif
6030         BUG_ON(item_size < sizeof(*ei));
6031         ei = btrfs_item_ptr(leaf, extent_slot,
6032                             struct btrfs_extent_item);
6033         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6034             key.type == BTRFS_EXTENT_ITEM_KEY) {
6035                 struct btrfs_tree_block_info *bi;
6036                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6037                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6038                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6039         }
6040
6041         refs = btrfs_extent_refs(leaf, ei);
6042         if (refs < refs_to_drop) {
6043                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6044                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6045                 ret = -EINVAL;
6046                 btrfs_abort_transaction(trans, extent_root, ret);
6047                 goto out;
6048         }
6049         refs -= refs_to_drop;
6050
6051         if (refs > 0) {
6052                 type = BTRFS_QGROUP_OPER_SUB_SHARED;
6053                 if (extent_op)
6054                         __run_delayed_extent_op(extent_op, leaf, ei);
6055                 /*
6056                  * In the case of an inline back ref, the reference count
6057                  * will be updated by remove_extent_backref
6058                  */
6059                 if (iref) {
6060                         BUG_ON(!found_extent);
6061                 } else {
6062                         btrfs_set_extent_refs(leaf, ei, refs);
6063                         btrfs_mark_buffer_dirty(leaf);
6064                 }
6065                 if (found_extent) {
6066                         ret = remove_extent_backref(trans, extent_root, path,
6067                                                     iref, refs_to_drop,
6068                                                     is_data, &last_ref);
6069                         if (ret) {
6070                                 btrfs_abort_transaction(trans, extent_root, ret);
6071                                 goto out;
6072                         }
6073                 }
6074                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6075                                  root_objectid);
6076         } else {
6077                 if (found_extent) {
6078                         BUG_ON(is_data && refs_to_drop !=
6079                                extent_data_ref_count(root, path, iref));
6080                         if (iref) {
6081                                 BUG_ON(path->slots[0] != extent_slot);
6082                         } else {
6083                                 BUG_ON(path->slots[0] != extent_slot + 1);
6084                                 path->slots[0] = extent_slot;
6085                                 num_to_del = 2;
6086                         }
6087                 }
6088
6089                 last_ref = 1;
6090                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6091                                       num_to_del);
6092                 if (ret) {
6093                         btrfs_abort_transaction(trans, extent_root, ret);
6094                         goto out;
6095                 }
6096                 btrfs_release_path(path);
6097
6098                 if (is_data) {
6099                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6100                         if (ret) {
6101                                 btrfs_abort_transaction(trans, extent_root, ret);
6102                                 goto out;
6103                         }
6104                 }
6105
6106                 ret = update_block_group(root, bytenr, num_bytes, 0);
6107                 if (ret) {
6108                         btrfs_abort_transaction(trans, extent_root, ret);
6109                         goto out;
6110                 }
6111         }
6112         btrfs_release_path(path);
6113
6114         /* Deal with the quota accounting */
6115         if (!ret && last_ref && !no_quota) {
6116                 int mod_seq = 0;
6117
6118                 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6119                     type == BTRFS_QGROUP_OPER_SUB_SHARED)
6120                         mod_seq = 1;
6121
6122                 ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6123                                               bytenr, num_bytes, type,
6124                                               mod_seq);
6125         }
6126 out:
6127         btrfs_free_path(path);
6128         return ret;
6129 }
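/*
 * Outcome summary for __btrfs_free_extent(): if references remain after
 * the drop we only update the ref count (and possibly remove one backref
 * item); once the count hits zero the extent item itself is deleted, data
 * csums are dropped, and update_block_group() moves the bytes from used
 * to pinned.
 */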
6130
6131 /*
6132  * when we free a block, it is possible (and likely) that we free the last
6133  * delayed ref for that extent as well.  This searches the delayed ref tree for
6134  * a given extent, and if there are no other delayed refs to be processed,
6135  * removes the ref head from the tree.
6136  */
6137 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6138                                       struct btrfs_root *root, u64 bytenr)
6139 {
6140         struct btrfs_delayed_ref_head *head;
6141         struct btrfs_delayed_ref_root *delayed_refs;
6142         int ret = 0;
6143
6144         delayed_refs = &trans->transaction->delayed_refs;
6145         spin_lock(&delayed_refs->lock);
6146         head = btrfs_find_delayed_ref_head(trans, bytenr);
6147         if (!head)
6148                 goto out_delayed_unlock;
6149
6150         spin_lock(&head->lock);
6151         if (rb_first(&head->ref_root))
6152                 goto out;
6153
6154         if (head->extent_op) {
6155                 if (!head->must_insert_reserved)
6156                         goto out;
6157                 btrfs_free_delayed_extent_op(head->extent_op);
6158                 head->extent_op = NULL;
6159         }
6160
6161         /*
6162          * waiting for the lock here would deadlock.  If someone else has it
6163          * locked, they are already in the process of dropping it anyway.
6164          */
6165         if (!mutex_trylock(&head->mutex))
6166                 goto out;
6167
6168         /*
6169          * at this point we have a head with no other entries.  Go
6170          * ahead and process it.
6171          */
6172         head->node.in_tree = 0;
6173         rb_erase(&head->href_node, &delayed_refs->href_root);
6174
6175         atomic_dec(&delayed_refs->num_entries);
6176
6177         /*
6178          * we don't take a ref on the node because we're removing it from the
6179          * tree, so we just steal the ref the tree was holding.
6180          */
6181         delayed_refs->num_heads--;
6182         if (head->processing == 0)
6183                 delayed_refs->num_heads_ready--;
6184         head->processing = 0;
6185         spin_unlock(&head->lock);
6186         spin_unlock(&delayed_refs->lock);
6187
6188         BUG_ON(head->extent_op);
6189         if (head->must_insert_reserved)
6190                 ret = 1;
6191
6192         mutex_unlock(&head->mutex);
6193         btrfs_put_delayed_ref(&head->node);
6194         return ret;
6195 out:
6196         spin_unlock(&head->lock);
6197
6198 out_delayed_unlock:
6199         spin_unlock(&delayed_refs->lock);
6200         return 0;
6201 }
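/*
 * Return contract of check_ref_cleanup(), as relied on by
 * btrfs_free_tree_block() below: it returns 1 only when the ref head
 * could be removed and the extent was reserved in this transaction but
 * never inserted, so the space can go straight back to the free space
 * cache; in every other case it returns 0 and the caller accounts the
 * space as pinned instead.
 */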
6202
6203 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6204                            struct btrfs_root *root,
6205                            struct extent_buffer *buf,
6206                            u64 parent, int last_ref)
6207 {
6208         struct btrfs_block_group_cache *cache = NULL;
6209         int pin = 1;
6210         int ret;
6211
6212         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6213                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6214                                         buf->start, buf->len,
6215                                         parent, root->root_key.objectid,
6216                                         btrfs_header_level(buf),
6217                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6218                 BUG_ON(ret); /* -ENOMEM */
6219         }
6220
6221         if (!last_ref)
6222                 return;
6223
6224         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6225
6226         if (btrfs_header_generation(buf) == trans->transid) {
6227                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6228                         ret = check_ref_cleanup(trans, root, buf->start);
6229                         if (!ret)
6230                                 goto out;
6231                 }
6232
6233                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6234                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6235                         goto out;
6236                 }
6237
6238                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6239
6240                 btrfs_add_free_space(cache, buf->start, buf->len);
6241                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6242                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6243                 pin = 0;
6244         }
6245 out:
6246         if (pin)
6247                 add_pinned_bytes(root->fs_info, buf->len,
6248                                  btrfs_header_level(buf),
6249                                  root->root_key.objectid);
6250
6251         /*
6252          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6253          * anymore.
6254          */
6255         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6256         btrfs_put_block_group(cache);
6257 }
6258
6259 /* Can return -ENOMEM */
6260 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6261                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6262                       u64 owner, u64 offset, int no_quota)
6263 {
6264         int ret;
6265         struct btrfs_fs_info *fs_info = root->fs_info;
6266
6267         if (btrfs_test_is_dummy_root(root))
6268                 return 0;
6269
6270         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6271
6272         /*
6273          * tree log blocks never actually go into the extent allocation
6274          * tree, so just update pinning info and exit early.
6275          */
6276         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6277                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6278                 /* unlocks the pinned mutex */
6279                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6280                 ret = 0;
6281         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6282                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6283                                         num_bytes,
6284                                         parent, root_objectid, (int)owner,
6285                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6286         } else {
6287                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6288                                                 num_bytes,
6289                                                 parent, root_objectid, owner,
6290                                                 offset, BTRFS_DROP_DELAYED_REF,
6291                                                 NULL, no_quota);
6292         }
6293         return ret;
6294 }
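/*
 * Dispatch summary for btrfs_free_extent(): log tree blocks are pinned
 * immediately (they never get extent items), other tree blocks queue a
 * delayed tree ref, and file data queues a delayed data ref.  All three
 * paths account the bytes as pinned up front via add_pinned_bytes().
 */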
6295
6296 /*
6297  * when we wait for progress in the block group caching, it's because
6298  * our allocation attempt failed at least once.  So, we must sleep
6299  * and let some progress happen before we try again.
6300  *
6301  * This function will sleep at least once waiting for new free space to
6302  * show up, and then it will check the block group free space numbers
6303  * for our min num_bytes.  Another option is to have it go ahead
6304  * and look in the rbtree for a free extent of a given size, but this
6305  * is a good start.
6306  *
6307  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6308  * any of the information in this block group.
6309  */
6310 static noinline void
6311 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6312                                 u64 num_bytes)
6313 {
6314         struct btrfs_caching_control *caching_ctl;
6315
6316         caching_ctl = get_caching_control(cache);
6317         if (!caching_ctl)
6318                 return;
6319
6320         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6321                    (cache->free_space_ctl->free_space >= num_bytes));
6322
6323         put_caching_control(caching_ctl);
6324 }
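/*
 * Typical retry pattern built on the helper above (mirroring the
 * failed_cluster_refill path in find_free_extent() below): fail once,
 * wait for the caching thread to publish enough free space, then retry
 * the same block group a single time:
 *
 *	if (!cached && !failed_cluster_refill) {
 *		failed_cluster_refill = true;
 *		wait_block_group_cache_progress(block_group,
 *				num_bytes + empty_cluster + empty_size);
 *		goto have_block_group;
 *	}
 */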
6325
6326 static noinline int
6327 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6328 {
6329         struct btrfs_caching_control *caching_ctl;
6330         int ret = 0;
6331
6332         caching_ctl = get_caching_control(cache);
6333         if (!caching_ctl)
6334                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6335
6336         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6337         if (cache->cached == BTRFS_CACHE_ERROR)
6338                 ret = -EIO;
6339         put_caching_control(caching_ctl);
6340         return ret;
6341 }
6342
6343 int __get_raid_index(u64 flags)
6344 {
6345         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6346                 return BTRFS_RAID_RAID10;
6347         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6348                 return BTRFS_RAID_RAID1;
6349         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6350                 return BTRFS_RAID_DUP;
6351         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6352                 return BTRFS_RAID_RAID0;
6353         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6354                 return BTRFS_RAID_RAID5;
6355         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6356                 return BTRFS_RAID_RAID6;
6357
6358         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6359 }
6360
6361 int get_block_group_index(struct btrfs_block_group_cache *cache)
6362 {
6363         return __get_raid_index(cache->flags);
6364 }
6365
6366 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6367         [BTRFS_RAID_RAID10]     = "raid10",
6368         [BTRFS_RAID_RAID1]      = "raid1",
6369         [BTRFS_RAID_DUP]        = "dup",
6370         [BTRFS_RAID_RAID0]      = "raid0",
6371         [BTRFS_RAID_SINGLE]     = "single",
6372         [BTRFS_RAID_RAID5]      = "raid5",
6373         [BTRFS_RAID_RAID6]      = "raid6",
6374 };
6375
6376 static const char *get_raid_name(enum btrfs_raid_types type)
6377 {
6378         if (type >= BTRFS_NR_RAID_TYPES)
6379                 return NULL;
6380
6381         return btrfs_raid_type_names[type];
6382 }
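/*
 * Example tying the two lookups together (illustrative only):
 *
 *	int index = get_block_group_index(cache);
 *	const char *name = get_raid_name(index);
 *
 * which yields "raid10" for a RAID10 block group, or NULL for an out of
 * range index.
 */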
6383
6384 enum btrfs_loop_type {
6385         LOOP_CACHING_NOWAIT = 0,
6386         LOOP_CACHING_WAIT = 1,
6387         LOOP_ALLOC_CHUNK = 2,
6388         LOOP_NO_EMPTY_SIZE = 3,
6389 };
6390
6391 static inline void
6392 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6393                        int delalloc)
6394 {
6395         if (delalloc)
6396                 down_read(&cache->data_rwsem);
6397 }
6398
6399 static inline void
6400 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6401                        int delalloc)
6402 {
6403         btrfs_get_block_group(cache);
6404         if (delalloc)
6405                 down_read(&cache->data_rwsem);
6406 }
6407
6408 static struct btrfs_block_group_cache *
6409 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6410                    struct btrfs_free_cluster *cluster,
6411                    int delalloc)
6412 {
6413         struct btrfs_block_group_cache *used_bg;
6414         bool locked = false;
6415 again:
6416         spin_lock(&cluster->refill_lock);
6417         if (locked) {
6418                 if (used_bg == cluster->block_group)
6419                         return used_bg;
6420
6421                 up_read(&used_bg->data_rwsem);
6422                 btrfs_put_block_group(used_bg);
6423         }
6424
6425         used_bg = cluster->block_group;
6426         if (!used_bg)
6427                 return NULL;
6428
6429         if (used_bg == block_group)
6430                 return used_bg;
6431
6432         btrfs_get_block_group(used_bg);
6433
6434         if (!delalloc)
6435                 return used_bg;
6436
6437         if (down_read_trylock(&used_bg->data_rwsem))
6438                 return used_bg;
6439
6440         spin_unlock(&cluster->refill_lock);
6441         down_read(&used_bg->data_rwsem);
6442         locked = true;
6443         goto again;
6444 }
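/*
 * Locking note for btrfs_lock_cluster(): refill_lock is a spinlock, so we
 * must not block on data_rwsem while holding it.  When the trylock fails
 * we drop refill_lock, take data_rwsem the slow way, and loop back to
 * re-check that the cluster still points at the same block group; if it
 * moved underneath us we unwind and start over.
 */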
6445
6446 static inline void
6447 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6448                          int delalloc)
6449 {
6450         if (delalloc)
6451                 up_read(&cache->data_rwsem);
6452         btrfs_put_block_group(cache);
6453 }
6454
6455 /*
6456  * walks the btree of allocated extents and finds a hole of a given size.
6457  * The key ins is changed to record the hole:
6458  * ins->objectid == start position
6459  * ins->type == BTRFS_EXTENT_ITEM_KEY
6460  * ins->offset == the size of the hole.
6461  * Any available blocks before search_start are skipped.
6462  *
6463  * If there is no suitable free space, we record the size of the largest
6464  * free space extent we found instead.
6465  */
6466 static noinline int find_free_extent(struct btrfs_root *orig_root,
6467                                      u64 num_bytes, u64 empty_size,
6468                                      u64 hint_byte, struct btrfs_key *ins,
6469                                      u64 flags, int delalloc)
6470 {
6471         int ret = 0;
6472         struct btrfs_root *root = orig_root->fs_info->extent_root;
6473         struct btrfs_free_cluster *last_ptr = NULL;
6474         struct btrfs_block_group_cache *block_group = NULL;
6475         u64 search_start = 0;
6476         u64 max_extent_size = 0;
6477         int empty_cluster = 2 * 1024 * 1024;
6478         struct btrfs_space_info *space_info;
6479         int loop = 0;
6480         int index = __get_raid_index(flags);
6481         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6482                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6483         bool failed_cluster_refill = false;
6484         bool failed_alloc = false;
6485         bool use_cluster = true;
6486         bool have_caching_bg = false;
6487
6488         WARN_ON(num_bytes < root->sectorsize);
6489         ins->type = BTRFS_EXTENT_ITEM_KEY;
6490         ins->objectid = 0;
6491         ins->offset = 0;
6492
6493         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6494
6495         space_info = __find_space_info(root->fs_info, flags);
6496         if (!space_info) {
6497                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6498                 return -ENOSPC;
6499         }
6500
6501         /*
6502          * If the space info is for both data and metadata it means we have a
6503          * small filesystem and we can't use the clustering stuff.
6504          */
6505         if (btrfs_mixed_space_info(space_info))
6506                 use_cluster = false;
6507
6508         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6509                 last_ptr = &root->fs_info->meta_alloc_cluster;
6510                 if (!btrfs_test_opt(root, SSD))
6511                         empty_cluster = 64 * 1024;
6512         }
6513
6514         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6515             btrfs_test_opt(root, SSD)) {
6516                 last_ptr = &root->fs_info->data_alloc_cluster;
6517         }
6518
6519         if (last_ptr) {
6520                 spin_lock(&last_ptr->lock);
6521                 if (last_ptr->block_group)
6522                         hint_byte = last_ptr->window_start;
6523                 spin_unlock(&last_ptr->lock);
6524         }
6525
6526         search_start = max(search_start, first_logical_byte(root, 0));
6527         search_start = max(search_start, hint_byte);
6528
6529         if (!last_ptr)
6530                 empty_cluster = 0;
6531
6532         if (search_start == hint_byte) {
6533                 block_group = btrfs_lookup_block_group(root->fs_info,
6534                                                        search_start);
6535                 /*
6536                  * we don't want to use the block group if it doesn't match our
6537                  * allocation bits, or if it's not cached.
6538                  *
6539                  * However if we are re-searching with an ideal block group
6540                  * picked out then we don't care that the block group is cached.
6541                  */
6542                 if (block_group && block_group_bits(block_group, flags) &&
6543                     block_group->cached != BTRFS_CACHE_NO) {
6544                         down_read(&space_info->groups_sem);
6545                         if (list_empty(&block_group->list) ||
6546                             block_group->ro) {
6547                                 /*
6548                                  * someone is removing this block group,
6549                                  * we can't jump to the have_block_group
6550                                  * label because our list pointers are not
6551                                  * valid
6552                                  */
6553                                 btrfs_put_block_group(block_group);
6554                                 up_read(&space_info->groups_sem);
6555                         } else {
6556                                 index = get_block_group_index(block_group);
6557                                 btrfs_lock_block_group(block_group, delalloc);
6558                                 goto have_block_group;
6559                         }
6560                 } else if (block_group) {
6561                         btrfs_put_block_group(block_group);
6562                 }
6563         }
6564 search:
6565         have_caching_bg = false;
6566         down_read(&space_info->groups_sem);
6567         list_for_each_entry(block_group, &space_info->block_groups[index],
6568                             list) {
6569                 u64 offset;
6570                 int cached;
6571
6572                 btrfs_grab_block_group(block_group, delalloc);
6573                 search_start = block_group->key.objectid;
6574
6575                 /*
6576                  * this can happen if we end up cycling through all the
6577                  * raid types, but we want to make sure we only allocate
6578                  * for the proper type.
6579                  */
6580                 if (!block_group_bits(block_group, flags)) {
6581                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6582                                     BTRFS_BLOCK_GROUP_RAID1 |
6583                                     BTRFS_BLOCK_GROUP_RAID5 |
6584                                     BTRFS_BLOCK_GROUP_RAID6 |
6585                                     BTRFS_BLOCK_GROUP_RAID10;
6586
6587                         /*
6588                          * if they asked for extra copies and this block group
6589                          * doesn't provide them, bail.  This does allow us to
6590                          * fill raid0 from raid1.
6591                          */
6592                         if ((flags & extra) && !(block_group->flags & extra))
6593                                 goto loop;
6594                 }
6595
6596 have_block_group:
6597                 cached = block_group_cache_done(block_group);
6598                 if (unlikely(!cached)) {
6599                         ret = cache_block_group(block_group, 0);
6600                         BUG_ON(ret < 0);
6601                         ret = 0;
6602                 }
6603
6604                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6605                         goto loop;
6606                 if (unlikely(block_group->ro))
6607                         goto loop;
6608
6609                 /*
6610                  * Ok we want to try and use the cluster allocator, so
6611                  * Ok, we want to try to use the cluster allocator, so
6612                  * let's look there
6613                 if (last_ptr) {
6614                         struct btrfs_block_group_cache *used_block_group;
6615                         unsigned long aligned_cluster;
6616                         /*
6617                          * the refill lock keeps out other
6618                          * people trying to start a new cluster
6619                          */
6620                         used_block_group = btrfs_lock_cluster(block_group,
6621                                                               last_ptr,
6622                                                               delalloc);
6623                         if (!used_block_group)
6624                                 goto refill_cluster;
6625
6626                         if (used_block_group != block_group &&
6627                             (used_block_group->ro ||
6628                              !block_group_bits(used_block_group, flags)))
6629                                 goto release_cluster;
6630
6631                         offset = btrfs_alloc_from_cluster(used_block_group,
6632                                                 last_ptr,
6633                                                 num_bytes,
6634                                                 used_block_group->key.objectid,
6635                                                 &max_extent_size);
6636                         if (offset) {
6637                                 /* we have a block, we're done */
6638                                 spin_unlock(&last_ptr->refill_lock);
6639                                 trace_btrfs_reserve_extent_cluster(root,
6640                                                 used_block_group,
6641                                                 search_start, num_bytes);
6642                                 if (used_block_group != block_group) {
6643                                         btrfs_release_block_group(block_group,
6644                                                                   delalloc);
6645                                         block_group = used_block_group;
6646                                 }
6647                                 goto checks;
6648                         }
6649
6650                         WARN_ON(last_ptr->block_group != used_block_group);
6651 release_cluster:
6652                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6653                          * set up a new cluster, so let's just skip it
6654                          * and let the allocator find whatever block
6655                          * it can find.  If we reach this point, we
6656                          * will have tried the cluster allocator
6657                          * plenty of times and not have found
6658                          * anything, so we are likely way too
6659                          * fragmented for the clustering stuff to find
6660                          * anything.
6661                          *
6662                          * However, if the cluster is taken from the
6663                          * current block group, release the cluster
6664                          * first, so that we stand a better chance of
6665                          * succeeding in the unclustered
6666                          * allocation.  */
6667                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6668                             used_block_group != block_group) {
6669                                 spin_unlock(&last_ptr->refill_lock);
6670                                 btrfs_release_block_group(used_block_group,
6671                                                           delalloc);
6672                                 goto unclustered_alloc;
6673                         }
6674
6675                         /*
6676                          * this cluster didn't work out, free it and
6677                          * start over
6678                          */
6679                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6680
6681                         if (used_block_group != block_group)
6682                                 btrfs_release_block_group(used_block_group,
6683                                                           delalloc);
6684 refill_cluster:
6685                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6686                                 spin_unlock(&last_ptr->refill_lock);
6687                                 goto unclustered_alloc;
6688                         }
6689
6690                         aligned_cluster = max_t(unsigned long,
6691                                                 empty_cluster + empty_size,
6692                                               block_group->full_stripe_len);
6693
6694                         /* allocate a cluster in this block group */
6695                         ret = btrfs_find_space_cluster(root, block_group,
6696                                                        last_ptr, search_start,
6697                                                        num_bytes,
6698                                                        aligned_cluster);
6699                         if (ret == 0) {
6700                                 /*
6701                                  * now pull our allocation out of this
6702                                  * cluster
6703                                  */
6704                                 offset = btrfs_alloc_from_cluster(block_group,
6705                                                         last_ptr,
6706                                                         num_bytes,
6707                                                         search_start,
6708                                                         &max_extent_size);
6709                                 if (offset) {
6710                                         /* we found one, proceed */
6711                                         spin_unlock(&last_ptr->refill_lock);
6712                                         trace_btrfs_reserve_extent_cluster(root,
6713                                                 block_group, search_start,
6714                                                 num_bytes);
6715                                         goto checks;
6716                                 }
6717                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6718                                    && !failed_cluster_refill) {
6719                                 spin_unlock(&last_ptr->refill_lock);
6720
6721                                 failed_cluster_refill = true;
6722                                 wait_block_group_cache_progress(block_group,
6723                                        num_bytes + empty_cluster + empty_size);
6724                                 goto have_block_group;
6725                         }
6726
6727                         /*
6728                          * at this point we either didn't find a cluster
6729                          * or we weren't able to allocate a block from our
6730                          * cluster.  Free the cluster we've been trying
6731                          * to use, and go to the next block group
6732                          */
6733                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6734                         spin_unlock(&last_ptr->refill_lock);
6735                         goto loop;
6736                 }
6737
6738 unclustered_alloc:
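             /*
              * The cluster allocator failed or was skipped: fall back to
              * a plain free space cache search in this block group.
              */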
6739                 spin_lock(&block_group->free_space_ctl->tree_lock);
6740                 if (cached &&
6741                     block_group->free_space_ctl->free_space <
6742                     num_bytes + empty_cluster + empty_size) {
6743                         if (block_group->free_space_ctl->free_space >
6744                             max_extent_size)
6745                                 max_extent_size =
6746                                         block_group->free_space_ctl->free_space;
6747                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6748                         goto loop;
6749                 }
6750                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6751
6752                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6753                                                     num_bytes, empty_size,
6754                                                     &max_extent_size);
6755                 /*
6756                  * If we didn't find a chunk, and we haven't failed on this
6757                  * block group before, and this block group is in the middle of
6758                  * caching and we are ok with waiting, then go ahead and wait
6759                  * for progress to be made, and set failed_alloc to true.
6760                  *
6761                  * If failed_alloc is true then we've already waited on this
6762                  * block group once and should move on to the next block group.
6763                  */
6764                 if (!offset && !failed_alloc && !cached &&
6765                     loop > LOOP_CACHING_NOWAIT) {
6766                         wait_block_group_cache_progress(block_group,
6767                                                 num_bytes + empty_size);
6768                         failed_alloc = true;
6769                         goto have_block_group;
6770                 } else if (!offset) {
6771                         if (!cached)
6772                                 have_caching_bg = true;
6773                         goto loop;
6774                 }
6775 checks:
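                     /*
                      * We have an offset from the cluster or the free space
                      * cache; align it to the stripe size and make sure the
                      * allocation still fits inside this block group.
                      */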
6776                 search_start = ALIGN(offset, root->stripesize);
6777
6778                 /* move on to the next group */
6779                 if (search_start + num_bytes >
6780                     block_group->key.objectid + block_group->key.offset) {
6781                         btrfs_add_free_space(block_group, offset, num_bytes);
6782                         goto loop;
6783                 }
6784
6785                 if (offset < search_start)
6786                         btrfs_add_free_space(block_group, offset,
6787                                              search_start - offset);
6788                 BUG_ON(offset > search_start);
6789
6790                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6791                                                   alloc_type, delalloc);
6792                 if (ret == -EAGAIN) {
6793                         btrfs_add_free_space(block_group, offset, num_bytes);
6794                         goto loop;
6795                 }
6796
6797                 /* we are all good, let's return */
6798                 ins->objectid = search_start;
6799                 ins->offset = num_bytes;
6800
6801                 trace_btrfs_reserve_extent(orig_root, block_group,
6802                                            search_start, num_bytes);
6803                 btrfs_release_block_group(block_group, delalloc);
6804                 break;
6805 loop:
6806                 failed_cluster_refill = false;
6807                 failed_alloc = false;
6808                 BUG_ON(index != get_block_group_index(block_group));
6809                 btrfs_release_block_group(block_group, delalloc);
6810         }
6811         up_read(&space_info->groups_sem);
6812
6813         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6814                 goto search;
6815
6816         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6817                 goto search;
6818
6819         /*
6820          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6821          *                      caching kthreads as we move along
6822          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6823          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6824          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6825          *                      again
6826          */
6827         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6828                 index = 0;
6829                 loop++;
6830                 if (loop == LOOP_ALLOC_CHUNK) {
6831                         struct btrfs_trans_handle *trans;
6832                         int exist = 0;
6833
6834                         trans = current->journal_info;
6835                         if (trans)
6836                                 exist = 1;
6837                         else
6838                                 trans = btrfs_join_transaction(root);
6839
6840                         if (IS_ERR(trans)) {
6841                                 ret = PTR_ERR(trans);
6842                                 goto out;
6843                         }
6844
6845                         ret = do_chunk_alloc(trans, root, flags,
6846                                              CHUNK_ALLOC_FORCE);
6847                         /*
6848                          * Do not bail out on ENOSPC; later loop
6849                          * passes can still make progress.
6850                          */
6851                         if (ret < 0 && ret != -ENOSPC)
6852                                 btrfs_abort_transaction(trans,
6853                                                         root, ret);
6854                         else
6855                                 ret = 0;
6856                         if (!exist)
6857                                 btrfs_end_transaction(trans, root);
6858                         if (ret)
6859                                 goto out;
6860                 }
6861
6862                 if (loop == LOOP_NO_EMPTY_SIZE) {
6863                         empty_size = 0;
6864                         empty_cluster = 0;
6865                 }
6866
6867                 goto search;
6868         } else if (!ins->objectid) {
6869                 ret = -ENOSPC;
6870         } else if (ins->objectid) {
6871                 ret = 0;
6872         }
6873 out:
6874         if (ret == -ENOSPC)
6875                 ins->offset = max_extent_size;
6876         return ret;
6877 }
6878
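     /*
      * Dump the counters of a space_info and, when @dump_block_groups is
      * set, the usage of every block group in it.  Used on the ENOSPC
      * debugging paths (the enospc_debug mount option).
      */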
6879 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6880                             int dump_block_groups)
6881 {
6882         struct btrfs_block_group_cache *cache;
6883         int index = 0;
6884
6885         spin_lock(&info->lock);
6886         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6887                info->flags,
6888                info->total_bytes - info->bytes_used - info->bytes_pinned -
6889                info->bytes_reserved - info->bytes_readonly,
6890                (info->full) ? "" : "not ");
6891         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6892                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6893                info->total_bytes, info->bytes_used, info->bytes_pinned,
6894                info->bytes_reserved, info->bytes_may_use,
6895                info->bytes_readonly);
6896         spin_unlock(&info->lock);
6897
6898         if (!dump_block_groups)
6899                 return;
6900
6901         down_read(&info->groups_sem);
6902 again:
6903         list_for_each_entry(cache, &info->block_groups[index], list) {
6904                 spin_lock(&cache->lock);
6905                 printk(KERN_INFO "BTRFS: "
6906                            "block group %llu has %llu bytes, "
6907                            "%llu used %llu pinned %llu reserved %s\n",
6908                        cache->key.objectid, cache->key.offset,
6909                        btrfs_block_group_used(&cache->item), cache->pinned,
6910                        cache->reserved, cache->ro ? "[readonly]" : "");
6911                 btrfs_dump_free_space(cache, bytes);
6912                 spin_unlock(&cache->lock);
6913         }
6914         if (++index < BTRFS_NR_RAID_TYPES)
6915                 goto again;
6916         up_read(&info->groups_sem);
6917 }
6918
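     /*
      * Reserve a data or metadata extent of at least @min_alloc_size bytes.
      * On ENOSPC the request is retried with a smaller size: num_bytes is
      * halved, capped at the largest free extent found so far, and clamped
      * to at least @min_alloc_size before giving up.
      */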
6919 int btrfs_reserve_extent(struct btrfs_root *root,
6920                          u64 num_bytes, u64 min_alloc_size,
6921                          u64 empty_size, u64 hint_byte,
6922                          struct btrfs_key *ins, int is_data, int delalloc)
6923 {
6924         bool final_tried = false;
6925         u64 flags;
6926         int ret;
6927
6928         flags = btrfs_get_alloc_profile(root, is_data);
6929 again:
6930         WARN_ON(num_bytes < root->sectorsize);
6931         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6932                                flags, delalloc);
6933
6934         if (ret == -ENOSPC) {
6935                 if (!final_tried && ins->offset) {
6936                         num_bytes = min(num_bytes >> 1, ins->offset);
6937                         num_bytes = round_down(num_bytes, root->sectorsize);
6938                         num_bytes = max(num_bytes, min_alloc_size);
6939                         if (num_bytes == min_alloc_size)
6940                                 final_tried = true;
6941                         goto again;
6942                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6943                         struct btrfs_space_info *sinfo;
6944
6945                         sinfo = __find_space_info(root->fs_info, flags);
6946                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6947                                 flags, num_bytes);
6948                         if (sinfo)
6949                                 dump_space_info(sinfo, num_bytes, 1);
6950                 }
6951         }
6952
6953         return ret;
6954 }
6955
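     /*
      * Hand a reserved extent back to its block group.  With @pin set the
      * range is pinned instead of being returned to the free space cache.
      * The range is discarded first when the discard mount option is on.
      */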
6956 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6957                                         u64 start, u64 len,
6958                                         int pin, int delalloc)
6959 {
6960         struct btrfs_block_group_cache *cache;
6961         int ret = 0;
6962
6963         cache = btrfs_lookup_block_group(root->fs_info, start);
6964         if (!cache) {
6965                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6966                         start);
6967                 return -ENOSPC;
6968         }
6969
6970         if (btrfs_test_opt(root, DISCARD))
6971                 ret = btrfs_discard_extent(root, start, len, NULL);
6972
6973         if (pin)
6974                 pin_down_extent(root, cache, start, len, 1);
6975         else {
6976                 btrfs_add_free_space(cache, start, len);
6977                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6978         }
6979         btrfs_put_block_group(cache);
6980
6981         trace_btrfs_reserved_extent_free(root, start, len);
6982
6983         return ret;
6984 }
6985
6986 int btrfs_free_reserved_extent(struct btrfs_root *root,
6987                                u64 start, u64 len, int delalloc)
6988 {
6989         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
6990 }
6991
6992 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6993                                        u64 start, u64 len)
6994 {
6995         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
6996 }
6997
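     /*
      * Insert the extent item plus its inline backref for a newly
      * allocated data extent.  A shared data ref is used when @parent is
      * nonzero, otherwise a normal extent data ref keyed on
      * root/owner/offset.
      */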
6998 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6999                                       struct btrfs_root *root,
7000                                       u64 parent, u64 root_objectid,
7001                                       u64 flags, u64 owner, u64 offset,
7002                                       struct btrfs_key *ins, int ref_mod)
7003 {
7004         int ret;
7005         struct btrfs_fs_info *fs_info = root->fs_info;
7006         struct btrfs_extent_item *extent_item;
7007         struct btrfs_extent_inline_ref *iref;
7008         struct btrfs_path *path;
7009         struct extent_buffer *leaf;
7010         int type;
7011         u32 size;
7012
7013         if (parent > 0)
7014                 type = BTRFS_SHARED_DATA_REF_KEY;
7015         else
7016                 type = BTRFS_EXTENT_DATA_REF_KEY;
7017
7018         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7019
7020         path = btrfs_alloc_path();
7021         if (!path)
7022                 return -ENOMEM;
7023
7024         path->leave_spinning = 1;
7025         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7026                                       ins, size);
7027         if (ret) {
7028                 btrfs_free_path(path);
7029                 return ret;
7030         }
7031
7032         leaf = path->nodes[0];
7033         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7034                                      struct btrfs_extent_item);
7035         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7036         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7037         btrfs_set_extent_flags(leaf, extent_item,
7038                                flags | BTRFS_EXTENT_FLAG_DATA);
7039
7040         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7041         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7042         if (parent > 0) {
7043                 struct btrfs_shared_data_ref *ref;
7044                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7045                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7046                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7047         } else {
7048                 struct btrfs_extent_data_ref *ref;
7049                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7050                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7051                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7052                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7053                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7054         }
7055
7056         btrfs_mark_buffer_dirty(path->nodes[0]);
7057         btrfs_free_path(path);
7058
7059         /* Always set parent to 0 here since it's exclusive anyway. */
7060         ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7061                                       ins->objectid, ins->offset,
7062                                       BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7063         if (ret)
7064                 return ret;
7065
7066         ret = update_block_group(root, ins->objectid, ins->offset, 1);
7067         if (ret) { /* -ENOENT, logic error */
7068                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7069                         ins->objectid, ins->offset);
7070                 BUG();
7071         }
7072         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7073         return ret;
7074 }
7075
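     /*
      * Insert the extent item for a newly allocated tree block.  With the
      * skinny-metadata incompat feature no separate btrfs_tree_block_info
      * is embedded, which keeps the extent item smaller.
      */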
7076 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7077                                      struct btrfs_root *root,
7078                                      u64 parent, u64 root_objectid,
7079                                      u64 flags, struct btrfs_disk_key *key,
7080                                      int level, struct btrfs_key *ins,
7081                                      int no_quota)
7082 {
7083         int ret;
7084         struct btrfs_fs_info *fs_info = root->fs_info;
7085         struct btrfs_extent_item *extent_item;
7086         struct btrfs_tree_block_info *block_info;
7087         struct btrfs_extent_inline_ref *iref;
7088         struct btrfs_path *path;
7089         struct extent_buffer *leaf;
7090         u32 size = sizeof(*extent_item) + sizeof(*iref);
7091         u64 num_bytes = ins->offset;
7092         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7093                                                  SKINNY_METADATA);
7094
7095         if (!skinny_metadata)
7096                 size += sizeof(*block_info);
7097
7098         path = btrfs_alloc_path();
7099         if (!path) {
7100                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7101                                                    root->nodesize);
7102                 return -ENOMEM;
7103         }
7104
7105         path->leave_spinning = 1;
7106         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7107                                       ins, size);
7108         if (ret) {
7109                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7110                                                    root->nodesize);
7111                 btrfs_free_path(path);
7112                 return ret;
7113         }
7114
7115         leaf = path->nodes[0];
7116         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7117                                      struct btrfs_extent_item);
7118         btrfs_set_extent_refs(leaf, extent_item, 1);
7119         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7120         btrfs_set_extent_flags(leaf, extent_item,
7121                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7122
7123         if (skinny_metadata) {
7124                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7125                 num_bytes = root->nodesize;
7126         } else {
7127                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7128                 btrfs_set_tree_block_key(leaf, block_info, key);
7129                 btrfs_set_tree_block_level(leaf, block_info, level);
7130                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7131         }
7132
7133         if (parent > 0) {
7134                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7135                 btrfs_set_extent_inline_ref_type(leaf, iref,
7136                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7137                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7138         } else {
7139                 btrfs_set_extent_inline_ref_type(leaf, iref,
7140                                                  BTRFS_TREE_BLOCK_REF_KEY);
7141                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7142         }
7143
7144         btrfs_mark_buffer_dirty(leaf);
7145         btrfs_free_path(path);
7146
7147         if (!no_quota) {
7148                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7149                                               ins->objectid, num_bytes,
7150                                               BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7151                 if (ret)
7152                         return ret;
7153         }
7154
7155         ret = update_block_group(root, ins->objectid, root->nodesize, 1);
7156         if (ret) { /* -ENOENT, logic error */
7157                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7158                         ins->objectid, ins->offset);
7159                 BUG();
7160         }
7161
7162         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7163         return ret;
7164 }
7165
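     /*
      * Queue a delayed ref that will insert the extent item for a newly
      * allocated file extent once the delayed refs are run.
      */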
7166 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7167                                      struct btrfs_root *root,
7168                                      u64 root_objectid, u64 owner,
7169                                      u64 offset, struct btrfs_key *ins)
7170 {
7171         int ret;
7172
7173         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7174
7175         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7176                                          ins->offset, 0,
7177                                          root_objectid, owner, offset,
7178                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7179         return ret;
7180 }
7181
7182 /*
7183  * this is used by the tree logging recovery code.  It records that
7184  * an extent has been allocated and makes sure to clear the free
7185  * space cache bits as well
7186  */
7187 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7188                                    struct btrfs_root *root,
7189                                    u64 root_objectid, u64 owner, u64 offset,
7190                                    struct btrfs_key *ins)
7191 {
7192         int ret;
7193         struct btrfs_block_group_cache *block_group;
7194
7195         /*
7196          * Mixed block groups will exclude before processing the log so we only
7197          * need to do the exclude dance if this fs isn't mixed.
7198          */
7199         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7200                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7201                 if (ret)
7202                         return ret;
7203         }
7204
7205         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7206         if (!block_group)
7207                 return -EINVAL;
7208
7209         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7210                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7211         BUG_ON(ret); /* logic error */
7212         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7213                                          0, owner, offset, ins, 1);
7214         btrfs_put_block_group(block_group);
7215         return ret;
7216 }
7217
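     /*
      * Set up a freshly allocated tree block: stamp the transid, take the
      * tree lock and mark the block dirty in the right extent_io tree
      * (dirty_log_pages for log trees, the transaction's dirty_pages
      * otherwise).
      */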
7218 static struct extent_buffer *
7219 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7220                       u64 bytenr, u32 blocksize, int level)
7221 {
7222         struct extent_buffer *buf;
7223
7224         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
7225         if (!buf)
7226                 return ERR_PTR(-ENOMEM);
7227         btrfs_set_header_generation(buf, trans->transid);
7228         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7229         btrfs_tree_lock(buf);
7230         clean_tree_block(trans, root, buf);
7231         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7232
7233         btrfs_set_lock_blocking(buf);
7234         btrfs_set_buffer_uptodate(buf);
7235
7236         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7237                 buf->log_index = root->log_transid % 2;
7238                 /*
7239                  * we allow two log transactions at a time, so use a
7240                  * different EXTENT bit to differentiate dirty pages.
7241                  */
7242                 if (buf->log_index == 0)
7243                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7244                                         buf->start + buf->len - 1, GFP_NOFS);
7245                 else
7246                         set_extent_new(&root->dirty_log_pages, buf->start,
7247                                         buf->start + buf->len - 1, GFP_NOFS);
7248         } else {
7249                 buf->log_index = -1;
7250                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7251                          buf->start + buf->len - 1, GFP_NOFS);
7252         }
7253         trans->blocks_used++;
7254         /* this returns a buffer locked for blocking */
7255         return buf;
7256 }
7257
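     /*
      * Pick the block reserve that should back a new tree block and take
      * @blocksize bytes out of it, falling back to a fresh reservation
      * and finally to the global reserve if it has run dry.
      */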
7258 static struct btrfs_block_rsv *
7259 use_block_rsv(struct btrfs_trans_handle *trans,
7260               struct btrfs_root *root, u32 blocksize)
7261 {
7262         struct btrfs_block_rsv *block_rsv;
7263         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7264         int ret;
7265         bool global_updated = false;
7266
7267         block_rsv = get_block_rsv(trans, root);
7268
7269         if (unlikely(block_rsv->size == 0))
7270                 goto try_reserve;
7271 again:
7272         ret = block_rsv_use_bytes(block_rsv, blocksize);
7273         if (!ret)
7274                 return block_rsv;
7275
7276         if (block_rsv->failfast)
7277                 return ERR_PTR(ret);
7278
7279         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7280                 global_updated = true;
7281                 update_global_block_rsv(root->fs_info);
7282                 goto again;
7283         }
7284
7285         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7286                 static DEFINE_RATELIMIT_STATE(_rs,
7287                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7288                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7289                 if (__ratelimit(&_rs))
7290                         WARN(1, KERN_DEBUG
7291                                 "BTRFS: block rsv returned %d\n", ret);
7292         }
7293 try_reserve:
7294         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7295                                      BTRFS_RESERVE_NO_FLUSH);
7296         if (!ret)
7297                 return block_rsv;
7298         /*
7299          * If we couldn't reserve metadata bytes, try to take some from
7300          * the global reserve, provided it accounts the same space_info
7301          * as our reserve.
7302          */
7303         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7304             block_rsv->space_info == global_rsv->space_info) {
7305                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7306                 if (!ret)
7307                         return global_rsv;
7308         }
7309         return ERR_PTR(ret);
7310 }
7311
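     /*
      * Undo use_block_rsv(): give @blocksize bytes back to the reserve
      * and let it trim anything above its target size.
      */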
7312 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7313                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7314 {
7315         block_rsv_add_bytes(block_rsv, blocksize, 0);
7316         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7317 }
7318
7319 /*
7320  * finds a free extent and does all the dirty work required for
7321  * allocation.  returns the key for the extent through ins, and a
7322  * locked tree buffer for the first block of the extent.
7323  *
7324  * returns the tree buffer or an ERR_PTR on failure.
7325  */
7326 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7327                                         struct btrfs_root *root,
7328                                         u64 parent, u64 root_objectid,
7329                                         struct btrfs_disk_key *key, int level,
7330                                         u64 hint, u64 empty_size)
7331 {
7332         struct btrfs_key ins;
7333         struct btrfs_block_rsv *block_rsv;
7334         struct extent_buffer *buf;
7335         u64 flags = 0;
7336         int ret;
7337         u32 blocksize = root->nodesize;
7338         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7339                                                  SKINNY_METADATA);
7340
7341         if (btrfs_test_is_dummy_root(root)) {
7342                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7343                                             blocksize, level);
7344                 if (!IS_ERR(buf))
7345                         root->alloc_bytenr += blocksize;
7346                 return buf;
7347         }
7348
7349         block_rsv = use_block_rsv(trans, root, blocksize);
7350         if (IS_ERR(block_rsv))
7351                 return ERR_CAST(block_rsv);
7352
7353         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7354                                    empty_size, hint, &ins, 0, 0);
7355         if (ret) {
7356                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7357                 return ERR_PTR(ret);
7358         }
7359
7360         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7361                                     blocksize, level);
7362         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7363
7364         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7365                 if (parent == 0)
7366                         parent = ins.objectid;
7367                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7368         } else
7369                 BUG_ON(parent > 0);
7370
7371         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7372                 struct btrfs_delayed_extent_op *extent_op;
7373                 extent_op = btrfs_alloc_delayed_extent_op();
7374                 BUG_ON(!extent_op); /* -ENOMEM */
7375                 if (key)
7376                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7377                 else
7378                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7379                 extent_op->flags_to_set = flags;
7380                 if (skinny_metadata)
7381                         extent_op->update_key = 0;
7382                 else
7383                         extent_op->update_key = 1;
7384                 extent_op->update_flags = 1;
7385                 extent_op->is_data = 0;
7386                 extent_op->level = level;
7387
7388                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7389                                         ins.objectid,
7390                                         ins.offset, parent, root_objectid,
7391                                         level, BTRFS_ADD_DELAYED_EXTENT,
7392                                         extent_op, 0);
7393                 BUG_ON(ret); /* -ENOMEM */
7394         }
7395         return buf;
7396 }
7397
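     /*
      * State for walking down a (possibly shared) subtree while dropping
      * a snapshot.  refs[] and flags[] cache the extent refcount and
      * flags of the node held at each level of the path.
      */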
7398 struct walk_control {
7399         u64 refs[BTRFS_MAX_LEVEL];
7400         u64 flags[BTRFS_MAX_LEVEL];
7401         struct btrfs_key update_progress;
7402         int stage;
7403         int level;
7404         int shared_level;
7405         int update_ref;
7406         int keep_locks;
7407         int reada_slot;
7408         int reada_count;
7409         int for_reloc;
7410 };
7411
7412 #define DROP_REFERENCE  1
7413 #define UPDATE_BACKREF  2
7414
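     /*
      * Read ahead the tree blocks below the current node.  wc->reada_count
      * adapts the window: it shrinks while previously read-ahead blocks
      * are still unconsumed and grows once the walk has caught up.
      */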
7415 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7416                                      struct btrfs_root *root,
7417                                      struct walk_control *wc,
7418                                      struct btrfs_path *path)
7419 {
7420         u64 bytenr;
7421         u64 generation;
7422         u64 refs;
7423         u64 flags;
7424         u32 nritems;
7425         u32 blocksize;
7426         struct btrfs_key key;
7427         struct extent_buffer *eb;
7428         int ret;
7429         int slot;
7430         int nread = 0;
7431
7432         if (path->slots[wc->level] < wc->reada_slot) {
7433                 wc->reada_count = wc->reada_count * 2 / 3;
7434                 wc->reada_count = max(wc->reada_count, 2);
7435         } else {
7436                 wc->reada_count = wc->reada_count * 3 / 2;
7437                 wc->reada_count = min_t(int, wc->reada_count,
7438                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7439         }
7440
7441         eb = path->nodes[wc->level];
7442         nritems = btrfs_header_nritems(eb);
7443         blocksize = root->nodesize;
7444
7445         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7446                 if (nread >= wc->reada_count)
7447                         break;
7448
7449                 cond_resched();
7450                 bytenr = btrfs_node_blockptr(eb, slot);
7451                 generation = btrfs_node_ptr_generation(eb, slot);
7452
7453                 if (slot == path->slots[wc->level])
7454                         goto reada;
7455
7456                 if (wc->stage == UPDATE_BACKREF &&
7457                     generation <= root->root_key.offset)
7458                         continue;
7459
7460                 /* We don't lock the tree block, it's OK to be racy here */
7461                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7462                                                wc->level - 1, 1, &refs,
7463                                                &flags);
7464                 /* We don't care about errors in readahead. */
7465                 if (ret < 0)
7466                         continue;
7467                 BUG_ON(refs == 0);
7468
7469                 if (wc->stage == DROP_REFERENCE) {
7470                         if (refs == 1)
7471                                 goto reada;
7472
7473                         if (wc->level == 1 &&
7474                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7475                                 continue;
7476                         if (!wc->update_ref ||
7477                             generation <= root->root_key.offset)
7478                                 continue;
7479                         btrfs_node_key_to_cpu(eb, &key, slot);
7480                         ret = btrfs_comp_cpu_keys(&key,
7481                                                   &wc->update_progress);
7482                         if (ret < 0)
7483                                 continue;
7484                 } else {
7485                         if (wc->level == 1 &&
7486                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7487                                 continue;
7488                 }
7489 reada:
7490                 readahead_tree_block(root, bytenr, blocksize);
7491                 nread++;
7492         }
7493         wc->reada_slot = slot;
7494 }
7495
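     /*
      * Queue a BTRFS_QGROUP_OPER_SUB_SUBTREE operation for every on-disk
      * data extent referenced from the leaf @eb.  Inline extents and
      * holes (disk_bytenr == 0) have no extent items and are skipped.
      */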
7496 static int account_leaf_items(struct btrfs_trans_handle *trans,
7497                               struct btrfs_root *root,
7498                               struct extent_buffer *eb)
7499 {
7500         int nr = btrfs_header_nritems(eb);
7501         int i, extent_type, ret;
7502         struct btrfs_key key;
7503         struct btrfs_file_extent_item *fi;
7504         u64 bytenr, num_bytes;
7505
7506         for (i = 0; i < nr; i++) {
7507                 btrfs_item_key_to_cpu(eb, &key, i);
7508
7509                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7510                         continue;
7511
7512                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7513                 /* filter out non-qgroup-accountable extents */
7514                 extent_type = btrfs_file_extent_type(eb, fi);
7515
7516                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7517                         continue;
7518
7519                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7520                 if (!bytenr)
7521                         continue;
7522
7523                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7524
7525                 ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7526                                               root->objectid,
7527                                               bytenr, num_bytes,
7528                                               BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7529                 if (ret)
7530                         return ret;
7531         }
7532         return 0;
7533 }
7534
7535 /*
7536  * Walk up the tree from the bottom, freeing leaves and any interior
7537  * nodes which have had all slots visited. If a node (leaf or
7538  * interior) is freed, the node above it will have its slot
7539  * incremented. The root node will never be freed.
7540  *
7541  * At the end of this function, we should have a path which has all
7542  * slots incremented to the next position for a search. If we need to
7543  * read a new node it will be NULL and the node above it will have the
7544  * correct slot selected for a later read.
7545  *
7546  * If we increment the root node's slot counter past the number of
7547  * elements, 1 is returned to signal completion of the search.
7548  */
7549 static int adjust_slots_upwards(struct btrfs_root *root,
7550                                 struct btrfs_path *path, int root_level)
7551 {
7552         int level = 0;
7553         int nr, slot;
7554         struct extent_buffer *eb;
7555
7556         if (root_level == 0)
7557                 return 1;
7558
7559         while (level <= root_level) {
7560                 eb = path->nodes[level];
7561                 nr = btrfs_header_nritems(eb);
7562                 path->slots[level]++;
7563                 slot = path->slots[level];
7564                 if (slot >= nr || level == 0) {
7565                         /*
7566                          * Don't free the root - we will detect this
7567                          * condition after our loop and return a
7568                          * positive value for caller to stop walking the tree.
7569                          */
7570                         if (level != root_level) {
7571                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7572                                 path->locks[level] = 0;
7573
7574                                 free_extent_buffer(eb);
7575                                 path->nodes[level] = NULL;
7576                                 path->slots[level] = 0;
7577                         }
7578                 } else {
7579                         /*
7580                          * We have a valid slot to walk back down
7581                          * from. Stop here so caller can process these
7582                          * new nodes.
7583                          */
7584                         break;
7585                 }
7586
7587                 level++;
7588         }
7589
7590         eb = path->nodes[root_level];
7591         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7592                 return 1;
7593
7594         return 0;
7595 }
7596
7597 /*
7598  * root_eb is the subtree root and is locked before this function is called.
7599  */
7600 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7601                                   struct btrfs_root *root,
7602                                   struct extent_buffer *root_eb,
7603                                   u64 root_gen,
7604                                   int root_level)
7605 {
7606         int ret = 0;
7607         int level;
7608         struct extent_buffer *eb = root_eb;
7609         struct btrfs_path *path = NULL;
7610
7611         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7612         BUG_ON(root_eb == NULL);
7613
7614         if (!root->fs_info->quota_enabled)
7615                 return 0;
7616
7617         if (!extent_buffer_uptodate(root_eb)) {
7618                 ret = btrfs_read_buffer(root_eb, root_gen);
7619                 if (ret)
7620                         goto out;
7621         }
7622
7623         if (root_level == 0) {
7624                 ret = account_leaf_items(trans, root, root_eb);
7625                 goto out;
7626         }
7627
7628         path = btrfs_alloc_path();
7629         if (!path)
7630                 return -ENOMEM;
7631
7632         /*
7633          * Walk down the tree.  Missing extent blocks are filled in as
7634          * we go. Metadata is accounted every time we read a new
7635          * extent block.
7636          *
7637          * When we reach a leaf, we account for file extent items in it,
7638          * walk back up the tree (adjusting slot pointers as we go)
7639          * and restart the search process.
7640          */
7641         extent_buffer_get(root_eb); /* For path */
7642         path->nodes[root_level] = root_eb;
7643         path->slots[root_level] = 0;
7644         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7645 walk_down:
7646         level = root_level;
7647         while (level >= 0) {
7648                 if (path->nodes[level] == NULL) {
7649                         int parent_slot;
7650                         u64 child_gen;
7651                         u64 child_bytenr;
7652
7653                         /* We need to get child blockptr/gen from
7654                          * parent before we can read it. */
7655                         eb = path->nodes[level + 1];
7656                         parent_slot = path->slots[level + 1];
7657                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7658                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7659
7660                         eb = read_tree_block(root, child_bytenr, child_gen);
7661                         if (!eb || !extent_buffer_uptodate(eb)) {
7662                                 ret = -EIO;
7663                                 goto out;
7664                         }
7665
7666                         path->nodes[level] = eb;
7667                         path->slots[level] = 0;
7668
7669                         btrfs_tree_read_lock(eb);
7670                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7671                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7672
7673                         ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7674                                                 root->objectid,
7675                                                 child_bytenr,
7676                                                 root->nodesize,
7677                                                 BTRFS_QGROUP_OPER_SUB_SUBTREE,
7678                                                 0);
7679                         if (ret)
7680                                 goto out;
7681
7682                 }
7683
7684                 if (level == 0) {
7685                         ret = account_leaf_items(trans, root, path->nodes[level]);
7686                         if (ret)
7687                                 goto out;
7688
7689                         /* Nonzero return here means we completed our search */
7690                         ret = adjust_slots_upwards(root, path, root_level);
7691                         if (ret)
7692                                 break;
7693
7694                         /* Restart search with new slots */
7695                         goto walk_down;
7696                 }
7697
7698                 level--;
7699         }
7700
7701         ret = 0;
7702 out:
7703         btrfs_free_path(path);
7704
7705         return ret;
7706 }
7707
7708 /*
7709  * helper to process tree block while walking down the tree.
7710  *
7711  * when wc->stage == UPDATE_BACKREF, this function updates
7712  * back refs for pointers in the block.
7713  *
7714  * NOTE: return value 1 means we should stop walking down.
7715  */
7716 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7717                                    struct btrfs_root *root,
7718                                    struct btrfs_path *path,
7719                                    struct walk_control *wc, int lookup_info)
7720 {
7721         int level = wc->level;
7722         struct extent_buffer *eb = path->nodes[level];
7723         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7724         int ret;
7725
7726         if (wc->stage == UPDATE_BACKREF &&
7727             btrfs_header_owner(eb) != root->root_key.objectid)
7728                 return 1;
7729
7730         /*
7731          * when the reference count of a tree block is 1, it won't increase
7732          * again. once the full backref flag is set, we never clear it.
7733          */
7734         if (lookup_info &&
7735             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7736              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7737                 BUG_ON(!path->locks[level]);
7738                 ret = btrfs_lookup_extent_info(trans, root,
7739                                                eb->start, level, 1,
7740                                                &wc->refs[level],
7741                                                &wc->flags[level]);
7742                 BUG_ON(ret == -ENOMEM);
7743                 if (ret)
7744                         return ret;
7745                 BUG_ON(wc->refs[level] == 0);
7746         }
7747
7748         if (wc->stage == DROP_REFERENCE) {
7749                 if (wc->refs[level] > 1)
7750                         return 1;
7751
7752                 if (path->locks[level] && !wc->keep_locks) {
7753                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7754                         path->locks[level] = 0;
7755                 }
7756                 return 0;
7757         }
7758
7759         /* wc->stage == UPDATE_BACKREF */
7760         if (!(wc->flags[level] & flag)) {
7761                 BUG_ON(!path->locks[level]);
7762                 ret = btrfs_inc_ref(trans, root, eb, 1);
7763                 BUG_ON(ret); /* -ENOMEM */
7764                 ret = btrfs_dec_ref(trans, root, eb, 0);
7765                 BUG_ON(ret); /* -ENOMEM */
7766                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7767                                                   eb->len, flag,
7768                                                   btrfs_header_level(eb), 0);
7769                 BUG_ON(ret); /* -ENOMEM */
7770                 wc->flags[level] |= flag;
7771         }
7772
7773         /*
7774          * the block is shared by multiple trees, so it's not good to
7775          * keep the tree lock
7776          */
7777         if (path->locks[level] && level > 0) {
7778                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7779                 path->locks[level] = 0;
7780         }
7781         return 0;
7782 }
7783
7784 /*
7785  * helper to process tree block pointer.
7786  *
7787  * when wc->stage == DROP_REFERENCE, this function checks
7788  * reference count of the block pointed to. if the block
7789  * is shared and we need update back refs for the subtree
7790  * rooted at the block, this function changes wc->stage to
7791  * UPDATE_BACKREF. if the block is shared and there is no
7792  * need to update back refs, this function drops the reference
7793  * to the block.
7794  *
7795  * NOTE: return value 1 means we should stop walking down.
7796  */
7797 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7798                                  struct btrfs_root *root,
7799                                  struct btrfs_path *path,
7800                                  struct walk_control *wc, int *lookup_info)
7801 {
7802         u64 bytenr;
7803         u64 generation;
7804         u64 parent;
7805         u32 blocksize;
7806         struct btrfs_key key;
7807         struct extent_buffer *next;
7808         int level = wc->level;
7809         int reada = 0;
7810         int ret = 0;
7811         bool need_account = false;
7812
7813         generation = btrfs_node_ptr_generation(path->nodes[level],
7814                                                path->slots[level]);
7815         /*
7816          * if the lower level block was created before the snapshot
7817          * was created, we know there is no need to update back refs
7818          * for the subtree
7819          */
7820         if (wc->stage == UPDATE_BACKREF &&
7821             generation <= root->root_key.offset) {
7822                 *lookup_info = 1;
7823                 return 1;
7824         }
7825
7826         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7827         blocksize = root->nodesize;
7828
7829         next = btrfs_find_tree_block(root, bytenr);
7830         if (!next) {
7831                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7832                 if (!next)
7833                         return -ENOMEM;
7834                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7835                                                level - 1);
7836                 reada = 1;
7837         }
7838         btrfs_tree_lock(next);
7839         btrfs_set_lock_blocking(next);
7840
7841         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7842                                        &wc->refs[level - 1],
7843                                        &wc->flags[level - 1]);
7844         if (ret < 0) {
7845                 btrfs_tree_unlock(next);
7846                 return ret;
7847         }
7848
7849         if (unlikely(wc->refs[level - 1] == 0)) {
7850                 btrfs_err(root->fs_info, "Missing references.");
7851                 BUG();
7852         }
7853         *lookup_info = 0;
7854
7855         if (wc->stage == DROP_REFERENCE) {
7856                 if (wc->refs[level - 1] > 1) {
7857                         need_account = true;
7858                         if (level == 1 &&
7859                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7860                                 goto skip;
7861
7862                         if (!wc->update_ref ||
7863                             generation <= root->root_key.offset)
7864                                 goto skip;
7865
7866                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7867                                               path->slots[level]);
7868                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7869                         if (ret < 0)
7870                                 goto skip;
7871
7872                         wc->stage = UPDATE_BACKREF;
7873                         wc->shared_level = level - 1;
7874                 }
7875         } else {
7876                 if (level == 1 &&
7877                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7878                         goto skip;
7879         }
7880
7881         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7882                 btrfs_tree_unlock(next);
7883                 free_extent_buffer(next);
7884                 next = NULL;
7885                 *lookup_info = 1;
7886         }
7887
7888         if (!next) {
7889                 if (reada && level == 1)
7890                         reada_walk_down(trans, root, wc, path);
7891                 next = read_tree_block(root, bytenr, generation);
7892                 if (!next || !extent_buffer_uptodate(next)) {
7893                         free_extent_buffer(next);
7894                         return -EIO;
7895                 }
7896                 btrfs_tree_lock(next);
7897                 btrfs_set_lock_blocking(next);
7898         }
7899
7900         level--;
7901         BUG_ON(level != btrfs_header_level(next));
7902         path->nodes[level] = next;
7903         path->slots[level] = 0;
7904         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7905         wc->level = level;
7906         if (wc->level == 1)
7907                 wc->reada_slot = 0;
7908         return 0;
7909 skip:
7910         wc->refs[level - 1] = 0;
7911         wc->flags[level - 1] = 0;
7912         if (wc->stage == DROP_REFERENCE) {
7913                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7914                         parent = path->nodes[level]->start;
7915                 } else {
7916                         BUG_ON(root->root_key.objectid !=
7917                                btrfs_header_owner(path->nodes[level]));
7918                         parent = 0;
7919                 }
7920
7921                 if (need_account) {
7922                         ret = account_shared_subtree(trans, root, next,
7923                                                      generation, level - 1);
7924                         if (ret) {
7925                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7926                                         "%d accounting shared subtree. Quota "
7927                                         "is out of sync, rescan required.\n",
7928                                         root->fs_info->sb->s_id, ret);
7929                         }
7930                 }
7931                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7932                                 root->root_key.objectid, level - 1, 0, 0);
7933                 BUG_ON(ret); /* -ENOMEM */
7934         }
7935         btrfs_tree_unlock(next);
7936         free_extent_buffer(next);
7937         *lookup_info = 1;
7938         return 1;
7939 }
7940
7941 /*
7942  * helper to process tree block while walking up the tree.
7943  *
7944  * when wc->stage == DROP_REFERENCE, this function drops
7945  * reference count on the block.
7946  *
7947  * when wc->stage == UPDATE_BACKREF, this function changes
7948  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7949  * to UPDATE_BACKREF previously while processing the block.
7950  *
7951  * NOTE: return value 1 means we should stop walking up.
7952  */
7953 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7954                                  struct btrfs_root *root,
7955                                  struct btrfs_path *path,
7956                                  struct walk_control *wc)
7957 {
7958         int ret;
7959         int level = wc->level;
7960         struct extent_buffer *eb = path->nodes[level];
7961         u64 parent = 0;
7962
7963         if (wc->stage == UPDATE_BACKREF) {
7964                 BUG_ON(wc->shared_level < level);
7965                 if (level < wc->shared_level)
7966                         goto out;
7967
7968                 ret = find_next_key(path, level + 1, &wc->update_progress);
7969                 if (ret > 0)
7970                         wc->update_ref = 0;
7971
7972                 wc->stage = DROP_REFERENCE;
7973                 wc->shared_level = -1;
7974                 path->slots[level] = 0;
7975
7976                 /*
7977                  * check reference count again if the block isn't locked.
7978                  * we should start walking down the tree again if the
7979                  * reference count is one.
7980                  */
7981                 if (!path->locks[level]) {
7982                         BUG_ON(level == 0);
7983                         btrfs_tree_lock(eb);
7984                         btrfs_set_lock_blocking(eb);
7985                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7986
7987                         ret = btrfs_lookup_extent_info(trans, root,
7988                                                        eb->start, level, 1,
7989                                                        &wc->refs[level],
7990                                                        &wc->flags[level]);
7991                         if (ret < 0) {
7992                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7993                                 path->locks[level] = 0;
7994                                 return ret;
7995                         }
7996                         BUG_ON(wc->refs[level] == 0);
7997                         if (wc->refs[level] == 1) {
7998                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7999                                 path->locks[level] = 0;
8000                                 return 1;
8001                         }
8002                 }
8003         }
8004
8005         /* wc->stage == DROP_REFERENCE */
8006         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8007
8008         if (wc->refs[level] == 1) {
8009                 if (level == 0) {
8010                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8011                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8012                         else
8013                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8014                         BUG_ON(ret); /* -ENOMEM */
8015                         ret = account_leaf_items(trans, root, eb);
8016                         if (ret) {
8017                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8018                                         "%d accounting leaf items. Quota "
8019                                         "is out of sync, rescan required.\n",
8020                                         root->fs_info->sb->s_id, ret);
8021                         }
8022                 }
8023                 /* make the block-locked assertion in clean_tree_block happy */
8024                 if (!path->locks[level] &&
8025                     btrfs_header_generation(eb) == trans->transid) {
8026                         btrfs_tree_lock(eb);
8027                         btrfs_set_lock_blocking(eb);
8028                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8029                 }
8030                 clean_tree_block(trans, root, eb);
8031         }
8032
8033         if (eb == root->node) {
8034                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8035                         parent = eb->start;
8036                 else
8037                         BUG_ON(root->root_key.objectid !=
8038                                btrfs_header_owner(eb));
8039         } else {
8040                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8041                         parent = path->nodes[level + 1]->start;
8042                 else
8043                         BUG_ON(root->root_key.objectid !=
8044                                btrfs_header_owner(path->nodes[level + 1]));
8045         }
8046
8047         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8048 out:
8049         wc->refs[level] = 0;
8050         wc->flags[level] = 0;
8051         return 0;
8052 }
8053
8054 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8055                                    struct btrfs_root *root,
8056                                    struct btrfs_path *path,
8057                                    struct walk_control *wc)
8058 {
8059         int level = wc->level;
8060         int lookup_info = 1;
8061         int ret;
8062
8063         while (level >= 0) {
8064                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8065                 if (ret > 0)
8066                         break;
8067
8068                 if (level == 0)
8069                         break;
8070
8071                 if (path->slots[level] >=
8072                     btrfs_header_nritems(path->nodes[level]))
8073                         break;
8074
8075                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8076                 if (ret > 0) {
8077                         path->slots[level]++;
8078                         continue;
8079                 } else if (ret < 0)
8080                         return ret;
8081                 level = wc->level;
8082         }
8083         return 0;
8084 }
8085
8086 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8087                                  struct btrfs_root *root,
8088                                  struct btrfs_path *path,
8089                                  struct walk_control *wc, int max_level)
8090 {
8091         int level = wc->level;
8092         int ret;
8093
8094         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8095         while (level < max_level && path->nodes[level]) {
8096                 wc->level = level;
8097                 if (path->slots[level] + 1 <
8098                     btrfs_header_nritems(path->nodes[level])) {
8099                         path->slots[level]++;
8100                         return 0;
8101                 } else {
8102                         ret = walk_up_proc(trans, root, path, wc);
8103                         if (ret > 0)
8104                                 return 0;
8105
8106                         if (path->locks[level]) {
8107                                 btrfs_tree_unlock_rw(path->nodes[level],
8108                                                      path->locks[level]);
8109                                 path->locks[level] = 0;
8110                         }
8111                         free_extent_buffer(path->nodes[level]);
8112                         path->nodes[level] = NULL;
8113                         level++;
8114                 }
8115         }
8116         return 1;
8117 }
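
/*
 * A return of 0 from walk_up_tree() means the walk should resume
 * downward from wc->level (either a sibling slot was found or
 * walk_up_proc() asked to stop unwinding); a return of 1 means every
 * level below max_level has been processed.  btrfs_drop_snapshot()
 * below alternates walk_down_tree() and walk_up_tree() until the
 * latter reports completion.
 */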
8118
8119 /*
8120  * drop a subvolume tree.
8121  *
8122  * this function traverses the tree, freeing any blocks that are only
8123  * referenced by the tree.
8124  *
8125  * when a shared tree block is found, this function decreases its
8126  * reference count by one. if update_ref is true, this function
8127  * also makes sure backrefs for the shared block and all lower level
8128  * blocks are properly updated.
8129  *
8130  * If called with for_reloc == 0, this may exit early with -EAGAIN.
8131  */
8132 int btrfs_drop_snapshot(struct btrfs_root *root,
8133                          struct btrfs_block_rsv *block_rsv, int update_ref,
8134                          int for_reloc)
8135 {
8136         struct btrfs_path *path;
8137         struct btrfs_trans_handle *trans;
8138         struct btrfs_root *tree_root = root->fs_info->tree_root;
8139         struct btrfs_root_item *root_item = &root->root_item;
8140         struct walk_control *wc;
8141         struct btrfs_key key;
8142         int err = 0;
8143         int ret;
8144         int level;
8145         bool root_dropped = false;
8146
8147         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8148
8149         path = btrfs_alloc_path();
8150         if (!path) {
8151                 err = -ENOMEM;
8152                 goto out;
8153         }
8154
8155         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8156         if (!wc) {
8157                 btrfs_free_path(path);
8158                 err = -ENOMEM;
8159                 goto out;
8160         }
8161
8162         trans = btrfs_start_transaction(tree_root, 0);
8163         if (IS_ERR(trans)) {
8164                 err = PTR_ERR(trans);
8165                 goto out_free;
8166         }
8167
8168         if (block_rsv)
8169                 trans->block_rsv = block_rsv;
8170
8171         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8172                 level = btrfs_header_level(root->node);
8173                 path->nodes[level] = btrfs_lock_root_node(root);
8174                 btrfs_set_lock_blocking(path->nodes[level]);
8175                 path->slots[level] = 0;
8176                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8177                 memset(&wc->update_progress, 0,
8178                        sizeof(wc->update_progress));
8179         } else {
8180                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8181                 memcpy(&wc->update_progress, &key,
8182                        sizeof(wc->update_progress));
8183
8184                 level = root_item->drop_level;
8185                 BUG_ON(level == 0);
8186                 path->lowest_level = level;
8187                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8188                 path->lowest_level = 0;
8189                 if (ret < 0) {
8190                         err = ret;
8191                         goto out_end_trans;
8192                 }
8193                 WARN_ON(ret > 0);
8194
8195                 /*
8196                  * unlock our path; this is safe because only this
8197                  * function is allowed to delete this snapshot
8198                  */
8199                 btrfs_unlock_up_safe(path, 0);
8200
8201                 level = btrfs_header_level(root->node);
8202                 while (1) {
8203                         btrfs_tree_lock(path->nodes[level]);
8204                         btrfs_set_lock_blocking(path->nodes[level]);
8205                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8206
8207                         ret = btrfs_lookup_extent_info(trans, root,
8208                                                 path->nodes[level]->start,
8209                                                 level, 1, &wc->refs[level],
8210                                                 &wc->flags[level]);
8211                         if (ret < 0) {
8212                                 err = ret;
8213                                 goto out_end_trans;
8214                         }
8215                         BUG_ON(wc->refs[level] == 0);
8216
8217                         if (level == root_item->drop_level)
8218                                 break;
8219
8220                         btrfs_tree_unlock(path->nodes[level]);
8221                         path->locks[level] = 0;
8222                         WARN_ON(wc->refs[level] != 1);
8223                         level--;
8224                 }
8225         }
8226
8227         wc->level = level;
8228         wc->shared_level = -1;
8229         wc->stage = DROP_REFERENCE;
8230         wc->update_ref = update_ref;
8231         wc->keep_locks = 0;
8232         wc->for_reloc = for_reloc;
8233         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8234
8235         while (1) {
8236
8237                 ret = walk_down_tree(trans, root, path, wc);
8238                 if (ret < 0) {
8239                         err = ret;
8240                         break;
8241                 }
8242
8243                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8244                 if (ret < 0) {
8245                         err = ret;
8246                         break;
8247                 }
8248
8249                 if (ret > 0) {
8250                         BUG_ON(wc->stage != DROP_REFERENCE);
8251                         break;
8252                 }
8253
8254                 if (wc->stage == DROP_REFERENCE) {
8255                         level = wc->level;
8256                         btrfs_node_key(path->nodes[level],
8257                                        &root_item->drop_progress,
8258                                        path->slots[level]);
8259                         root_item->drop_level = level;
8260                 }
8261
8262                 BUG_ON(wc->level == 0);
8263                 if (btrfs_should_end_transaction(trans, tree_root) ||
8264                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8265                         ret = btrfs_update_root(trans, tree_root,
8266                                                 &root->root_key,
8267                                                 root_item);
8268                         if (ret) {
8269                                 btrfs_abort_transaction(trans, tree_root, ret);
8270                                 err = ret;
8271                                 goto out_end_trans;
8272                         }
8273
8274                         /*
8275                          * Qgroup update accounting is run from
8276                          * delayed ref handling. This usually works
8277                          * out because delayed refs are normally the
8278                          * only way qgroup updates are added. However,
8279                          * we may have added updates during our tree
8280                          * walk so run qgroups here to make sure we
8281                          * don't lose any updates.
8282                          */
8283                         ret = btrfs_delayed_qgroup_accounting(trans,
8284                                                               root->fs_info);
8285                         if (ret)
8286                                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8287                                                    "running qgroup updates "
8288                                                    "during snapshot delete. "
8289                                                    "Quota is out of sync, "
8290                                                    "rescan required.\n", ret);
8291
8292                         btrfs_end_transaction_throttle(trans, tree_root);
8293                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8294                                 pr_debug("BTRFS: drop snapshot early exit\n");
8295                                 err = -EAGAIN;
8296                                 goto out_free;
8297                         }
8298
8299                         trans = btrfs_start_transaction(tree_root, 0);
8300                         if (IS_ERR(trans)) {
8301                                 err = PTR_ERR(trans);
8302                                 goto out_free;
8303                         }
8304                         if (block_rsv)
8305                                 trans->block_rsv = block_rsv;
8306                 }
8307         }
8308         btrfs_release_path(path);
8309         if (err)
8310                 goto out_end_trans;
8311
8312         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8313         if (ret) {
8314                 btrfs_abort_transaction(trans, tree_root, ret);
8315                 goto out_end_trans;
8316         }
8317
8318         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8319                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8320                                       NULL, NULL);
8321                 if (ret < 0) {
8322                         btrfs_abort_transaction(trans, tree_root, ret);
8323                         err = ret;
8324                         goto out_end_trans;
8325                 } else if (ret > 0) {
8326                         /* if we fail to delete the orphan item this time
8327                          * around, it'll get picked up the next time.
8328                          *
8329                          * The most common failure here is just -ENOENT.
8330                          */
8331                         btrfs_del_orphan_item(trans, tree_root,
8332                                               root->root_key.objectid);
8333                 }
8334         }
8335
8336         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8337                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8338         } else {
8339                 free_extent_buffer(root->node);
8340                 free_extent_buffer(root->commit_root);
8341                 btrfs_put_fs_root(root);
8342         }
8343         root_dropped = true;
8344 out_end_trans:
8345         ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8346         if (ret)
8347                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8348                                    "running qgroup updates "
8349                                    "during snapshot delete. "
8350                                    "Quota is out of sync, "
8351                                    "rescan required.\n", ret);
8352
8353         btrfs_end_transaction_throttle(trans, tree_root);
8354 out_free:
8355         kfree(wc);
8356         btrfs_free_path(path);
8357 out:
8358         /*
8359          * So if we need to stop dropping the snapshot for whatever reason we
8360          * need to make sure to add it back to the dead root list so that we
8361          * keep trying to do the work later.  This also cleans up roots if we
8362          * don't have them in the radix (like when we recover after a power fail
8363          * or unmount) so we don't leak memory.
8364          */
8365         if (!for_reloc && root_dropped == false)
8366                 btrfs_add_dead_root(root);
8367         if (err && err != -EAGAIN)
8368                 btrfs_std_error(root->fs_info, err);
8369         return err;
8370 }
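
/*
 * Sketch of the expected call shape (illustrative; the caller lives
 * outside this file).  The cleaner thread deletes a dead root with
 * roughly:
 *
 *	ret = btrfs_drop_snapshot(root, NULL, 0, 0);
 *
 * With for_reloc == 0 the walk may bail out with -EAGAIN when
 * btrfs_need_cleaner_sleep() fires; root_item->drop_progress lets a
 * later call resume where this one stopped.
 */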
8371
8372 /*
8373  * drop subtree rooted at tree block 'node'.
8374  *
8375  * NOTE: this function will unlock and release tree block 'node'
8376  * only used by relocation code
8377  */
8378 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8379                         struct btrfs_root *root,
8380                         struct extent_buffer *node,
8381                         struct extent_buffer *parent)
8382 {
8383         struct btrfs_path *path;
8384         struct walk_control *wc;
8385         int level;
8386         int parent_level;
8387         int ret = 0;
8388         int wret;
8389
8390         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8391
8392         path = btrfs_alloc_path();
8393         if (!path)
8394                 return -ENOMEM;
8395
8396         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8397         if (!wc) {
8398                 btrfs_free_path(path);
8399                 return -ENOMEM;
8400         }
8401
8402         btrfs_assert_tree_locked(parent);
8403         parent_level = btrfs_header_level(parent);
8404         extent_buffer_get(parent);
8405         path->nodes[parent_level] = parent;
8406         path->slots[parent_level] = btrfs_header_nritems(parent);
8407
8408         btrfs_assert_tree_locked(node);
8409         level = btrfs_header_level(node);
8410         path->nodes[level] = node;
8411         path->slots[level] = 0;
8412         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8413
8414         wc->refs[parent_level] = 1;
8415         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8416         wc->level = level;
8417         wc->shared_level = -1;
8418         wc->stage = DROP_REFERENCE;
8419         wc->update_ref = 0;
8420         wc->keep_locks = 1;
8421         wc->for_reloc = 1;
8422         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8423
8424         while (1) {
8425                 wret = walk_down_tree(trans, root, path, wc);
8426                 if (wret < 0) {
8427                         ret = wret;
8428                         break;
8429                 }
8430
8431                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8432                 if (wret < 0)
8433                         ret = wret;
8434                 if (wret != 0)
8435                         break;
8436         }
8437
8438         kfree(wc);
8439         btrfs_free_path(path);
8440         return ret;
8441 }
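
/*
 * Note that the walk above is seeded with wc->refs[parent_level] = 1
 * and BTRFS_BLOCK_FLAG_FULL_BACKREF, so every block under 'node' is
 * treated as exclusively owned, and walk_up_tree() is capped at
 * parent_level rather than BTRFS_MAX_LEVEL, so 'parent' itself is
 * never freed.
 */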
8442
8443 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8444 {
8445         u64 num_devices;
8446         u64 stripped;
8447
8448         /*
8449          * if restripe for this chunk_type is on, pick the target profile
8450          * and return; otherwise do the usual balance
8451          */
8452         stripped = get_restripe_target(root->fs_info, flags);
8453         if (stripped)
8454                 return extended_to_chunk(stripped);
8455
8456         num_devices = root->fs_info->fs_devices->rw_devices;
8457
8458         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8459                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8460                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8461
8462         if (num_devices == 1) {
8463                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8464                 stripped = flags & ~stripped;
8465
8466                 /* turn raid0 into single device chunks */
8467                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8468                         return stripped;
8469
8470                 /* turn mirroring into duplication */
8471                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8472                              BTRFS_BLOCK_GROUP_RAID10))
8473                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8474         } else {
8475                 /* they already had raid on here, just return */
8476                 if (flags & stripped)
8477                         return flags;
8478
8479                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8480                 stripped = flags & ~stripped;
8481
8482                 /* switch duplicated blocks with raid1 */
8483                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8484                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8485
8486                 /* this is drive concat, leave it alone */
8487         }
8488
8489         return flags;
8490 }
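
/*
 * Worked example (illustrative): on a filesystem reduced to a single
 * rw device, a RAID1 metadata chunk is converted to DUP by the logic
 * above:
 *
 *	flags    = METADATA | RAID1;
 *	stripped = RAID0 | RAID5 | RAID6 | RAID1 | RAID10 | DUP;
 *	flags & ~stripped == METADATA;
 *	return METADATA | DUP;
 *
 * i.e. mirroring across devices degrades to duplication on one.
 */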
8491
8492 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8493 {
8494         struct btrfs_space_info *sinfo = cache->space_info;
8495         u64 num_bytes;
8496         u64 min_allocable_bytes;
8497         int ret = -ENOSPC;
8498
8499
8500         /*
8501          * We need some metadata space and system metadata space for
8502          * allocating chunks in some corner cases, so keep a minimum
8503          * reserve unless we're forcing the block group read-only.
8504          */
8505         if ((sinfo->flags &
8506              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8507             !force)
8508                 min_allocable_bytes = 1 * 1024 * 1024;
8509         else
8510                 min_allocable_bytes = 0;
8511
8512         spin_lock(&sinfo->lock);
8513         spin_lock(&cache->lock);
8514
8515         if (cache->ro) {
8516                 ret = 0;
8517                 goto out;
8518         }
8519
8520         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8521                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8522
8523         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8524             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8525             min_allocable_bytes <= sinfo->total_bytes) {
8526                 sinfo->bytes_readonly += num_bytes;
8527                 cache->ro = 1;
8528                 ret = 0;
8529         }
8530 out:
8531         spin_unlock(&cache->lock);
8532         spin_unlock(&sinfo->lock);
8533         return ret;
8534 }
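
/*
 * The check above is pure accounting: num_bytes is the unallocated
 * space still inside this block group, and the group may go read-only
 * only if
 *
 *	used + reserved + pinned + may_use + readonly + num_bytes
 *		+ min_allocable_bytes <= total_bytes
 *
 * holds for its space_info.  For metadata/system groups (force == 0)
 * min_allocable_bytes is 1MB of headroom for chunk allocation.
 */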
8535
8536 int btrfs_set_block_group_ro(struct btrfs_root *root,
8537                              struct btrfs_block_group_cache *cache)
8538
8539 {
8540         struct btrfs_trans_handle *trans;
8541         u64 alloc_flags;
8542         int ret;
8543
8544         BUG_ON(cache->ro);
8545
8546         trans = btrfs_join_transaction(root);
8547         if (IS_ERR(trans))
8548                 return PTR_ERR(trans);
8549
8550         alloc_flags = update_block_group_flags(root, cache->flags);
8551         if (alloc_flags != cache->flags) {
8552                 ret = do_chunk_alloc(trans, root, alloc_flags,
8553                                      CHUNK_ALLOC_FORCE);
8554                 if (ret < 0)
8555                         goto out;
8556         }
8557
8558         ret = set_block_group_ro(cache, 0);
8559         if (!ret)
8560                 goto out;
8561         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8562         ret = do_chunk_alloc(trans, root, alloc_flags,
8563                              CHUNK_ALLOC_FORCE);
8564         if (ret < 0)
8565                 goto out;
8566         ret = set_block_group_ro(cache, 0);
8567 out:
8568         btrfs_end_transaction(trans, root);
8569         return ret;
8570 }
8571
8572 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8573                             struct btrfs_root *root, u64 type)
8574 {
8575         u64 alloc_flags = get_alloc_profile(root, type);
8576         return do_chunk_alloc(trans, root, alloc_flags,
8577                               CHUNK_ALLOC_FORCE);
8578 }
8579
8580 /*
8581  * helper to account the unused space of all the readonly block groups in
8582  * the list. takes mirrors into account.
8583  */
8584 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8585 {
8586         struct btrfs_block_group_cache *block_group;
8587         u64 free_bytes = 0;
8588         int factor;
8589
8590         list_for_each_entry(block_group, groups_list, list) {
8591                 spin_lock(&block_group->lock);
8592
8593                 if (!block_group->ro) {
8594                         spin_unlock(&block_group->lock);
8595                         continue;
8596                 }
8597
8598                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8599                                           BTRFS_BLOCK_GROUP_RAID10 |
8600                                           BTRFS_BLOCK_GROUP_DUP))
8601                         factor = 2;
8602                 else
8603                         factor = 1;
8604
8605                 free_bytes += (block_group->key.offset -
8606                                btrfs_block_group_used(&block_group->item)) *
8607                                factor;
8608
8609                 spin_unlock(&block_group->lock);
8610         }
8611
8612         return free_bytes;
8613 }
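
/*
 * Example (illustrative): a 1GB read-only RAID1 block group with
 * 256MB used contributes (1024MB - 256MB) * 2 = 1536MB here, since
 * every logical byte in a mirrored profile occupies two raw bytes.
 */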
8614
8615 /*
8616  * helper to account the unused space of all the readonly block groups in
8617  * the space_info. takes mirrors into account.
8618  */
8619 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8620 {
8621         int i;
8622         u64 free_bytes = 0;
8623
8624         spin_lock(&sinfo->lock);
8625
8626         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8627                 if (!list_empty(&sinfo->block_groups[i]))
8628                         free_bytes += __btrfs_get_ro_block_group_free_space(
8629                                                 &sinfo->block_groups[i]);
8630
8631         spin_unlock(&sinfo->lock);
8632
8633         return free_bytes;
8634 }
8635
8636 void btrfs_set_block_group_rw(struct btrfs_root *root,
8637                               struct btrfs_block_group_cache *cache)
8638 {
8639         struct btrfs_space_info *sinfo = cache->space_info;
8640         u64 num_bytes;
8641
8642         BUG_ON(!cache->ro);
8643
8644         spin_lock(&sinfo->lock);
8645         spin_lock(&cache->lock);
8646         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8647                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8648         sinfo->bytes_readonly -= num_bytes;
8649         cache->ro = 0;
8650         spin_unlock(&cache->lock);
8651         spin_unlock(&sinfo->lock);
8652 }
8653
8654 /*
8655  * checks to see if its even possible to relocate this block group.
8656  *
8657  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
8658  * ok to go ahead and try.
8659  */
8660 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8661 {
8662         struct btrfs_block_group_cache *block_group;
8663         struct btrfs_space_info *space_info;
8664         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8665         struct btrfs_device *device;
8666         struct btrfs_trans_handle *trans;
8667         u64 min_free;
8668         u64 dev_min = 1;
8669         u64 dev_nr = 0;
8670         u64 target;
8671         int index;
8672         int full = 0;
8673         int ret = 0;
8674
8675         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8676
8677         /* odd, couldn't find the block group, leave it alone */
8678         if (!block_group)
8679                 return -1;
8680
8681         min_free = btrfs_block_group_used(&block_group->item);
8682
8683         /* no bytes used, we're good */
8684         if (!min_free)
8685                 goto out;
8686
8687         space_info = block_group->space_info;
8688         spin_lock(&space_info->lock);
8689
8690         full = space_info->full;
8691
8692         /*
8693          * if this is the last block group we have in this space, we can't
8694          * relocate it unless we're able to allocate a new chunk below.
8695          *
8696          * Otherwise, we need to make sure we have room in the space to handle
8697          * all of the extents from this block group.  If we can, we're good.
8698          */
8699         if ((space_info->total_bytes != block_group->key.offset) &&
8700             (space_info->bytes_used + space_info->bytes_reserved +
8701              space_info->bytes_pinned + space_info->bytes_readonly +
8702              min_free < space_info->total_bytes)) {
8703                 spin_unlock(&space_info->lock);
8704                 goto out;
8705         }
8706         spin_unlock(&space_info->lock);
8707
8708         /*
8709          * ok we don't have enough space, but maybe we have free space on our
8710          * devices to allocate new chunks for relocation, so loop through our
8711          * alloc devices and guess if we have enough space.  if this block
8712          * group is going to be restriped, run checks against the target
8713          * profile instead of the current one.
8714          */
8715         ret = -1;
8716
8717         /*
8718          * index:
8719          *      0: raid10
8720          *      1: raid1
8721          *      2: dup
8722          *      3: raid0
8723          *      4: single
8724          */
8725         target = get_restripe_target(root->fs_info, block_group->flags);
8726         if (target) {
8727                 index = __get_raid_index(extended_to_chunk(target));
8728         } else {
8729                 /*
8730                  * this is just a balance, so if we were marked as full
8731                  * we know there is no space for a new chunk
8732                  */
8733                 if (full)
8734                         goto out;
8735
8736                 index = get_block_group_index(block_group);
8737         }
8738
8739         if (index == BTRFS_RAID_RAID10) {
8740                 dev_min = 4;
8741                 /* Divide by 2 */
8742                 min_free >>= 1;
8743         } else if (index == BTRFS_RAID_RAID1) {
8744                 dev_min = 2;
8745         } else if (index == BTRFS_RAID_DUP) {
8746                 /* Multiply by 2 */
8747                 min_free <<= 1;
8748         } else if (index == BTRFS_RAID_RAID0) {
8749                 dev_min = fs_devices->rw_devices;
8750                 do_div(min_free, dev_min);
8751         }
8752
8753         /* We need to do this so that we can look at pending chunks */
8754         trans = btrfs_join_transaction(root);
8755         if (IS_ERR(trans)) {
8756                 ret = PTR_ERR(trans);
8757                 goto out;
8758         }
8759
8760         mutex_lock(&root->fs_info->chunk_mutex);
8761         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8762                 u64 dev_offset;
8763
8764                 /*
8765                  * check to make sure we can actually find a chunk with enough
8766                  * space to fit our block group in.
8767                  */
8768                 if (device->total_bytes > device->bytes_used + min_free &&
8769                     !device->is_tgtdev_for_dev_replace) {
8770                         ret = find_free_dev_extent(trans, device, min_free,
8771                                                    &dev_offset, NULL);
8772                         if (!ret)
8773                                 dev_nr++;
8774
8775                         if (dev_nr >= dev_min)
8776                                 break;
8777
8778                         ret = -1;
8779                 }
8780         }
8781         mutex_unlock(&root->fs_info->chunk_mutex);
8782         btrfs_end_transaction(trans, root);
8783 out:
8784         btrfs_put_block_group(block_group);
8785         return ret;
8786 }
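
/*
 * Summary of the per-profile requirements applied above when guessing
 * whether a replacement chunk would fit:
 *
 *	RAID10: dev_min = 4, min_free halved (striped + mirrored)
 *	RAID1:  dev_min = 2
 *	DUP:    dev_min = 1, min_free doubled (both copies on one device)
 *	RAID0:  dev_min = rw_devices, min_free split across them
 *	single: dev_min = 1
 */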
8787
8788 static int find_first_block_group(struct btrfs_root *root,
8789                 struct btrfs_path *path, struct btrfs_key *key)
8790 {
8791         int ret = 0;
8792         struct btrfs_key found_key;
8793         struct extent_buffer *leaf;
8794         int slot;
8795
8796         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8797         if (ret < 0)
8798                 goto out;
8799
8800         while (1) {
8801                 slot = path->slots[0];
8802                 leaf = path->nodes[0];
8803                 if (slot >= btrfs_header_nritems(leaf)) {
8804                         ret = btrfs_next_leaf(root, path);
8805                         if (ret == 0)
8806                                 continue;
8807                         if (ret < 0)
8808                                 goto out;
8809                         break;
8810                 }
8811                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8812
8813                 if (found_key.objectid >= key->objectid &&
8814                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8815                         ret = 0;
8816                         goto out;
8817                 }
8818                 path->slots[0]++;
8819         }
8820 out:
8821         return ret;
8822 }
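
/*
 * On success the path is left pointing at the first BLOCK_GROUP_ITEM
 * whose objectid is >= key->objectid.  A return of > 0 (from walking
 * off the last leaf) means there are no more block groups, which is
 * what terminates the loop in btrfs_read_block_groups() below.
 */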
8823
8824 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8825 {
8826         struct btrfs_block_group_cache *block_group;
8827         u64 last = 0;
8828
8829         while (1) {
8830                 struct inode *inode;
8831
8832                 block_group = btrfs_lookup_first_block_group(info, last);
8833                 while (block_group) {
8834                         spin_lock(&block_group->lock);
8835                         if (block_group->iref)
8836                                 break;
8837                         spin_unlock(&block_group->lock);
8838                         block_group = next_block_group(info->tree_root,
8839                                                        block_group);
8840                 }
8841                 if (!block_group) {
8842                         if (last == 0)
8843                                 break;
8844                         last = 0;
8845                         continue;
8846                 }
8847
8848                 inode = block_group->inode;
8849                 block_group->iref = 0;
8850                 block_group->inode = NULL;
8851                 spin_unlock(&block_group->lock);
8852                 iput(inode);
8853                 last = block_group->key.objectid + block_group->key.offset;
8854                 btrfs_put_block_group(block_group);
8855         }
8856 }
8857
8858 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8859 {
8860         struct btrfs_block_group_cache *block_group;
8861         struct btrfs_space_info *space_info;
8862         struct btrfs_caching_control *caching_ctl;
8863         struct rb_node *n;
8864
8865         down_write(&info->commit_root_sem);
8866         while (!list_empty(&info->caching_block_groups)) {
8867                 caching_ctl = list_entry(info->caching_block_groups.next,
8868                                          struct btrfs_caching_control, list);
8869                 list_del(&caching_ctl->list);
8870                 put_caching_control(caching_ctl);
8871         }
8872         up_write(&info->commit_root_sem);
8873
8874         spin_lock(&info->unused_bgs_lock);
8875         while (!list_empty(&info->unused_bgs)) {
8876                 block_group = list_first_entry(&info->unused_bgs,
8877                                                struct btrfs_block_group_cache,
8878                                                bg_list);
8879                 list_del_init(&block_group->bg_list);
8880                 btrfs_put_block_group(block_group);
8881         }
8882         spin_unlock(&info->unused_bgs_lock);
8883
8884         spin_lock(&info->block_group_cache_lock);
8885         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8886                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8887                                        cache_node);
8888                 rb_erase(&block_group->cache_node,
8889                          &info->block_group_cache_tree);
8890                 spin_unlock(&info->block_group_cache_lock);
8891
8892                 down_write(&block_group->space_info->groups_sem);
8893                 list_del(&block_group->list);
8894                 up_write(&block_group->space_info->groups_sem);
8895
8896                 if (block_group->cached == BTRFS_CACHE_STARTED)
8897                         wait_block_group_cache_done(block_group);
8898
8899                 /*
8900                  * We haven't cached this block group, which means we could
8901                  * possibly have excluded extents on this block group.
8902                  */
8903                 if (block_group->cached == BTRFS_CACHE_NO ||
8904                     block_group->cached == BTRFS_CACHE_ERROR)
8905                         free_excluded_extents(info->extent_root, block_group);
8906
8907                 btrfs_remove_free_space_cache(block_group);
8908                 btrfs_put_block_group(block_group);
8909
8910                 spin_lock(&info->block_group_cache_lock);
8911         }
8912         spin_unlock(&info->block_group_cache_lock);
8913
8914         /* now that all the block groups are freed, go through and
8915          * free all the space_info structs.  This is only called during
8916          * the final stages of unmount, and so we know nobody is
8917          * using them.  We call synchronize_rcu() once before we start,
8918          * just to be on the safe side.
8919          */
8920         synchronize_rcu();
8921
8922         release_global_block_rsv(info);
8923
8924         while (!list_empty(&info->space_info)) {
8925                 int i;
8926
8927                 space_info = list_entry(info->space_info.next,
8928                                         struct btrfs_space_info,
8929                                         list);
8930                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8931                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8932                             space_info->bytes_reserved > 0 ||
8933                             space_info->bytes_may_use > 0)) {
8934                                 dump_space_info(space_info, 0, 0);
8935                         }
8936                 }
8937                 list_del(&space_info->list);
8938                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8939                         struct kobject *kobj;
8940                         kobj = space_info->block_group_kobjs[i];
8941                         space_info->block_group_kobjs[i] = NULL;
8942                         if (kobj) {
8943                                 kobject_del(kobj);
8944                                 kobject_put(kobj);
8945                         }
8946                 }
8947                 kobject_del(&space_info->kobj);
8948                 kobject_put(&space_info->kobj);
8949         }
8950         return 0;
8951 }
8952
8953 static void __link_block_group(struct btrfs_space_info *space_info,
8954                                struct btrfs_block_group_cache *cache)
8955 {
8956         int index = get_block_group_index(cache);
8957         bool first = false;
8958
8959         down_write(&space_info->groups_sem);
8960         if (list_empty(&space_info->block_groups[index]))
8961                 first = true;
8962         list_add_tail(&cache->list, &space_info->block_groups[index]);
8963         up_write(&space_info->groups_sem);
8964
8965         if (first) {
8966                 struct raid_kobject *rkobj;
8967                 int ret;
8968
8969                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
8970                 if (!rkobj)
8971                         goto out_err;
8972                 rkobj->raid_type = index;
8973                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
8974                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
8975                                   "%s", get_raid_name(index));
8976                 if (ret) {
8977                         kobject_put(&rkobj->kobj);
8978                         goto out_err;
8979                 }
8980                 space_info->block_group_kobjs[index] = &rkobj->kobj;
8981         }
8982
8983         return;
8984 out_err:
8985         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8986 }
8987
8988 static struct btrfs_block_group_cache *
8989 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8990 {
8991         struct btrfs_block_group_cache *cache;
8992
8993         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8994         if (!cache)
8995                 return NULL;
8996
8997         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8998                                         GFP_NOFS);
8999         if (!cache->free_space_ctl) {
9000                 kfree(cache);
9001                 return NULL;
9002         }
9003
9004         cache->key.objectid = start;
9005         cache->key.offset = size;
9006         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9007
9008         cache->sectorsize = root->sectorsize;
9009         cache->fs_info = root->fs_info;
9010         cache->full_stripe_len = btrfs_full_stripe_len(root,
9011                                                &root->fs_info->mapping_tree,
9012                                                start);
9013         atomic_set(&cache->count, 1);
9014         spin_lock_init(&cache->lock);
9015         init_rwsem(&cache->data_rwsem);
9016         INIT_LIST_HEAD(&cache->list);
9017         INIT_LIST_HEAD(&cache->cluster_list);
9018         INIT_LIST_HEAD(&cache->bg_list);
9019         btrfs_init_free_space_ctl(cache);
9020
9021         return cache;
9022 }
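
/*
 * The cache returned above carries a single reference and a
 * separately allocated free_space_ctl, so error paths must unwind
 * with btrfs_put_block_group() rather than kfree(), as the error
 * paths in btrfs_read_block_groups() and btrfs_make_block_group() do;
 * otherwise free_space_ctl would leak.
 */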
9023
9024 int btrfs_read_block_groups(struct btrfs_root *root)
9025 {
9026         struct btrfs_path *path;
9027         int ret;
9028         struct btrfs_block_group_cache *cache;
9029         struct btrfs_fs_info *info = root->fs_info;
9030         struct btrfs_space_info *space_info;
9031         struct btrfs_key key;
9032         struct btrfs_key found_key;
9033         struct extent_buffer *leaf;
9034         int need_clear = 0;
9035         u64 cache_gen;
9036
9037         root = info->extent_root;
9038         key.objectid = 0;
9039         key.offset = 0;
9040         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9041         path = btrfs_alloc_path();
9042         if (!path)
9043                 return -ENOMEM;
9044         path->reada = 1;
9045
9046         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9047         if (btrfs_test_opt(root, SPACE_CACHE) &&
9048             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9049                 need_clear = 1;
9050         if (btrfs_test_opt(root, CLEAR_CACHE))
9051                 need_clear = 1;
9052
9053         while (1) {
9054                 ret = find_first_block_group(root, path, &key);
9055                 if (ret > 0)
9056                         break;
9057                 if (ret != 0)
9058                         goto error;
9059
9060                 leaf = path->nodes[0];
9061                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9062
9063                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9064                                                        found_key.offset);
9065                 if (!cache) {
9066                         ret = -ENOMEM;
9067                         goto error;
9068                 }
9069
9070                 if (need_clear) {
9071                         /*
9072                          * When we mount with old space cache, we need to
9073                          * set BTRFS_DC_CLEAR and set the dirty flag.
9074                          *
9075                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9076                          *    truncate the old free space cache inode and
9077                          *    set up a new one.
9078                          * b) Setting the 'dirty' flag makes sure that we flush
9079                          *    the new space cache info onto disk.
9080                          */
9081                         cache->disk_cache_state = BTRFS_DC_CLEAR;
9082                         if (btrfs_test_opt(root, SPACE_CACHE))
9083                                 cache->dirty = 1;
9084                 }
9085
9086                 read_extent_buffer(leaf, &cache->item,
9087                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9088                                    sizeof(cache->item));
9089                 cache->flags = btrfs_block_group_flags(&cache->item);
9090
9091                 key.objectid = found_key.objectid + found_key.offset;
9092                 btrfs_release_path(path);
9093
9094                 /*
9095                  * We need to exclude the super stripes now so that the space
9096                  * info has super bytes accounted for, otherwise we'll think
9097                  * we have more space than we actually do.
9098                  */
9099                 ret = exclude_super_stripes(root, cache);
9100                 if (ret) {
9101                         /*
9102                          * We may have excluded something, so call this just in
9103                          * case.
9104                          */
9105                         free_excluded_extents(root, cache);
9106                         btrfs_put_block_group(cache);
9107                         goto error;
9108                 }
9109
9110                 /*
9111                  * check for two cases, either we are full, and therefore
9112                  * don't need to bother with the caching work since we won't
9113                  * find any space, or we are empty, and we can just add all
9114                  * the space in and be done with it.  This saves us _alot_ of
9115                  * time, particularly in the full case.
9116                  */
9117                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9118                         cache->last_byte_to_unpin = (u64)-1;
9119                         cache->cached = BTRFS_CACHE_FINISHED;
9120                         free_excluded_extents(root, cache);
9121                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9122                         cache->last_byte_to_unpin = (u64)-1;
9123                         cache->cached = BTRFS_CACHE_FINISHED;
9124                         add_new_free_space(cache, root->fs_info,
9125                                            found_key.objectid,
9126                                            found_key.objectid +
9127                                            found_key.offset);
9128                         free_excluded_extents(root, cache);
9129                 }
9130
9131                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9132                 if (ret) {
9133                         btrfs_remove_free_space_cache(cache);
9134                         btrfs_put_block_group(cache);
9135                         goto error;
9136                 }
9137
9138                 ret = update_space_info(info, cache->flags, found_key.offset,
9139                                         btrfs_block_group_used(&cache->item),
9140                                         &space_info);
9141                 if (ret) {
9142                         btrfs_remove_free_space_cache(cache);
9143                         spin_lock(&info->block_group_cache_lock);
9144                         rb_erase(&cache->cache_node,
9145                                  &info->block_group_cache_tree);
9146                         spin_unlock(&info->block_group_cache_lock);
9147                         btrfs_put_block_group(cache);
9148                         goto error;
9149                 }
9150
9151                 cache->space_info = space_info;
9152                 spin_lock(&cache->space_info->lock);
9153                 cache->space_info->bytes_readonly += cache->bytes_super;
9154                 spin_unlock(&cache->space_info->lock);
9155
9156                 __link_block_group(space_info, cache);
9157
9158                 set_avail_alloc_bits(root->fs_info, cache->flags);
9159                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9160                         set_block_group_ro(cache, 1);
9161                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9162                         spin_lock(&info->unused_bgs_lock);
9163                         /* Should always be true but just in case. */
9164                         if (list_empty(&cache->bg_list)) {
9165                                 btrfs_get_block_group(cache);
9166                                 list_add_tail(&cache->bg_list,
9167                                               &info->unused_bgs);
9168                         }
9169                         spin_unlock(&info->unused_bgs_lock);
9170                 }
9171         }
9172
9173         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9174                 if (!(get_alloc_profile(root, space_info->flags) &
9175                       (BTRFS_BLOCK_GROUP_RAID10 |
9176                        BTRFS_BLOCK_GROUP_RAID1 |
9177                        BTRFS_BLOCK_GROUP_RAID5 |
9178                        BTRFS_BLOCK_GROUP_RAID6 |
9179                        BTRFS_BLOCK_GROUP_DUP)))
9180                         continue;
9181                 /*
9182                  * avoid allocating from un-mirrored block groups if there are
9183                  * mirrored block groups.
9184                  */
9185                 list_for_each_entry(cache,
9186                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9187                                 list)
9188                         set_block_group_ro(cache, 1);
9189                 list_for_each_entry(cache,
9190                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9191                                 list)
9192                         set_block_group_ro(cache, 1);
9193         }
9194
9195         init_global_block_rsv(info);
9196         ret = 0;
9197 error:
9198         btrfs_free_path(path);
9199         return ret;
9200 }
9201
9202 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9203                                        struct btrfs_root *root)
9204 {
9205         struct btrfs_block_group_cache *block_group, *tmp;
9206         struct btrfs_root *extent_root = root->fs_info->extent_root;
9207         struct btrfs_block_group_item item;
9208         struct btrfs_key key;
9209         int ret = 0;
9210
9211         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9212                 list_del_init(&block_group->bg_list);
9213                 if (ret)
9214                         continue;
9215
9216                 spin_lock(&block_group->lock);
9217                 memcpy(&item, &block_group->item, sizeof(item));
9218                 memcpy(&key, &block_group->key, sizeof(key));
9219                 spin_unlock(&block_group->lock);
9220
9221                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9222                                         sizeof(item));
9223                 if (ret)
9224                         btrfs_abort_transaction(trans, extent_root, ret);
9225                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9226                                                key.objectid, key.offset);
9227                 if (ret)
9228                         btrfs_abort_transaction(trans, extent_root, ret);
9229         }
9230 }
9231
9232 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9233                            struct btrfs_root *root, u64 bytes_used,
9234                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9235                            u64 size)
9236 {
9237         int ret;
9238         struct btrfs_root *extent_root;
9239         struct btrfs_block_group_cache *cache;
9240
9241         extent_root = root->fs_info->extent_root;
9242
9243         btrfs_set_log_full_commit(root->fs_info, trans);
9244
9245         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9246         if (!cache)
9247                 return -ENOMEM;
9248
9249         btrfs_set_block_group_used(&cache->item, bytes_used);
9250         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9251         btrfs_set_block_group_flags(&cache->item, type);
9252
9253         cache->flags = type;
9254         cache->last_byte_to_unpin = (u64)-1;
9255         cache->cached = BTRFS_CACHE_FINISHED;
9256         ret = exclude_super_stripes(root, cache);
9257         if (ret) {
9258                 /*
9259                  * We may have excluded something, so call this just in
9260                  * case.
9261                  */
9262                 free_excluded_extents(root, cache);
9263                 btrfs_put_block_group(cache);
9264                 return ret;
9265         }
9266
9267         add_new_free_space(cache, root->fs_info, chunk_offset,
9268                            chunk_offset + size);
9269
9270         free_excluded_extents(root, cache);
9271
9272         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9273         if (ret) {
9274                 btrfs_remove_free_space_cache(cache);
9275                 btrfs_put_block_group(cache);
9276                 return ret;
9277         }
9278
9279         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9280                                 &cache->space_info);
9281         if (ret) {
9282                 btrfs_remove_free_space_cache(cache);
9283                 spin_lock(&root->fs_info->block_group_cache_lock);
9284                 rb_erase(&cache->cache_node,
9285                          &root->fs_info->block_group_cache_tree);
9286                 spin_unlock(&root->fs_info->block_group_cache_lock);
9287                 btrfs_put_block_group(cache);
9288                 return ret;
9289         }
9290         update_global_block_rsv(root->fs_info);
9291
9292         spin_lock(&cache->space_info->lock);
9293         cache->space_info->bytes_readonly += cache->bytes_super;
9294         spin_unlock(&cache->space_info->lock);
9295
9296         __link_block_group(cache->space_info, cache);
9297
9298         list_add_tail(&cache->bg_list, &trans->new_bgs);
9299
9300         set_avail_alloc_bits(extent_root->fs_info, type);
9301
9302         return 0;
9303 }
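
/*
 * Note that the new block group is only queued on trans->new_bgs
 * here; the BLOCK_GROUP_ITEM itself is inserted into the extent tree
 * later, by btrfs_create_pending_block_groups() above, once chunk
 * allocation has finished.
 */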
9304
9305 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9306 {
9307         u64 extra_flags = chunk_to_extended(flags) &
9308                                 BTRFS_EXTENDED_PROFILE_MASK;
9309
9310         write_seqlock(&fs_info->profiles_lock);
9311         if (flags & BTRFS_BLOCK_GROUP_DATA)
9312                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9313         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9314                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9315         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9316                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9317         write_sequnlock(&fs_info->profiles_lock);
9318 }
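
/*
 * chunk_to_extended() plus BTRFS_EXTENDED_PROFILE_MASK reduce 'flags'
 * to its profile bits only, so a caller removing the last RAID1 data
 * block group would clear just the RAID1 bit from
 * avail_data_alloc_bits while leaving the other profiles intact.
 */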
9319
9320 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9321                              struct btrfs_root *root, u64 group_start)
9322 {
9323         struct btrfs_path *path;
9324         struct btrfs_block_group_cache *block_group;
9325         struct btrfs_free_cluster *cluster;
9326         struct btrfs_root *tree_root = root->fs_info->tree_root;
9327         struct btrfs_key key;
9328         struct inode *inode;
9329         struct kobject *kobj = NULL;
9330         int ret;
9331         int index;
9332         int factor;
9333
9334         root = root->fs_info->extent_root;
9335
9336         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9337         BUG_ON(!block_group);
9338         BUG_ON(!block_group->ro);
9339
9340         /*
9341          * Free the reserved super bytes from this block group before
9342          * removing it.
9343          */
9344         free_excluded_extents(root, block_group);
9345
9346         memcpy(&key, &block_group->key, sizeof(key));
9347         index = get_block_group_index(block_group);
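        /* DUP/RAID1/RAID10 consume twice the disk space per logical byte */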
9348         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9349                                   BTRFS_BLOCK_GROUP_RAID1 |
9350                                   BTRFS_BLOCK_GROUP_RAID10))
9351                 factor = 2;
9352         else
9353                 factor = 1;
9354
9355         /* make sure this block group isn't part of an allocation cluster */
9356         cluster = &root->fs_info->data_alloc_cluster;
9357         spin_lock(&cluster->refill_lock);
9358         btrfs_return_cluster_to_free_space(block_group, cluster);
9359         spin_unlock(&cluster->refill_lock);
9360
9361         /*
9362          * make sure this block group isn't part of a metadata
9363          * allocation cluster
9364          */
9365         cluster = &root->fs_info->meta_alloc_cluster;
9366         spin_lock(&cluster->refill_lock);
9367         btrfs_return_cluster_to_free_space(block_group, cluster);
9368         spin_unlock(&cluster->refill_lock);
9369
9370         path = btrfs_alloc_path();
9371         if (!path) {
9372                 ret = -ENOMEM;
9373                 goto out;
9374         }
9375
9376         inode = lookup_free_space_inode(tree_root, block_group, path);
9377         if (!IS_ERR(inode)) {
9378                 ret = btrfs_orphan_add(trans, inode);
9379                 if (ret) {
9380                         btrfs_add_delayed_iput(inode);
9381                         goto out;
9382                 }
9383                 clear_nlink(inode);
9384                 /* One for the block group's ref */
9385                 spin_lock(&block_group->lock);
9386                 if (block_group->iref) {
9387                         block_group->iref = 0;
9388                         block_group->inode = NULL;
9389                         spin_unlock(&block_group->lock);
9390                         iput(inode);
9391                 } else {
9392                         spin_unlock(&block_group->lock);
9393                 }
9394                 /* One for our lookup ref */
9395                 btrfs_add_delayed_iput(inode);
9396         }
9397
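        /*
         * Remove the free space cache item for this block group from the
         * tree root; it is keyed (BTRFS_FREE_SPACE_OBJECTID, 0, group start).
         */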
9398         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9399         key.offset = block_group->key.objectid;
9400         key.type = 0;
9401
9402         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9403         if (ret < 0)
9404                 goto out;
9405         if (ret > 0)
9406                 btrfs_release_path(path);
9407         if (ret == 0) {
9408                 ret = btrfs_del_item(trans, tree_root, path);
9409                 if (ret)
9410                         goto out;
9411                 btrfs_release_path(path);
9412         }
9413
9414         spin_lock(&root->fs_info->block_group_cache_lock);
9415         rb_erase(&block_group->cache_node,
9416                  &root->fs_info->block_group_cache_tree);
9417
9418         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9419                 root->fs_info->first_logical_byte = (u64)-1;
9420         spin_unlock(&root->fs_info->block_group_cache_lock);
9421
9422         down_write(&block_group->space_info->groups_sem);
9423         /*
9424          * we must use list_del_init so people can check to see if they
9425          * are still on the list after taking the semaphore
9426          */
9427         list_del_init(&block_group->list);
9428         if (list_empty(&block_group->space_info->block_groups[index])) {
9429                 kobj = block_group->space_info->block_group_kobjs[index];
9430                 block_group->space_info->block_group_kobjs[index] = NULL;
9431                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9432         }
9433         up_write(&block_group->space_info->groups_sem);
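        /* if this was the last group of its raid index, drop its sysfs entry */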
9434         if (kobj) {
9435                 kobject_del(kobj);
9436                 kobject_put(kobj);
9437         }
9438
9439         if (block_group->cached == BTRFS_CACHE_STARTED)
9440                 wait_block_group_cache_done(block_group);
9441
9442         btrfs_remove_free_space_cache(block_group);
9443
9444         spin_lock(&block_group->space_info->lock);
9445         block_group->space_info->total_bytes -= block_group->key.offset;
9446         block_group->space_info->bytes_readonly -= block_group->key.offset;
9447         block_group->space_info->disk_total -= block_group->key.offset * factor;
9448         spin_unlock(&block_group->space_info->lock);
9449
9450         memcpy(&key, &block_group->key, sizeof(key));
9451
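        /*
         * Drop two references: one for the lookup at the top of this
         * function and one for the rbtree entry removed above.
         */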
9452         btrfs_put_block_group(block_group);
9453         btrfs_put_block_group(block_group);
9454
9455         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9456         if (ret > 0)
9457                 ret = -EIO;
9458         if (ret < 0)
9459                 goto out;
9460
9461         ret = btrfs_del_item(trans, root, path);
9462 out:
9463         btrfs_free_path(path);
9464         return ret;
9465 }
9466
9467 /*
9468  * Process the unused_bgs list and remove any block groups that no longer
9469  * have any allocated space inside them.
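 *
 * Entries are queued elsewhere, when a block group's used byte count
 * drops to zero, and this is normally driven from the cleaner kthread.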
9470  */
9471 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9472 {
9473         struct btrfs_block_group_cache *block_group;
9474         struct btrfs_space_info *space_info;
9475         struct btrfs_root *root = fs_info->extent_root;
9476         struct btrfs_trans_handle *trans;
9477         int ret = 0;
9478
9479         if (!fs_info->open)
9480                 return;
9481
9482         spin_lock(&fs_info->unused_bgs_lock);
9483         while (!list_empty(&fs_info->unused_bgs)) {
9484                 u64 start, end;
9485
9486                 block_group = list_first_entry(&fs_info->unused_bgs,
9487                                                struct btrfs_block_group_cache,
9488                                                bg_list);
9489                 space_info = block_group->space_info;
9490                 list_del_init(&block_group->bg_list);
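                /*
                 * Once a previous removal has failed (ret != 0) we only drop
                 * our refs on the remaining entries; mixed block groups are
                 * never removed automatically.
                 */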
9491                 if (ret || btrfs_mixed_space_info(space_info)) {
9492                         btrfs_put_block_group(block_group);
9493                         continue;
9494                 }
9495                 spin_unlock(&fs_info->unused_bgs_lock);
9496
9497                 /* Don't want to race with allocators so take the groups_sem */
9498                 down_write(&space_info->groups_sem);
9499                 spin_lock(&block_group->lock);
9500                 if (block_group->reserved ||
9501                     btrfs_block_group_used(&block_group->item) ||
9502                     block_group->ro) {
9503                         /*
9504                          * We want to bail if we made new allocations or have
9505                          * outstanding allocations in this block group.  We do
9506                          * the ro check in case balance is currently acting on
9507                          * this block group.
9508                          */
9509                         spin_unlock(&block_group->lock);
9510                         up_write(&space_info->groups_sem);
9511                         goto next;
9512                 }
9513                 spin_unlock(&block_group->lock);
9514
9515                 /* We don't want to force the issue, only flip if it's ok. */
9516                 ret = set_block_group_ro(block_group, 0);
9517                 up_write(&space_info->groups_sem);
9518                 if (ret < 0) {
9519                         ret = 0;
9520                         goto next;
9521                 }
9522
9523                 /*
9524                  * Want to do this before we do anything else so we can recover
9525                  * properly if we fail to join the transaction.
9526                  */
9527                 trans = btrfs_join_transaction(root);
9528                 if (IS_ERR(trans)) {
9529                         btrfs_set_block_group_rw(root, block_group);
9530                         ret = PTR_ERR(trans);
9531                         goto next;
9532                 }
9533
9534                 /*
9535          * We could have pending pinned extents for this block group;
9536          * just clear them, we don't care about them anymore.
9537                  */
9538                 start = block_group->key.objectid;
9539                 end = start + block_group->key.offset - 1;
9540                 clear_extent_bits(&fs_info->freed_extents[0], start, end,
9541                                   EXTENT_DIRTY, GFP_NOFS);
9542                 clear_extent_bits(&fs_info->freed_extents[1], start, end,
9543                                   EXTENT_DIRTY, GFP_NOFS);
9544
9545                 /* Reset pinned so btrfs_put_block_group doesn't complain */
9546                 block_group->pinned = 0;
9547
9548                 /*
9549          * btrfs_remove_chunk() will abort the transaction if things go
9550                  * horribly wrong.
9551                  */
9552                 ret = btrfs_remove_chunk(trans, root,
9553                                          block_group->key.objectid);
9554                 btrfs_end_transaction(trans, root);
9555 next:
9556                 btrfs_put_block_group(block_group);
9557                 spin_lock(&fs_info->unused_bgs_lock);
9558         }
9559         spin_unlock(&fs_info->unused_bgs_lock);
9560 }
9561
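/*
 * Pre-create the space_infos for the standard allocation profiles: SYSTEM
 * always, then either one mixed METADATA|DATA space_info or separate
 * METADATA and DATA ones.
 */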
9562 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
9563 {
9564         struct btrfs_space_info *space_info;
9565         struct btrfs_super_block *disk_super;
9566         u64 features;
9567         u64 flags;
9568         int mixed = 0;
9569         int ret;
9570
9571         disk_super = fs_info->super_copy;
9572         if (!btrfs_super_root(disk_super))
9573                 return 1;
9574
9575         features = btrfs_super_incompat_flags(disk_super);
9576         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
9577                 mixed = 1;
9578
9579         flags = BTRFS_BLOCK_GROUP_SYSTEM;
9580         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9581         if (ret)
9582                 goto out;
9583
9584         if (mixed) {
9585                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
9586                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9587         } else {
9588                 flags = BTRFS_BLOCK_GROUP_METADATA;
9589                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9590                 if (ret)
9591                         goto out;
9592
9593                 flags = BTRFS_BLOCK_GROUP_DATA;
9594                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9595         }
9596 out:
9597         return ret;
9598 }
9599
9600 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9601 {
9602         return unpin_extent_range(root, start, end);
9603 }
9604
9605 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
9606                                u64 num_bytes, u64 *actual_bytes)
9607 {
9608         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
9609 }
9610
9611 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9612 {
9613         struct btrfs_fs_info *fs_info = root->fs_info;
9614         struct btrfs_block_group_cache *cache = NULL;
9615         u64 group_trimmed;
9616         u64 start;
9617         u64 end;
9618         u64 trimmed = 0;
9619         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9620         int ret = 0;
9621
9622         /*
9623          * try to trim all FS space; the first block group may start at a non-zero offset.
9624          */
9625         if (range->len == total_bytes)
9626                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
9627         else
9628                 cache = btrfs_lookup_block_group(fs_info, range->start);
9629
9630         while (cache) {
9631                 if (cache->key.objectid >= (range->start + range->len)) {
9632                         btrfs_put_block_group(cache);
9633                         break;
9634                 }
9635
9636                 start = max(range->start, cache->key.objectid);
9637                 end = min(range->start + range->len,
9638                                 cache->key.objectid + cache->key.offset);
9639
9640                 if (end - start >= range->minlen) {
9641                         if (!block_group_cache_done(cache)) {
9642                                 ret = cache_block_group(cache, 0);
9643                                 if (ret) {
9644                                         btrfs_put_block_group(cache);
9645                                         break;
9646                                 }
9647                                 ret = wait_block_group_cache_done(cache);
9648                                 if (ret) {
9649                                         btrfs_put_block_group(cache);
9650                                         break;
9651                                 }
9652                         }
9653                         ret = btrfs_trim_block_group(cache,
9654                                                      &group_trimmed,
9655                                                      start,
9656                                                      end,
9657                                                      range->minlen);
9658
9659                         trimmed += group_trimmed;
9660                         if (ret) {
9661                                 btrfs_put_block_group(cache);
9662                                 break;
9663                         }
9664                 }
9665
9666                 cache = next_block_group(fs_info->tree_root, cache);
9667         }
9668
9669         range->len = trimmed;
9670         return ret;
9671 }
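
/*
 * Illustrative sketch, not part of the original file: roughly how the
 * FITRIM ioctl path is expected to call btrfs_trim_fs(), with the range
 * normally copied in from userspace.  The helper name is hypothetical.
 */
static int example_trim_whole_fs(struct btrfs_root *root)
{
        struct fstrim_range range = {
                .start = 0,
                .len = btrfs_super_total_bytes(root->fs_info->super_copy),
                .minlen = 0,
        };

        return btrfs_trim_fs(root, &range);
}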
9672
9673 /*
9674  * btrfs_{start,end}_nocow_write() are similar to mnt_{want,drop}_write();
9675  * they are used to prevent some tasks from writing data into the page cache
9676  * via nocow before the subvolume is snapshotted, but to flush the data to
9677  * disk after the snapshot creation.
9678  */
9679 void btrfs_end_nocow_write(struct btrfs_root *root)
9680 {
9681         percpu_counter_dec(&root->subv_writers->counter);
9682         /*
9683          * Make sure counter is updated before we wake up
9684          * waiters.
9685          */
9686         smp_mb();
9687         if (waitqueue_active(&root->subv_writers->wait))
9688                 wake_up(&root->subv_writers->wait);
9689 }
9690
9691 int btrfs_start_nocow_write(struct btrfs_root *root)
9692 {
9693         if (atomic_read(&root->will_be_snapshoted))
9694                 return 0;
9695
9696         percpu_counter_inc(&root->subv_writers->counter);
9697         /*
9698          * Make sure counter is updated before we check for snapshot creation.
9699          */
9700         smp_mb();
9701         if (atomic_read(&root->will_be_snapshoted)) {
9702                 btrfs_end_nocow_write(root);
9703                 return 0;
9704         }
9705         return 1;
9706 }
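
/*
 * Illustrative sketch, not part of the original file: a nocow writer is
 * expected to bracket its work like this, falling back to the cow path
 * when btrfs_start_nocow_write() reports a pending snapshot.  The helper
 * name and the -EAGAIN fallback are hypothetical.
 */
static int example_nocow_write(struct btrfs_root *root)
{
        int ret;

        if (!btrfs_start_nocow_write(root)) {
                /* a snapshot is pending, the caller must go through cow */
                return -EAGAIN;
        }

        ret = 0;        /* ... do the actual nocow write here ... */

        btrfs_end_nocow_write(root);
        return ret;
}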