Btrfs: fix invalid leaf slot access in btrfs_lookup_extent()
[sfrench/cifs-2.6.git] fs/btrfs/extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
28 #include "hash.h"
29 #include "tree-log.h"
30 #include "disk-io.h"
31 #include "print-tree.h"
32 #include "volumes.h"
33 #include "raid56.h"
34 #include "locking.h"
35 #include "free-space-cache.h"
36 #include "math.h"
37 #include "sysfs.h"
38 #include "qgroup.h"
39
40 #undef SCRAMBLE_DELAYED_REFS
41
42 /*
43  * Control flags for do_chunk_alloc's force field.
44  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45  * if we really need one.
46  *
47  * CHUNK_ALLOC_LIMITED means to only try to allocate one
48  * if we have very few chunks already allocated.  This is
49  * used as part of the clustering code to help make sure
50  * we have a good pool of storage to cluster in, without
51  * filling the FS with empty chunks.
52  *
53  * CHUNK_ALLOC_FORCE means it must try to allocate one.
54  *
55  */
56 enum {
57         CHUNK_ALLOC_NO_FORCE = 0,
58         CHUNK_ALLOC_LIMITED = 1,
59         CHUNK_ALLOC_FORCE = 2,
60 };
61
62 /*
63  * Control how reservations are dealt with.
64  *
65  * RESERVE_FREE - freeing a reservation.
66  * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
67  *   ENOSPC accounting
68  * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69  *   bytes_may_use as the ENOSPC accounting is done elsewhere
70  */
71 enum {
72         RESERVE_FREE = 0,
73         RESERVE_ALLOC = 1,
74         RESERVE_ALLOC_NO_ACCOUNT = 2,
75 };
76
77 static int update_block_group(struct btrfs_root *root,
78                               u64 bytenr, u64 num_bytes, int alloc);
79 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
80                                 struct btrfs_root *root,
81                                 u64 bytenr, u64 num_bytes, u64 parent,
82                                 u64 root_objectid, u64 owner_objectid,
83                                 u64 owner_offset, int refs_to_drop,
84                                 struct btrfs_delayed_extent_op *extra_op,
85                                 int no_quota);
86 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
87                                     struct extent_buffer *leaf,
88                                     struct btrfs_extent_item *ei);
89 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
90                                       struct btrfs_root *root,
91                                       u64 parent, u64 root_objectid,
92                                       u64 flags, u64 owner, u64 offset,
93                                       struct btrfs_key *ins, int ref_mod);
94 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
95                                      struct btrfs_root *root,
96                                      u64 parent, u64 root_objectid,
97                                      u64 flags, struct btrfs_disk_key *key,
98                                      int level, struct btrfs_key *ins,
99                                      int no_quota);
100 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
101                           struct btrfs_root *extent_root, u64 flags,
102                           int force);
103 static int find_next_key(struct btrfs_path *path, int level,
104                          struct btrfs_key *key);
105 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
106                             int dump_block_groups);
107 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
108                                        u64 num_bytes, int reserve,
109                                        int delalloc);
110 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
111                                u64 num_bytes);
112 int btrfs_pin_extent(struct btrfs_root *root,
113                      u64 bytenr, u64 num_bytes, int reserved);
114
115 static noinline int
116 block_group_cache_done(struct btrfs_block_group_cache *cache)
117 {
118         smp_mb();
119         return cache->cached == BTRFS_CACHE_FINISHED ||
120                 cache->cached == BTRFS_CACHE_ERROR;
121 }
122
123 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 {
125         return (cache->flags & bits) == bits;
126 }
127
128 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
129 {
130         atomic_inc(&cache->count);
131 }
132
133 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
134 {
135         if (atomic_dec_and_test(&cache->count)) {
136                 WARN_ON(cache->pinned > 0);
137                 WARN_ON(cache->reserved > 0);
138                 kfree(cache->free_space_ctl);
139                 kfree(cache);
140         }
141 }
142
143 /*
144  * this adds the block group to the fs_info rb tree for the block group
145  * cache
146  */
147 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
148                                 struct btrfs_block_group_cache *block_group)
149 {
150         struct rb_node **p;
151         struct rb_node *parent = NULL;
152         struct btrfs_block_group_cache *cache;
153
154         spin_lock(&info->block_group_cache_lock);
155         p = &info->block_group_cache_tree.rb_node;
156
157         while (*p) {
158                 parent = *p;
159                 cache = rb_entry(parent, struct btrfs_block_group_cache,
160                                  cache_node);
161                 if (block_group->key.objectid < cache->key.objectid) {
162                         p = &(*p)->rb_left;
163                 } else if (block_group->key.objectid > cache->key.objectid) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         spin_unlock(&info->block_group_cache_lock);
167                         return -EEXIST;
168                 }
169         }
170
171         rb_link_node(&block_group->cache_node, parent, p);
172         rb_insert_color(&block_group->cache_node,
173                         &info->block_group_cache_tree);
174
175         if (info->first_logical_byte > block_group->key.objectid)
176                 info->first_logical_byte = block_group->key.objectid;
177
178         spin_unlock(&info->block_group_cache_lock);
179
180         return 0;
181 }
182
183 /*
184  * This will return the block group at or after bytenr if contains is 0, else
185  * it will return the block group that contains the bytenr
186  */
187 static struct btrfs_block_group_cache *
188 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
189                               int contains)
190 {
191         struct btrfs_block_group_cache *cache, *ret = NULL;
192         struct rb_node *n;
193         u64 end, start;
194
195         spin_lock(&info->block_group_cache_lock);
196         n = info->block_group_cache_tree.rb_node;
197
198         while (n) {
199                 cache = rb_entry(n, struct btrfs_block_group_cache,
200                                  cache_node);
201                 end = cache->key.objectid + cache->key.offset - 1;
202                 start = cache->key.objectid;
203
204                 if (bytenr < start) {
205                         if (!contains && (!ret || start < ret->key.objectid))
206                                 ret = cache;
207                         n = n->rb_left;
208                 } else if (bytenr > start) {
209                         if (contains && bytenr <= end) {
210                                 ret = cache;
211                                 break;
212                         }
213                         n = n->rb_right;
214                 } else {
215                         ret = cache;
216                         break;
217                 }
218         }
219         if (ret) {
220                 btrfs_get_block_group(ret);
221                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
222                         info->first_logical_byte = ret->key.objectid;
223         }
224         spin_unlock(&info->block_group_cache_lock);
225
226         return ret;
227 }
228
229 static int add_excluded_extent(struct btrfs_root *root,
230                                u64 start, u64 num_bytes)
231 {
232         u64 end = start + num_bytes - 1;
233         set_extent_bits(&root->fs_info->freed_extents[0],
234                         start, end, EXTENT_UPTODATE, GFP_NOFS);
235         set_extent_bits(&root->fs_info->freed_extents[1],
236                         start, end, EXTENT_UPTODATE, GFP_NOFS);
237         return 0;
238 }
239
240 static void free_excluded_extents(struct btrfs_root *root,
241                                   struct btrfs_block_group_cache *cache)
242 {
243         u64 start, end;
244
245         start = cache->key.objectid;
246         end = start + cache->key.offset - 1;
247
248         clear_extent_bits(&root->fs_info->freed_extents[0],
249                           start, end, EXTENT_UPTODATE, GFP_NOFS);
250         clear_extent_bits(&root->fs_info->freed_extents[1],
251                           start, end, EXTENT_UPTODATE, GFP_NOFS);
252 }
253
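/*
 * Account for the superblock mirrors that live inside this block group:
 * their byte ranges are added to cache->bytes_super and marked as
 * excluded extents so they are never handed out as free space.
 */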
254 static int exclude_super_stripes(struct btrfs_root *root,
255                                  struct btrfs_block_group_cache *cache)
256 {
257         u64 bytenr;
258         u64 *logical;
259         int stripe_len;
260         int i, nr, ret;
261
262         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
263                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
264                 cache->bytes_super += stripe_len;
265                 ret = add_excluded_extent(root, cache->key.objectid,
266                                           stripe_len);
267                 if (ret)
268                         return ret;
269         }
270
271         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
272                 bytenr = btrfs_sb_offset(i);
273                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
274                                        cache->key.objectid, bytenr,
275                                        0, &logical, &nr, &stripe_len);
276                 if (ret)
277                         return ret;
278
279                 while (nr--) {
280                         u64 start, len;
281
282                         if (logical[nr] > cache->key.objectid +
283                             cache->key.offset)
284                                 continue;
285
286                         if (logical[nr] + stripe_len <= cache->key.objectid)
287                                 continue;
288
289                         start = logical[nr];
290                         if (start < cache->key.objectid) {
291                                 start = cache->key.objectid;
292                                 len = (logical[nr] + stripe_len) - start;
293                         } else {
294                                 len = min_t(u64, stripe_len,
295                                             cache->key.objectid +
296                                             cache->key.offset - start);
297                         }
298
299                         cache->bytes_super += len;
300                         ret = add_excluded_extent(root, start, len);
301                         if (ret) {
302                                 kfree(logical);
303                                 return ret;
304                         }
305                 }
306
307                 kfree(logical);
308         }
309         return 0;
310 }
311
312 static struct btrfs_caching_control *
313 get_caching_control(struct btrfs_block_group_cache *cache)
314 {
315         struct btrfs_caching_control *ctl;
316
317         spin_lock(&cache->lock);
318         if (cache->cached != BTRFS_CACHE_STARTED) {
319                 spin_unlock(&cache->lock);
320                 return NULL;
321         }
322
323         /* We're loading it the fast way, so we don't have a caching_ctl. */
324         if (!cache->caching_ctl) {
325                 spin_unlock(&cache->lock);
326                 return NULL;
327         }
328
329         ctl = cache->caching_ctl;
330         atomic_inc(&ctl->count);
331         spin_unlock(&cache->lock);
332         return ctl;
333 }
334
335 static void put_caching_control(struct btrfs_caching_control *ctl)
336 {
337         if (atomic_dec_and_test(&ctl->count))
338                 kfree(ctl);
339 }
340
341 /*
342  * This is only called by cache_block_group.  Since we could have freed extents,
343  * we need to check the pinned_extents for any extents that can't be used yet,
344  * since their free space will be released as soon as the transaction commits.
345  */
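/*
 * For example (illustrative values): if [start, end) contains a single
 * pinned extent [ps, pe], the loop below adds [start, ps) as free space
 * and advances start to pe + 1; the tail check then adds [pe + 1, end).
 */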
346 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
347                               struct btrfs_fs_info *info, u64 start, u64 end)
348 {
349         u64 extent_start, extent_end, size, total_added = 0;
350         int ret;
351
352         while (start < end) {
353                 ret = find_first_extent_bit(info->pinned_extents, start,
354                                             &extent_start, &extent_end,
355                                             EXTENT_DIRTY | EXTENT_UPTODATE,
356                                             NULL);
357                 if (ret)
358                         break;
359
360                 if (extent_start <= start) {
361                         start = extent_end + 1;
362                 } else if (extent_start > start && extent_start < end) {
363                         size = extent_start - start;
364                         total_added += size;
365                         ret = btrfs_add_free_space(block_group, start,
366                                                    size);
367                         BUG_ON(ret); /* -ENOMEM or logic error */
368                         start = extent_end + 1;
369                 } else {
370                         break;
371                 }
372         }
373
374         if (start < end) {
375                 size = end - start;
376                 total_added += size;
377                 ret = btrfs_add_free_space(block_group, start, size);
378                 BUG_ON(ret); /* -ENOMEM or logic error */
379         }
380
381         return total_added;
382 }
383
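/*
 * Background worker that builds a block group's free space the slow way:
 * it walks the extent tree over the block group's range (using the
 * commit root, without locking) and records every gap between allocated
 * extents via add_new_free_space(), waking waiters as progress is made.
 */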
384 static noinline void caching_thread(struct btrfs_work *work)
385 {
386         struct btrfs_block_group_cache *block_group;
387         struct btrfs_fs_info *fs_info;
388         struct btrfs_caching_control *caching_ctl;
389         struct btrfs_root *extent_root;
390         struct btrfs_path *path;
391         struct extent_buffer *leaf;
392         struct btrfs_key key;
393         u64 total_found = 0;
394         u64 last = 0;
395         u32 nritems;
396         int ret = -ENOMEM;
397
398         caching_ctl = container_of(work, struct btrfs_caching_control, work);
399         block_group = caching_ctl->block_group;
400         fs_info = block_group->fs_info;
401         extent_root = fs_info->extent_root;
402
403         path = btrfs_alloc_path();
404         if (!path)
405                 goto out;
406
407         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
408
409         /*
410          * We don't want to deadlock with somebody trying to allocate a new
411          * extent for the extent root while also trying to search the extent
412          * root to add free space.  So we skip locking and search the commit
413          * root, since it's read-only.
414          */
415         path->skip_locking = 1;
416         path->search_commit_root = 1;
417         path->reada = 1;
418
419         key.objectid = last;
420         key.offset = 0;
421         key.type = BTRFS_EXTENT_ITEM_KEY;
422 again:
423         mutex_lock(&caching_ctl->mutex);
424         /* need to make sure the commit_root doesn't disappear */
425         down_read(&fs_info->commit_root_sem);
426
427 next:
428         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
429         if (ret < 0)
430                 goto err;
431
432         leaf = path->nodes[0];
433         nritems = btrfs_header_nritems(leaf);
434
435         while (1) {
436                 if (btrfs_fs_closing(fs_info) > 1) {
437                         last = (u64)-1;
438                         break;
439                 }
440
441                 if (path->slots[0] < nritems) {
442                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
443                 } else {
444                         ret = find_next_key(path, 0, &key);
445                         if (ret)
446                                 break;
447
448                         if (need_resched() ||
449                             rwsem_is_contended(&fs_info->commit_root_sem)) {
450                                 caching_ctl->progress = last;
451                                 btrfs_release_path(path);
452                                 up_read(&fs_info->commit_root_sem);
453                                 mutex_unlock(&caching_ctl->mutex);
454                                 cond_resched();
455                                 goto again;
456                         }
457
458                         ret = btrfs_next_leaf(extent_root, path);
459                         if (ret < 0)
460                                 goto err;
461                         if (ret)
462                                 break;
463                         leaf = path->nodes[0];
464                         nritems = btrfs_header_nritems(leaf);
465                         continue;
466                 }
467
468                 if (key.objectid < last) {
469                         key.objectid = last;
470                         key.offset = 0;
471                         key.type = BTRFS_EXTENT_ITEM_KEY;
472
473                         caching_ctl->progress = last;
474                         btrfs_release_path(path);
475                         goto next;
476                 }
477
478                 if (key.objectid < block_group->key.objectid) {
479                         path->slots[0]++;
480                         continue;
481                 }
482
483                 if (key.objectid >= block_group->key.objectid +
484                     block_group->key.offset)
485                         break;
486
487                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
488                     key.type == BTRFS_METADATA_ITEM_KEY) {
489                         total_found += add_new_free_space(block_group,
490                                                           fs_info, last,
491                                                           key.objectid);
492                         if (key.type == BTRFS_METADATA_ITEM_KEY)
493                                 last = key.objectid +
494                                         fs_info->tree_root->nodesize;
495                         else
496                                 last = key.objectid + key.offset;
497
498                         if (total_found > (1024 * 1024 * 2)) {
499                                 total_found = 0;
500                                 wake_up(&caching_ctl->wait);
501                         }
502                 }
503                 path->slots[0]++;
504         }
505         ret = 0;
506
507         total_found += add_new_free_space(block_group, fs_info, last,
508                                           block_group->key.objectid +
509                                           block_group->key.offset);
510         caching_ctl->progress = (u64)-1;
511
512         spin_lock(&block_group->lock);
513         block_group->caching_ctl = NULL;
514         block_group->cached = BTRFS_CACHE_FINISHED;
515         spin_unlock(&block_group->lock);
516
517 err:
518         btrfs_free_path(path);
519         up_read(&fs_info->commit_root_sem);
520
521         free_excluded_extents(extent_root, block_group);
522
523         mutex_unlock(&caching_ctl->mutex);
524 out:
525         if (ret) {
526                 spin_lock(&block_group->lock);
527                 block_group->caching_ctl = NULL;
528                 block_group->cached = BTRFS_CACHE_ERROR;
529                 spin_unlock(&block_group->lock);
530         }
531         wake_up(&caching_ctl->wait);
532
533         put_caching_control(caching_ctl);
534         btrfs_put_block_group(block_group);
535 }
536
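/*
 * Start caching a block group's free space.  When the space_cache mount
 * option is enabled we first try the fast path of loading the free space
 * cache from disk; otherwise (or if that fails and load_cache_only is not
 * set) a caching_thread worker is queued to scan the extent tree.
 */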
537 static int cache_block_group(struct btrfs_block_group_cache *cache,
538                              int load_cache_only)
539 {
540         DEFINE_WAIT(wait);
541         struct btrfs_fs_info *fs_info = cache->fs_info;
542         struct btrfs_caching_control *caching_ctl;
543         int ret = 0;
544
545         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
546         if (!caching_ctl)
547                 return -ENOMEM;
548
549         INIT_LIST_HEAD(&caching_ctl->list);
550         mutex_init(&caching_ctl->mutex);
551         init_waitqueue_head(&caching_ctl->wait);
552         caching_ctl->block_group = cache;
553         caching_ctl->progress = cache->key.objectid;
554         atomic_set(&caching_ctl->count, 1);
555         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
556                         caching_thread, NULL, NULL);
557
558         spin_lock(&cache->lock);
559         /*
560          * This should be a rare occasion, but it can happen in the
561          * case where one thread starts to load the space cache info, and then
562          * some other thread starts a transaction commit which tries to do an
563          * allocation while the other thread is still loading the space cache
564          * info.  The previous loop should have kept us from choosing this block
565          * group, but if we've moved to the state where we will wait on caching
566          * block groups we need to first check if we're doing a fast load here,
567          * so we can wait for it to finish, otherwise we could end up allocating
568          * from a block group whose cache gets evicted for one reason or
569          * another.
570          */
571         while (cache->cached == BTRFS_CACHE_FAST) {
572                 struct btrfs_caching_control *ctl;
573
574                 ctl = cache->caching_ctl;
575                 atomic_inc(&ctl->count);
576                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
577                 spin_unlock(&cache->lock);
578
579                 schedule();
580
581                 finish_wait(&ctl->wait, &wait);
582                 put_caching_control(ctl);
583                 spin_lock(&cache->lock);
584         }
585
586         if (cache->cached != BTRFS_CACHE_NO) {
587                 spin_unlock(&cache->lock);
588                 kfree(caching_ctl);
589                 return 0;
590         }
591         WARN_ON(cache->caching_ctl);
592         cache->caching_ctl = caching_ctl;
593         cache->cached = BTRFS_CACHE_FAST;
594         spin_unlock(&cache->lock);
595
596         if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
597                 ret = load_free_space_cache(fs_info, cache);
598
599                 spin_lock(&cache->lock);
600                 if (ret == 1) {
601                         cache->caching_ctl = NULL;
602                         cache->cached = BTRFS_CACHE_FINISHED;
603                         cache->last_byte_to_unpin = (u64)-1;
604                 } else {
605                         if (load_cache_only) {
606                                 cache->caching_ctl = NULL;
607                                 cache->cached = BTRFS_CACHE_NO;
608                         } else {
609                                 cache->cached = BTRFS_CACHE_STARTED;
610                         }
611                 }
612                 spin_unlock(&cache->lock);
613                 wake_up(&caching_ctl->wait);
614                 if (ret == 1) {
615                         put_caching_control(caching_ctl);
616                         free_excluded_extents(fs_info->extent_root, cache);
617                         return 0;
618                 }
619         } else {
620                 /*
621                  * We are not going to do the fast caching, set cached to the
622                  * appropriate value and wake up any waiters.
623                  */
624                 spin_lock(&cache->lock);
625                 if (load_cache_only) {
626                         cache->caching_ctl = NULL;
627                         cache->cached = BTRFS_CACHE_NO;
628                 } else {
629                         cache->cached = BTRFS_CACHE_STARTED;
630                 }
631                 spin_unlock(&cache->lock);
632                 wake_up(&caching_ctl->wait);
633         }
634
635         if (load_cache_only) {
636                 put_caching_control(caching_ctl);
637                 return 0;
638         }
639
640         down_write(&fs_info->commit_root_sem);
641         atomic_inc(&caching_ctl->count);
642         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
643         up_write(&fs_info->commit_root_sem);
644
645         btrfs_get_block_group(cache);
646
647         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
648
649         return ret;
650 }
651
652 /*
653  * return the block group that starts at or after bytenr
654  */
655 static struct btrfs_block_group_cache *
656 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
657 {
658         struct btrfs_block_group_cache *cache;
659
660         cache = block_group_cache_tree_search(info, bytenr, 0);
661
662         return cache;
663 }
664
665 /*
666  * return the block group that contains the given bytenr
667  */
668 struct btrfs_block_group_cache *btrfs_lookup_block_group(
669                                                  struct btrfs_fs_info *info,
670                                                  u64 bytenr)
671 {
672         struct btrfs_block_group_cache *cache;
673
674         cache = block_group_cache_tree_search(info, bytenr, 1);
675
676         return cache;
677 }
678
679 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
680                                                   u64 flags)
681 {
682         struct list_head *head = &info->space_info;
683         struct btrfs_space_info *found;
684
685         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
686
687         rcu_read_lock();
688         list_for_each_entry_rcu(found, head, list) {
689                 if (found->flags & flags) {
690                         rcu_read_unlock();
691                         return found;
692                 }
693         }
694         rcu_read_unlock();
695         return NULL;
696 }
697
698 /*
699  * after adding space to the filesystem, we need to clear the full flags
700  * on all the space infos.
701  */
702 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
703 {
704         struct list_head *head = &info->space_info;
705         struct btrfs_space_info *found;
706
707         rcu_read_lock();
708         list_for_each_entry_rcu(found, head, list)
709                 found->full = 0;
710         rcu_read_unlock();
711 }
712
713 /* simple helper to search for an existing data extent at a given offset */
714 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
715 {
716         int ret;
717         struct btrfs_key key;
718         struct btrfs_path *path;
719
720         path = btrfs_alloc_path();
721         if (!path)
722                 return -ENOMEM;
723
724         key.objectid = start;
725         key.offset = len;
726         key.type = BTRFS_EXTENT_ITEM_KEY;
727         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
728                                 0, 0);
729         btrfs_free_path(path);
730         return ret;
731 }
732
733 /*
734  * helper function to look up the reference count and flags of a tree block.
735  *
736  * The head node for a delayed ref is used to store the sum of all the
737  * reference count modifications queued up in the rbtree. The head
738  * node may also store the extent flags to set. This way you can check
739  * to see what the reference count and extent flags would be once all of
740  * the delayed refs have been processed.
741  */
742 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
743                              struct btrfs_root *root, u64 bytenr,
744                              u64 offset, int metadata, u64 *refs, u64 *flags)
745 {
746         struct btrfs_delayed_ref_head *head;
747         struct btrfs_delayed_ref_root *delayed_refs;
748         struct btrfs_path *path;
749         struct btrfs_extent_item *ei;
750         struct extent_buffer *leaf;
751         struct btrfs_key key;
752         u32 item_size;
753         u64 num_refs;
754         u64 extent_flags;
755         int ret;
756
757         /*
758          * If we don't have skinny metadata, don't bother doing anything
759          * different
760          */
761         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
762                 offset = root->nodesize;
763                 metadata = 0;
764         }
765
766         path = btrfs_alloc_path();
767         if (!path)
768                 return -ENOMEM;
769
770         if (!trans) {
771                 path->skip_locking = 1;
772                 path->search_commit_root = 1;
773         }
774
775 search_again:
776         key.objectid = bytenr;
777         key.offset = offset;
778         if (metadata)
779                 key.type = BTRFS_METADATA_ITEM_KEY;
780         else
781                 key.type = BTRFS_EXTENT_ITEM_KEY;
782
783 again:
784         ret = btrfs_search_slot(trans, root->fs_info->extent_root,
785                                 &key, path, 0, 0);
786         if (ret < 0)
787                 goto out_free;
788
789         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
790                 if (path->slots[0]) {
791                         path->slots[0]--;
792                         btrfs_item_key_to_cpu(path->nodes[0], &key,
793                                               path->slots[0]);
794                         if (key.objectid == bytenr &&
795                             key.type == BTRFS_EXTENT_ITEM_KEY &&
796                             key.offset == root->nodesize)
797                                 ret = 0;
798                 }
799                 if (ret) {
800                         key.objectid = bytenr;
801                         key.type = BTRFS_EXTENT_ITEM_KEY;
802                         key.offset = root->nodesize;
803                         btrfs_release_path(path);
804                         goto again;
805                 }
806         }
807
808         if (ret == 0) {
809                 leaf = path->nodes[0];
810                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
811                 if (item_size >= sizeof(*ei)) {
812                         ei = btrfs_item_ptr(leaf, path->slots[0],
813                                             struct btrfs_extent_item);
814                         num_refs = btrfs_extent_refs(leaf, ei);
815                         extent_flags = btrfs_extent_flags(leaf, ei);
816                 } else {
817 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
818                         struct btrfs_extent_item_v0 *ei0;
819                         BUG_ON(item_size != sizeof(*ei0));
820                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
821                                              struct btrfs_extent_item_v0);
822                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
823                         /* FIXME: this isn't correct for data */
824                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
825 #else
826                         BUG();
827 #endif
828                 }
829                 BUG_ON(num_refs == 0);
830         } else {
831                 num_refs = 0;
832                 extent_flags = 0;
833                 ret = 0;
834         }
835
836         if (!trans)
837                 goto out;
838
839         delayed_refs = &trans->transaction->delayed_refs;
840         spin_lock(&delayed_refs->lock);
841         head = btrfs_find_delayed_ref_head(trans, bytenr);
842         if (head) {
843                 if (!mutex_trylock(&head->mutex)) {
844                         atomic_inc(&head->node.refs);
845                         spin_unlock(&delayed_refs->lock);
846
847                         btrfs_release_path(path);
848
849                         /*
850                          * Mutex was contended, block until it's released and try
851                          * again
852                          */
853                         mutex_lock(&head->mutex);
854                         mutex_unlock(&head->mutex);
855                         btrfs_put_delayed_ref(&head->node);
856                         goto search_again;
857                 }
858                 spin_lock(&head->lock);
859                 if (head->extent_op && head->extent_op->update_flags)
860                         extent_flags |= head->extent_op->flags_to_set;
861                 else
862                         BUG_ON(num_refs == 0);
863
864                 num_refs += head->node.ref_mod;
865                 spin_unlock(&head->lock);
866                 mutex_unlock(&head->mutex);
867         }
868         spin_unlock(&delayed_refs->lock);
869 out:
870         WARN_ON(num_refs == 0);
871         if (refs)
872                 *refs = num_refs;
873         if (flags)
874                 *flags = extent_flags;
875 out_free:
876         btrfs_free_path(path);
877         return ret;
878 }
879
880 /*
881  * Back reference rules.  Back refs have three main goals:
882  *
883  * 1) differentiate between all holders of references to an extent so that
884  *    when a reference is dropped we can make sure it was a valid reference
885  *    before freeing the extent.
886  *
887  * 2) Provide enough information to quickly find the holders of an extent
888  *    if we notice a given block is corrupted or bad.
889  *
890  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
891  *    maintenance.  This is actually the same as #2, but with a slightly
892  *    different use case.
893  *
894  * There are two kinds of back refs. Implicit back refs are optimized
895  * for pointers in non-shared tree blocks. For a given pointer in a block,
896  * back refs of this kind provide information about the block's owner tree
897  * and the pointer's key. This information allows us to find the block by
898  * b-tree searching. Full back refs are for pointers in tree blocks not
899  * referenced by their owner trees. The location of the tree block is
900  * recorded in the back refs. Full back refs are actually generic and can
901  * be used in all cases where implicit back refs are used. Their major
902  * shortcoming is their overhead: every time a tree block gets COWed, we
903  * have to update the back ref entries for all pointers in it.
904  *
905  * For a newly allocated tree block, we use implicit back refs for
906  * pointers in it. This means most tree related operations only involve
907  * implicit back refs. For a tree block created in an old transaction, the
908  * only way to drop a reference to it is to COW it. So we can detect the
909  * event that a tree block loses its owner tree's reference and do the
910  * back refs conversion.
911  *
912  * When a tree block is COW'd through a tree, there are four cases:
913  *
914  * The reference count of the block is one and the tree is the block's
915  * owner tree. Nothing to do in this case.
916  *
917  * The reference count of the block is one and the tree is not the
918  * block's owner tree. In this case, full back refs are used for pointers
919  * in the block. Remove these full back refs, add implicit back refs for
920  * every pointer in the new block.
921  *
922  * The reference count of the block is greater than one and the tree is
923  * the block's owner tree. In this case, implicit back refs are used for
924  * pointers in the block. Add full back refs for every pointer in the
925  * block, increase lower level extents' reference counts. The original
926  * implicit back refs are carried over to the new block.
927  *
928  * The reference count of the block is greater than one and the tree is
929  * not the block's owner tree. Add implicit back refs for every pointer in
930  * the new block, increase lower level extents' reference count.
931  *
932  * Back Reference Key composing:
933  *
934  * The key objectid corresponds to the first byte in the extent,
935  * The key type is used to differentiate between types of back refs.
936  * There are different meanings of the key offset for different types
937  * of back refs.
938  *
939  * File extents can be referenced by:
940  *
941  * - multiple snapshots, subvolumes, or different generations in one subvol
942  * - different files inside a single subvolume
943  * - different offsets inside a file (bookend extents in file.c)
944  *
945  * The extent ref structure for the implicit back refs has fields for:
946  *
947  * - Objectid of the subvolume root
948  * - objectid of the file holding the reference
949  * - original offset in the file
950  * - how many bookend extents
951  *
952  * The key offset for the implicit back refs is a hash of the first
953  * three fields.
954  *
955  * The extent ref structure for the full back refs has a field for:
956  *
957  * - number of pointers in the tree leaf
958  *
959  * The key offset for the full back refs is the first byte of
960  * the tree leaf.
961  *
962  * When a file extent is allocated, the implicit back refs are used.
963  * The fields are filled in:
964  *
965  *     (root_key.objectid, inode objectid, offset in file, 1)
966  *
967  * When a file extent is removed by file truncation, we find the
968  * corresponding implicit back refs and check the following fields:
969  *
970  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
971  *
972  * Btree extents can be referenced by:
973  *
974  * - Different subvolumes
975  *
976  * Both the implicit back refs and the full back refs for tree blocks
977  * only consist of a key. The key offset for the implicit back refs is
978  * the objectid of the block's owner tree. The key offset for the full
979  * back refs is the first byte of the parent block.
980  *
981  * When implicit back refs are used, information about the lowest key and
982  * level of the tree block is required. This information is stored in the
983  * tree block info structure.
984  */
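/*
 * A concrete example of the key composition described above (the values
 * are only illustrative): a data extent referenced by inode 257 at file
 * offset 0 in the subvolume with root objectid 5 gets an implicit back
 * ref of type BTRFS_EXTENT_DATA_REF_KEY whose key offset is
 * hash(5, 257, 0).  If the extent is instead referenced through a shared
 * leaf, the full back ref is a BTRFS_SHARED_DATA_REF_KEY item keyed on
 * the first byte of that leaf.  Tree blocks follow the same pattern with
 * BTRFS_TREE_BLOCK_REF_KEY (offset = owner tree objectid) and
 * BTRFS_SHARED_BLOCK_REF_KEY (offset = first byte of the parent block).
 */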
985
986 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
987 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
988                                   struct btrfs_root *root,
989                                   struct btrfs_path *path,
990                                   u64 owner, u32 extra_size)
991 {
992         struct btrfs_extent_item *item;
993         struct btrfs_extent_item_v0 *ei0;
994         struct btrfs_extent_ref_v0 *ref0;
995         struct btrfs_tree_block_info *bi;
996         struct extent_buffer *leaf;
997         struct btrfs_key key;
998         struct btrfs_key found_key;
999         u32 new_size = sizeof(*item);
1000         u64 refs;
1001         int ret;
1002
1003         leaf = path->nodes[0];
1004         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1005
1006         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1007         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1008                              struct btrfs_extent_item_v0);
1009         refs = btrfs_extent_refs_v0(leaf, ei0);
1010
1011         if (owner == (u64)-1) {
1012                 while (1) {
1013                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1014                                 ret = btrfs_next_leaf(root, path);
1015                                 if (ret < 0)
1016                                         return ret;
1017                                 BUG_ON(ret > 0); /* Corruption */
1018                                 leaf = path->nodes[0];
1019                         }
1020                         btrfs_item_key_to_cpu(leaf, &found_key,
1021                                               path->slots[0]);
1022                         BUG_ON(key.objectid != found_key.objectid);
1023                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1024                                 path->slots[0]++;
1025                                 continue;
1026                         }
1027                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1028                                               struct btrfs_extent_ref_v0);
1029                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1030                         break;
1031                 }
1032         }
1033         btrfs_release_path(path);
1034
1035         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1036                 new_size += sizeof(*bi);
1037
1038         new_size -= sizeof(*ei0);
1039         ret = btrfs_search_slot(trans, root, &key, path,
1040                                 new_size + extra_size, 1);
1041         if (ret < 0)
1042                 return ret;
1043         BUG_ON(ret); /* Corruption */
1044
1045         btrfs_extend_item(root, path, new_size);
1046
1047         leaf = path->nodes[0];
1048         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1049         btrfs_set_extent_refs(leaf, item, refs);
1050         /* FIXME: get real generation */
1051         btrfs_set_extent_generation(leaf, item, 0);
1052         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1053                 btrfs_set_extent_flags(leaf, item,
1054                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1055                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1056                 bi = (struct btrfs_tree_block_info *)(item + 1);
1057                 /* FIXME: get first key of the block */
1058                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1059                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1060         } else {
1061                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1062         }
1063         btrfs_mark_buffer_dirty(leaf);
1064         return 0;
1065 }
1066 #endif
1067
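/*
 * Hash (root objectid, inode objectid, file offset) into the 64-bit key
 * offset used by implicit data back refs.  Two crc32c sums are combined,
 * so unrelated references rarely collide; collisions are handled by the
 * callers (see insert_extent_data_ref() below).
 */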
1068 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1069 {
1070         u32 high_crc = ~(u32)0;
1071         u32 low_crc = ~(u32)0;
1072         __le64 lenum;
1073
1074         lenum = cpu_to_le64(root_objectid);
1075         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1076         lenum = cpu_to_le64(owner);
1077         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1078         lenum = cpu_to_le64(offset);
1079         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1080
1081         return ((u64)high_crc << 31) ^ (u64)low_crc;
1082 }
1083
1084 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1085                                      struct btrfs_extent_data_ref *ref)
1086 {
1087         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1088                                     btrfs_extent_data_ref_objectid(leaf, ref),
1089                                     btrfs_extent_data_ref_offset(leaf, ref));
1090 }
1091
1092 static int match_extent_data_ref(struct extent_buffer *leaf,
1093                                  struct btrfs_extent_data_ref *ref,
1094                                  u64 root_objectid, u64 owner, u64 offset)
1095 {
1096         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1097             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1098             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1099                 return 0;
1100         return 1;
1101 }
1102
1103 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1104                                            struct btrfs_root *root,
1105                                            struct btrfs_path *path,
1106                                            u64 bytenr, u64 parent,
1107                                            u64 root_objectid,
1108                                            u64 owner, u64 offset)
1109 {
1110         struct btrfs_key key;
1111         struct btrfs_extent_data_ref *ref;
1112         struct extent_buffer *leaf;
1113         u32 nritems;
1114         int ret;
1115         int recow;
1116         int err = -ENOENT;
1117
1118         key.objectid = bytenr;
1119         if (parent) {
1120                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1121                 key.offset = parent;
1122         } else {
1123                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1124                 key.offset = hash_extent_data_ref(root_objectid,
1125                                                   owner, offset);
1126         }
1127 again:
1128         recow = 0;
1129         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1130         if (ret < 0) {
1131                 err = ret;
1132                 goto fail;
1133         }
1134
1135         if (parent) {
1136                 if (!ret)
1137                         return 0;
1138 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1139                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1140                 btrfs_release_path(path);
1141                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1142                 if (ret < 0) {
1143                         err = ret;
1144                         goto fail;
1145                 }
1146                 if (!ret)
1147                         return 0;
1148 #endif
1149                 goto fail;
1150         }
1151
1152         leaf = path->nodes[0];
1153         nritems = btrfs_header_nritems(leaf);
1154         while (1) {
1155                 if (path->slots[0] >= nritems) {
1156                         ret = btrfs_next_leaf(root, path);
1157                         if (ret < 0)
1158                                 err = ret;
1159                         if (ret)
1160                                 goto fail;
1161
1162                         leaf = path->nodes[0];
1163                         nritems = btrfs_header_nritems(leaf);
1164                         recow = 1;
1165                 }
1166
1167                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1168                 if (key.objectid != bytenr ||
1169                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1170                         goto fail;
1171
1172                 ref = btrfs_item_ptr(leaf, path->slots[0],
1173                                      struct btrfs_extent_data_ref);
1174
1175                 if (match_extent_data_ref(leaf, ref, root_objectid,
1176                                           owner, offset)) {
1177                         if (recow) {
1178                                 btrfs_release_path(path);
1179                                 goto again;
1180                         }
1181                         err = 0;
1182                         break;
1183                 }
1184                 path->slots[0]++;
1185         }
1186 fail:
1187         return err;
1188 }
1189
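/*
 * Insert (or bump the count of) a data back ref item.  Because the
 * implicit back ref key offset is a hash, -EEXIST from the insert may be
 * a collision with a different (root, objectid, offset) triple; in that
 * case key.offset is incremented and the insert retried until the
 * matching ref is found or an empty slot is created.
 */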
1190 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1191                                            struct btrfs_root *root,
1192                                            struct btrfs_path *path,
1193                                            u64 bytenr, u64 parent,
1194                                            u64 root_objectid, u64 owner,
1195                                            u64 offset, int refs_to_add)
1196 {
1197         struct btrfs_key key;
1198         struct extent_buffer *leaf;
1199         u32 size;
1200         u32 num_refs;
1201         int ret;
1202
1203         key.objectid = bytenr;
1204         if (parent) {
1205                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1206                 key.offset = parent;
1207                 size = sizeof(struct btrfs_shared_data_ref);
1208         } else {
1209                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1210                 key.offset = hash_extent_data_ref(root_objectid,
1211                                                   owner, offset);
1212                 size = sizeof(struct btrfs_extent_data_ref);
1213         }
1214
1215         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1216         if (ret && ret != -EEXIST)
1217                 goto fail;
1218
1219         leaf = path->nodes[0];
1220         if (parent) {
1221                 struct btrfs_shared_data_ref *ref;
1222                 ref = btrfs_item_ptr(leaf, path->slots[0],
1223                                      struct btrfs_shared_data_ref);
1224                 if (ret == 0) {
1225                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1226                 } else {
1227                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1228                         num_refs += refs_to_add;
1229                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1230                 }
1231         } else {
1232                 struct btrfs_extent_data_ref *ref;
1233                 while (ret == -EEXIST) {
1234                         ref = btrfs_item_ptr(leaf, path->slots[0],
1235                                              struct btrfs_extent_data_ref);
1236                         if (match_extent_data_ref(leaf, ref, root_objectid,
1237                                                   owner, offset))
1238                                 break;
1239                         btrfs_release_path(path);
1240                         key.offset++;
1241                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1242                                                       size);
1243                         if (ret && ret != -EEXIST)
1244                                 goto fail;
1245
1246                         leaf = path->nodes[0];
1247                 }
1248                 ref = btrfs_item_ptr(leaf, path->slots[0],
1249                                      struct btrfs_extent_data_ref);
1250                 if (ret == 0) {
1251                         btrfs_set_extent_data_ref_root(leaf, ref,
1252                                                        root_objectid);
1253                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1254                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1255                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1256                 } else {
1257                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1258                         num_refs += refs_to_add;
1259                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1260                 }
1261         }
1262         btrfs_mark_buffer_dirty(leaf);
1263         ret = 0;
1264 fail:
1265         btrfs_release_path(path);
1266         return ret;
1267 }
1268
1269 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1270                                            struct btrfs_root *root,
1271                                            struct btrfs_path *path,
1272                                            int refs_to_drop, int *last_ref)
1273 {
1274         struct btrfs_key key;
1275         struct btrfs_extent_data_ref *ref1 = NULL;
1276         struct btrfs_shared_data_ref *ref2 = NULL;
1277         struct extent_buffer *leaf;
1278         u32 num_refs = 0;
1279         int ret = 0;
1280
1281         leaf = path->nodes[0];
1282         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1283
1284         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1285                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1286                                       struct btrfs_extent_data_ref);
1287                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1288         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1289                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1290                                       struct btrfs_shared_data_ref);
1291                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1292 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1293         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1294                 struct btrfs_extent_ref_v0 *ref0;
1295                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1296                                       struct btrfs_extent_ref_v0);
1297                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1298 #endif
1299         } else {
1300                 BUG();
1301         }
1302
1303         BUG_ON(num_refs < refs_to_drop);
1304         num_refs -= refs_to_drop;
1305
1306         if (num_refs == 0) {
1307                 ret = btrfs_del_item(trans, root, path);
1308                 *last_ref = 1;
1309         } else {
1310                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1311                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1312                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1313                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1314 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1315                 else {
1316                         struct btrfs_extent_ref_v0 *ref0;
1317                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1318                                         struct btrfs_extent_ref_v0);
1319                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1320                 }
1321 #endif
1322                 btrfs_mark_buffer_dirty(leaf);
1323         }
1324         return ret;
1325 }
1326
1327 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1328                                           struct btrfs_path *path,
1329                                           struct btrfs_extent_inline_ref *iref)
1330 {
1331         struct btrfs_key key;
1332         struct extent_buffer *leaf;
1333         struct btrfs_extent_data_ref *ref1;
1334         struct btrfs_shared_data_ref *ref2;
1335         u32 num_refs = 0;
1336
1337         leaf = path->nodes[0];
1338         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339         if (iref) {
1340                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1341                     BTRFS_EXTENT_DATA_REF_KEY) {
1342                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1343                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344                 } else {
1345                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1346                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1347                 }
1348         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1349                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1350                                       struct btrfs_extent_data_ref);
1351                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1352         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1353                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1354                                       struct btrfs_shared_data_ref);
1355                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1356 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1357         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1358                 struct btrfs_extent_ref_v0 *ref0;
1359                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1360                                       struct btrfs_extent_ref_v0);
1361                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1362 #endif
1363         } else {
1364                 WARN_ON(1);
1365         }
1366         return num_refs;
1367 }
1368
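/*
 * Look up the backref item for a tree block.  A non-zero @parent means the
 * block is shared, so we search for a SHARED_BLOCK_REF keyed on the parent
 * bytenr; otherwise we search for a TREE_BLOCK_REF keyed on the owning
 * root.  Returns 0 if found, -ENOENT if not, or a negative error from the
 * tree search.
 */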
1369 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1370                                           struct btrfs_root *root,
1371                                           struct btrfs_path *path,
1372                                           u64 bytenr, u64 parent,
1373                                           u64 root_objectid)
1374 {
1375         struct btrfs_key key;
1376         int ret;
1377
1378         key.objectid = bytenr;
1379         if (parent) {
1380                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1381                 key.offset = parent;
1382         } else {
1383                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1384                 key.offset = root_objectid;
1385         }
1386
1387         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388         if (ret > 0)
1389                 ret = -ENOENT;
1390 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1391         if (ret == -ENOENT && parent) {
1392                 btrfs_release_path(path);
1393                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1394                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1395                 if (ret > 0)
1396                         ret = -ENOENT;
1397         }
1398 #endif
1399         return ret;
1400 }
1401
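/*
 * Insert an empty backref item for a tree block, keyed the same way as in
 * lookup_tree_block_ref().  Tree block refs carry no payload, so the item
 * size is zero.
 */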
1402 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1403                                           struct btrfs_root *root,
1404                                           struct btrfs_path *path,
1405                                           u64 bytenr, u64 parent,
1406                                           u64 root_objectid)
1407 {
1408         struct btrfs_key key;
1409         int ret;
1410
1411         key.objectid = bytenr;
1412         if (parent) {
1413                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1414                 key.offset = parent;
1415         } else {
1416                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1417                 key.offset = root_objectid;
1418         }
1419
1420         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1421         btrfs_release_path(path);
1422         return ret;
1423 }
1424
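/*
 * Map (parent, owner) to the backref key type: tree blocks (owner below
 * BTRFS_FIRST_FREE_OBJECTID) use tree block refs, file data uses extent
 * data refs, and the shared variants are used whenever a parent block is
 * given.
 */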
1425 static inline int extent_ref_type(u64 parent, u64 owner)
1426 {
1427         int type;
1428         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1429                 if (parent > 0)
1430                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1431                 else
1432                         type = BTRFS_TREE_BLOCK_REF_KEY;
1433         } else {
1434                 if (parent > 0)
1435                         type = BTRFS_SHARED_DATA_REF_KEY;
1436                 else
1437                         type = BTRFS_EXTENT_DATA_REF_KEY;
1438         }
1439         return type;
1440 }
1441
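/*
 * Walk up the path starting at @level and return, in @key, the key just
 * past the current slot at the lowest level that still has a following
 * slot.  Returns 0 if such a key exists, 1 if we are at the end of the
 * tree.
 */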
1442 static int find_next_key(struct btrfs_path *path, int level,
1443                          struct btrfs_key *key)
1445 {
1446         for (; level < BTRFS_MAX_LEVEL; level++) {
1447                 if (!path->nodes[level])
1448                         break;
1449                 if (path->slots[level] + 1 >=
1450                     btrfs_header_nritems(path->nodes[level]))
1451                         continue;
1452                 if (level == 0)
1453                         btrfs_item_key_to_cpu(path->nodes[level], key,
1454                                               path->slots[level] + 1);
1455                 else
1456                         btrfs_node_key_to_cpu(path->nodes[level], key,
1457                                               path->slots[level] + 1);
1458                 return 0;
1459         }
1460         return 1;
1461 }
1462
1463 /*
1464  * look for inline back ref. if back ref is found, *ref_ret is set
1465  * to the address of inline back ref, and 0 is returned.
1466  *
1467  * if back ref isn't found, *ref_ret is set to the address where it
1468  * should be inserted, and -ENOENT is returned.
1469  *
1470  * if insert is true and there are too many inline back refs, the path
1471  * points to the extent item, and -EAGAIN is returned.
1472  *
1473  * NOTE: inline back refs are ordered in the same way that back ref
1474  *       items in the tree are ordered.
1475  */
1476 static noinline_for_stack
1477 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1478                                  struct btrfs_root *root,
1479                                  struct btrfs_path *path,
1480                                  struct btrfs_extent_inline_ref **ref_ret,
1481                                  u64 bytenr, u64 num_bytes,
1482                                  u64 parent, u64 root_objectid,
1483                                  u64 owner, u64 offset, int insert)
1484 {
1485         struct btrfs_key key;
1486         struct extent_buffer *leaf;
1487         struct btrfs_extent_item *ei;
1488         struct btrfs_extent_inline_ref *iref;
1489         u64 flags;
1490         u64 item_size;
1491         unsigned long ptr;
1492         unsigned long end;
1493         int extra_size;
1494         int type;
1495         int want;
1496         int ret;
1497         int err = 0;
1498         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1499                                                  SKINNY_METADATA);
1500
1501         key.objectid = bytenr;
1502         key.type = BTRFS_EXTENT_ITEM_KEY;
1503         key.offset = num_bytes;
1504
1505         want = extent_ref_type(parent, owner);
1506         if (insert) {
1507                 extra_size = btrfs_extent_inline_ref_size(want);
1508                 path->keep_locks = 1;
1509         } else
1510                 extra_size = -1;
1511
1512         /*
1513          * Owner is our parent level, so we can just add one to get the level
1514          * for the block we are interested in.
1515          */
1516         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1517                 key.type = BTRFS_METADATA_ITEM_KEY;
1518                 key.offset = owner;
1519         }
1520
1521 again:
1522         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1523         if (ret < 0) {
1524                 err = ret;
1525                 goto out;
1526         }
1527
1528         /*
1529          * We may be a newly converted file system which still has the old fat
1530          * extent entries for metadata, so try and see if we have one of those.
1531          */
1532         if (ret > 0 && skinny_metadata) {
1533                 skinny_metadata = false;
1534                 if (path->slots[0]) {
1535                         path->slots[0]--;
1536                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1537                                               path->slots[0]);
1538                         if (key.objectid == bytenr &&
1539                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1540                             key.offset == num_bytes)
1541                                 ret = 0;
1542                 }
1543                 if (ret) {
1544                         key.objectid = bytenr;
1545                         key.type = BTRFS_EXTENT_ITEM_KEY;
1546                         key.offset = num_bytes;
1547                         btrfs_release_path(path);
1548                         goto again;
1549                 }
1550         }
1551
1552         if (ret && !insert) {
1553                 err = -ENOENT;
1554                 goto out;
1555         } else if (WARN_ON(ret)) {
1556                 err = -EIO;
1557                 goto out;
1558         }
1559
1560         leaf = path->nodes[0];
1561         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563         if (item_size < sizeof(*ei)) {
1564                 if (!insert) {
1565                         err = -ENOENT;
1566                         goto out;
1567                 }
1568                 ret = convert_extent_item_v0(trans, root, path, owner,
1569                                              extra_size);
1570                 if (ret < 0) {
1571                         err = ret;
1572                         goto out;
1573                 }
1574                 leaf = path->nodes[0];
1575                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1576         }
1577 #endif
1578         BUG_ON(item_size < sizeof(*ei));
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         flags = btrfs_extent_flags(leaf, ei);
1582
1583         ptr = (unsigned long)(ei + 1);
1584         end = (unsigned long)ei + item_size;
1585
1586         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587                 ptr += sizeof(struct btrfs_tree_block_info);
1588                 BUG_ON(ptr > end);
1589         }
1590
1591         err = -ENOENT;
1592         while (1) {
1593                 if (ptr >= end) {
1594                         WARN_ON(ptr > end);
1595                         break;
1596                 }
1597                 iref = (struct btrfs_extent_inline_ref *)ptr;
1598                 type = btrfs_extent_inline_ref_type(leaf, iref);
1599                 if (want < type)
1600                         break;
1601                 if (want > type) {
1602                         ptr += btrfs_extent_inline_ref_size(type);
1603                         continue;
1604                 }
1605
1606                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607                         struct btrfs_extent_data_ref *dref;
1608                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609                         if (match_extent_data_ref(leaf, dref, root_objectid,
1610                                                   owner, offset)) {
1611                                 err = 0;
1612                                 break;
1613                         }
1614                         if (hash_extent_data_ref_item(leaf, dref) <
1615                             hash_extent_data_ref(root_objectid, owner, offset))
1616                                 break;
1617                 } else {
1618                         u64 ref_offset;
1619                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1620                         if (parent > 0) {
1621                                 if (parent == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < parent)
1626                                         break;
1627                         } else {
1628                                 if (root_objectid == ref_offset) {
1629                                         err = 0;
1630                                         break;
1631                                 }
1632                                 if (ref_offset < root_objectid)
1633                                         break;
1634                         }
1635                 }
1636                 ptr += btrfs_extent_inline_ref_size(type);
1637         }
1638         if (err == -ENOENT && insert) {
1639                 if (item_size + extra_size >=
1640                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1641                         err = -EAGAIN;
1642                         goto out;
1643                 }
1644                 /*
1645                  * To add a new inline back ref, we have to make sure
1646                  * there is no corresponding back ref item.
1647                  * For simplicity, we just do not add a new inline back
1648                  * ref if there is any kind of item for this block.
1649                  */
1650                 if (find_next_key(path, 0, &key) == 0 &&
1651                     key.objectid == bytenr &&
1652                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1653                         err = -EAGAIN;
1654                         goto out;
1655                 }
1656         }
1657         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1658 out:
1659         if (insert) {
1660                 path->keep_locks = 0;
1661                 btrfs_unlock_up_safe(path, 1);
1662         }
1663         return err;
1664 }
1665
1666 /*
1667  * helper to add new inline back ref
1668  */
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671                                  struct btrfs_path *path,
1672                                  struct btrfs_extent_inline_ref *iref,
1673                                  u64 parent, u64 root_objectid,
1674                                  u64 owner, u64 offset, int refs_to_add,
1675                                  struct btrfs_delayed_extent_op *extent_op)
1676 {
1677         struct extent_buffer *leaf;
1678         struct btrfs_extent_item *ei;
1679         unsigned long ptr;
1680         unsigned long end;
1681         unsigned long item_offset;
1682         u64 refs;
1683         int size;
1684         int type;
1685
1686         leaf = path->nodes[0];
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         item_offset = (unsigned long)iref - (unsigned long)ei;
1689
1690         type = extent_ref_type(parent, owner);
1691         size = btrfs_extent_inline_ref_size(type);
1692
1693         btrfs_extend_item(root, path, size);
1694
1695         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696         refs = btrfs_extent_refs(leaf, ei);
1697         refs += refs_to_add;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         ptr = (unsigned long)ei + item_offset;
1703         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1704         if (ptr < end - size)
1705                 memmove_extent_buffer(leaf, ptr + size, ptr,
1706                                       end - size - ptr);
1707
1708         iref = (struct btrfs_extent_inline_ref *)ptr;
1709         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711                 struct btrfs_extent_data_ref *dref;
1712                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718                 struct btrfs_shared_data_ref *sref;
1719                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1724         } else {
1725                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1726         }
1727         btrfs_mark_buffer_dirty(leaf);
1728 }
1729
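/*
 * Look up a backref for the given extent, trying the inline form inside
 * the extent item first and falling back to a separate backref item (tree
 * block or data ref) if no inline ref exists.  On the fallback path
 * *ref_ret is cleared so callers can tell which form was found.
 */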
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731                                  struct btrfs_root *root,
1732                                  struct btrfs_path *path,
1733                                  struct btrfs_extent_inline_ref **ref_ret,
1734                                  u64 bytenr, u64 num_bytes, u64 parent,
1735                                  u64 root_objectid, u64 owner, u64 offset)
1736 {
1737         int ret;
1738
1739         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740                                            bytenr, num_bytes, parent,
1741                                            root_objectid, owner, offset, 0);
1742         if (ret != -ENOENT)
1743                 return ret;
1744
1745         btrfs_release_path(path);
1746         *ref_ret = NULL;
1747
1748         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1750                                             root_objectid);
1751         } else {
1752                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753                                              root_objectid, owner, offset);
1754         }
1755         return ret;
1756 }
1757
1758 /*
1759  * helper to update/remove inline back ref
1760  */
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763                                   struct btrfs_path *path,
1764                                   struct btrfs_extent_inline_ref *iref,
1765                                   int refs_to_mod,
1766                                   struct btrfs_delayed_extent_op *extent_op,
1767                                   int *last_ref)
1768 {
1769         struct extent_buffer *leaf;
1770         struct btrfs_extent_item *ei;
1771         struct btrfs_extent_data_ref *dref = NULL;
1772         struct btrfs_shared_data_ref *sref = NULL;
1773         unsigned long ptr;
1774         unsigned long end;
1775         u32 item_size;
1776         int size;
1777         int type;
1778         u64 refs;
1779
1780         leaf = path->nodes[0];
1781         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1782         refs = btrfs_extent_refs(leaf, ei);
1783         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1784         refs += refs_to_mod;
1785         btrfs_set_extent_refs(leaf, ei, refs);
1786         if (extent_op)
1787                 __run_delayed_extent_op(extent_op, leaf, ei);
1788
1789         type = btrfs_extent_inline_ref_type(leaf, iref);
1790
1791         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1792                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1793                 refs = btrfs_extent_data_ref_count(leaf, dref);
1794         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1795                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1796                 refs = btrfs_shared_data_ref_count(leaf, sref);
1797         } else {
1798                 refs = 1;
1799                 BUG_ON(refs_to_mod != -1);
1800         }
1801
1802         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1803         refs += refs_to_mod;
1804
1805         if (refs > 0) {
1806                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1807                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1808                 else
1809                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1810         } else {
1811                 *last_ref = 1;
1812                 size = btrfs_extent_inline_ref_size(type);
1813                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1814                 ptr = (unsigned long)iref;
1815                 end = (unsigned long)ei + item_size;
1816                 if (ptr + size < end)
1817                         memmove_extent_buffer(leaf, ptr, ptr + size,
1818                                               end - ptr - size);
1819                 item_size -= size;
1820                 btrfs_truncate_item(root, path, item_size, 1);
1821         }
1822         btrfs_mark_buffer_dirty(leaf);
1823 }
1824
1825 static noinline_for_stack
1826 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1827                                  struct btrfs_root *root,
1828                                  struct btrfs_path *path,
1829                                  u64 bytenr, u64 num_bytes, u64 parent,
1830                                  u64 root_objectid, u64 owner,
1831                                  u64 offset, int refs_to_add,
1832                                  struct btrfs_delayed_extent_op *extent_op)
1833 {
1834         struct btrfs_extent_inline_ref *iref;
1835         int ret;
1836
1837         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1838                                            bytenr, num_bytes, parent,
1839                                            root_objectid, owner, offset, 1);
1840         if (ret == 0) {
1841                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1842                 update_inline_extent_backref(root, path, iref,
1843                                              refs_to_add, extent_op, NULL);
1844         } else if (ret == -ENOENT) {
1845                 setup_inline_extent_backref(root, path, iref, parent,
1846                                             root_objectid, owner, offset,
1847                                             refs_to_add, extent_op);
1848                 ret = 0;
1849         }
1850         return ret;
1851 }
1852
1853 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1854                                  struct btrfs_root *root,
1855                                  struct btrfs_path *path,
1856                                  u64 bytenr, u64 parent, u64 root_objectid,
1857                                  u64 owner, u64 offset, int refs_to_add)
1858 {
1859         int ret;
1860         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1861                 BUG_ON(refs_to_add != 1);
1862                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1863                                             parent, root_objectid);
1864         } else {
1865                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1866                                              parent, root_objectid,
1867                                              owner, offset, refs_to_add);
1868         }
1869         return ret;
1870 }
1871
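/*
 * Drop @refs_to_drop references from a backref.  Inline refs are updated
 * (or removed) in place, keyed data ref items have their count decremented
 * and are deleted once it reaches zero, and tree block ref items are
 * simply deleted.  *last_ref is set when the final reference goes away.
 */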
1872 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1873                                  struct btrfs_root *root,
1874                                  struct btrfs_path *path,
1875                                  struct btrfs_extent_inline_ref *iref,
1876                                  int refs_to_drop, int is_data, int *last_ref)
1877 {
1878         int ret = 0;
1879
1880         BUG_ON(!is_data && refs_to_drop != 1);
1881         if (iref) {
1882                 update_inline_extent_backref(root, path, iref,
1883                                              -refs_to_drop, NULL, last_ref);
1884         } else if (is_data) {
1885                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1886                                              last_ref);
1887         } else {
1888                 *last_ref = 1;
1889                 ret = btrfs_del_item(trans, root, path);
1890         }
1891         return ret;
1892 }
1893
1894 static int btrfs_issue_discard(struct block_device *bdev,
1895                                 u64 start, u64 len)
1896 {
1897         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1898 }
1899
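/*
 * Discard a logical extent: map it to its physical stripes with
 * btrfs_map_block() and issue a discard to every device that supports it.
 * -EOPNOTSUPP from individual devices is ignored, and the number of bytes
 * actually discarded is reported via *actual_bytes when requested.
 */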
1900 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1901                                 u64 num_bytes, u64 *actual_bytes)
1902 {
1903         int ret;
1904         u64 discarded_bytes = 0;
1905         struct btrfs_bio *bbio = NULL;
1906
1907
1908         /* Tell the block device(s) that the sectors can be discarded */
1909         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1910                               bytenr, &num_bytes, &bbio, 0);
1911         /* Error condition is -ENOMEM */
1912         if (!ret) {
1913                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1914                 int i;
1915
1916
1917                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1918                         if (!stripe->dev->can_discard)
1919                                 continue;
1920
1921                         ret = btrfs_issue_discard(stripe->dev->bdev,
1922                                                   stripe->physical,
1923                                                   stripe->length);
1924                         if (!ret)
1925                                 discarded_bytes += stripe->length;
1926                         else if (ret != -EOPNOTSUPP)
1927                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1928
1929                         /*
1930                          * Just in case we get back EOPNOTSUPP for some reason,
1931                          * just ignore the return value so we don't screw up
1932                          * people calling discard_extent.
1933                          */
1934                         ret = 0;
1935                 }
1936                 kfree(bbio);
1937         }
1938
1939         if (actual_bytes)
1940                 *actual_bytes = discarded_bytes;
1941
1942
1943         if (ret == -EOPNOTSUPP)
1944                 ret = 0;
1945         return ret;
1946 }
1947
1948 /* Can return -ENOMEM */
1949 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1950                          struct btrfs_root *root,
1951                          u64 bytenr, u64 num_bytes, u64 parent,
1952                          u64 root_objectid, u64 owner, u64 offset,
1953                          int no_quota)
1954 {
1955         int ret;
1956         struct btrfs_fs_info *fs_info = root->fs_info;
1957
1958         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1959                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1960
1961         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1962                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1963                                         num_bytes,
1964                                         parent, root_objectid, (int)owner,
1965                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1966         } else {
1967                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1968                                         num_bytes,
1969                                         parent, root_objectid, owner, offset,
1970                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
1971         }
1972         return ret;
1973 }
1974
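/*
 * Add @refs_to_add references to an existing extent.  We first try to
 * insert or update an inline backref; if that returns -EAGAIN (no room
 * left inline) we bump the refcount in the extent item and insert a
 * separate backref item instead.  Qgroup operations are recorded unless
 * no_quota is set or quotas are disabled.
 */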
1975 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1976                                   struct btrfs_root *root,
1977                                   u64 bytenr, u64 num_bytes,
1978                                   u64 parent, u64 root_objectid,
1979                                   u64 owner, u64 offset, int refs_to_add,
1980                                   int no_quota,
1981                                   struct btrfs_delayed_extent_op *extent_op)
1982 {
1983         struct btrfs_fs_info *fs_info = root->fs_info;
1984         struct btrfs_path *path;
1985         struct extent_buffer *leaf;
1986         struct btrfs_extent_item *item;
1987         struct btrfs_key key;
1988         u64 refs;
1989         int ret;
1990         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
1991
1992         path = btrfs_alloc_path();
1993         if (!path)
1994                 return -ENOMEM;
1995
1996         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
1997                 no_quota = 1;
1998
1999         path->reada = 1;
2000         path->leave_spinning = 1;
2001         /* this will set up the path even if it fails to insert the back ref */
2002         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2003                                            bytenr, num_bytes, parent,
2004                                            root_objectid, owner, offset,
2005                                            refs_to_add, extent_op);
2006         if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
2007                 goto out;
2008         /*
2009          * Ok we were able to insert an inline extent ref and it appears to be a
2010          * new reference, so deal with the qgroup accounting.
2011          */
2012         if (!ret && !no_quota) {
2013                 ASSERT(root->fs_info->quota_enabled);
2014                 leaf = path->nodes[0];
2015                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2016                 item = btrfs_item_ptr(leaf, path->slots[0],
2017                                       struct btrfs_extent_item);
2018                 if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
2019                         type = BTRFS_QGROUP_OPER_ADD_SHARED;
2020                 btrfs_release_path(path);
2021
2022                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2023                                               bytenr, num_bytes, type, 0);
2024                 goto out;
2025         }
2026
2027         /*
2028          * Ok we had -EAGAIN which means we didn't have space to insert an
2029          * inline extent ref, so just update the reference count and add a
2030          * normal backref.
2031          */
2032         leaf = path->nodes[0];
2033         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2034         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2035         refs = btrfs_extent_refs(leaf, item);
2036         if (refs)
2037                 type = BTRFS_QGROUP_OPER_ADD_SHARED;
2038         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2039         if (extent_op)
2040                 __run_delayed_extent_op(extent_op, leaf, item);
2041
2042         btrfs_mark_buffer_dirty(leaf);
2043         btrfs_release_path(path);
2044
2045         if (!no_quota) {
2046                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
2047                                               bytenr, num_bytes, type, 0);
2048                 if (ret)
2049                         goto out;
2050         }
2051
2052         path->reada = 1;
2053         path->leave_spinning = 1;
2054         /* now insert the actual backref */
2055         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2056                                     path, bytenr, parent, root_objectid,
2057                                     owner, offset, refs_to_add);
2058         if (ret)
2059                 btrfs_abort_transaction(trans, root, ret);
2060 out:
2061         btrfs_free_path(path);
2062         return ret;
2063 }
2064
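/*
 * Apply a single delayed data ref: allocate the reserved file extent for a
 * brand new allocation, or add/drop one reference on an existing extent,
 * depending on the delayed ref action.
 */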
2065 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2066                                 struct btrfs_root *root,
2067                                 struct btrfs_delayed_ref_node *node,
2068                                 struct btrfs_delayed_extent_op *extent_op,
2069                                 int insert_reserved)
2070 {
2071         int ret = 0;
2072         struct btrfs_delayed_data_ref *ref;
2073         struct btrfs_key ins;
2074         u64 parent = 0;
2075         u64 ref_root = 0;
2076         u64 flags = 0;
2077
2078         ins.objectid = node->bytenr;
2079         ins.offset = node->num_bytes;
2080         ins.type = BTRFS_EXTENT_ITEM_KEY;
2081
2082         ref = btrfs_delayed_node_to_data_ref(node);
2083         trace_run_delayed_data_ref(node, ref, node->action);
2084
2085         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2086                 parent = ref->parent;
2087         ref_root = ref->root;
2088
2089         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2090                 if (extent_op)
2091                         flags |= extent_op->flags_to_set;
2092                 ret = alloc_reserved_file_extent(trans, root,
2093                                                  parent, ref_root, flags,
2094                                                  ref->objectid, ref->offset,
2095                                                  &ins, node->ref_mod);
2096         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2097                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2098                                              node->num_bytes, parent,
2099                                              ref_root, ref->objectid,
2100                                              ref->offset, node->ref_mod,
2101                                              node->no_quota, extent_op);
2102         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2103                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2104                                           node->num_bytes, parent,
2105                                           ref_root, ref->objectid,
2106                                           ref->offset, node->ref_mod,
2107                                           extent_op, node->no_quota);
2108         } else {
2109                 BUG();
2110         }
2111         return ret;
2112 }
2113
2114 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2115                                     struct extent_buffer *leaf,
2116                                     struct btrfs_extent_item *ei)
2117 {
2118         u64 flags = btrfs_extent_flags(leaf, ei);
2119         if (extent_op->update_flags) {
2120                 flags |= extent_op->flags_to_set;
2121                 btrfs_set_extent_flags(leaf, ei, flags);
2122         }
2123
2124         if (extent_op->update_key) {
2125                 struct btrfs_tree_block_info *bi;
2126                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2127                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2128                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2129         }
2130 }
2131
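/*
 * Apply a delayed extent op (flags and/or key update) directly to the
 * extent item.  With skinny metadata enabled the METADATA_ITEM key is
 * tried first, falling back to the old fat EXTENT_ITEM key for extents
 * that were created before the conversion.
 */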
2132 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2133                                  struct btrfs_root *root,
2134                                  struct btrfs_delayed_ref_node *node,
2135                                  struct btrfs_delayed_extent_op *extent_op)
2136 {
2137         struct btrfs_key key;
2138         struct btrfs_path *path;
2139         struct btrfs_extent_item *ei;
2140         struct extent_buffer *leaf;
2141         u32 item_size;
2142         int ret;
2143         int err = 0;
2144         int metadata = !extent_op->is_data;
2145
2146         if (trans->aborted)
2147                 return 0;
2148
2149         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2150                 metadata = 0;
2151
2152         path = btrfs_alloc_path();
2153         if (!path)
2154                 return -ENOMEM;
2155
2156         key.objectid = node->bytenr;
2157
2158         if (metadata) {
2159                 key.type = BTRFS_METADATA_ITEM_KEY;
2160                 key.offset = extent_op->level;
2161         } else {
2162                 key.type = BTRFS_EXTENT_ITEM_KEY;
2163                 key.offset = node->num_bytes;
2164         }
2165
2166 again:
2167         path->reada = 1;
2168         path->leave_spinning = 1;
2169         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2170                                 path, 0, 1);
2171         if (ret < 0) {
2172                 err = ret;
2173                 goto out;
2174         }
2175         if (ret > 0) {
2176                 if (metadata) {
2177                         if (path->slots[0] > 0) {
2178                                 path->slots[0]--;
2179                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2180                                                       path->slots[0]);
2181                                 if (key.objectid == node->bytenr &&
2182                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2183                                     key.offset == node->num_bytes)
2184                                         ret = 0;
2185                         }
2186                         if (ret > 0) {
2187                                 btrfs_release_path(path);
2188                                 metadata = 0;
2189
2190                                 key.objectid = node->bytenr;
2191                                 key.offset = node->num_bytes;
2192                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2193                                 goto again;
2194                         }
2195                 } else {
2196                         err = -EIO;
2197                         goto out;
2198                 }
2199         }
2200
2201         leaf = path->nodes[0];
2202         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2203 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2204         if (item_size < sizeof(*ei)) {
2205                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2206                                              path, (u64)-1, 0);
2207                 if (ret < 0) {
2208                         err = ret;
2209                         goto out;
2210                 }
2211                 leaf = path->nodes[0];
2212                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2213         }
2214 #endif
2215         BUG_ON(item_size < sizeof(*ei));
2216         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2217         __run_delayed_extent_op(extent_op, leaf, ei);
2218
2219         btrfs_mark_buffer_dirty(leaf);
2220 out:
2221         btrfs_free_path(path);
2222         return err;
2223 }
2224
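/*
 * Apply a single delayed tree block ref.  Tree block refs always arrive
 * here with a ref_mod of 1, so this either allocates the reserved block,
 * adds one reference, or drops one reference.
 */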
2225 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2226                                 struct btrfs_root *root,
2227                                 struct btrfs_delayed_ref_node *node,
2228                                 struct btrfs_delayed_extent_op *extent_op,
2229                                 int insert_reserved)
2230 {
2231         int ret = 0;
2232         struct btrfs_delayed_tree_ref *ref;
2233         struct btrfs_key ins;
2234         u64 parent = 0;
2235         u64 ref_root = 0;
2236         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2237                                                  SKINNY_METADATA);
2238
2239         ref = btrfs_delayed_node_to_tree_ref(node);
2240         trace_run_delayed_tree_ref(node, ref, node->action);
2241
2242         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2243                 parent = ref->parent;
2244         ref_root = ref->root;
2245
2246         ins.objectid = node->bytenr;
2247         if (skinny_metadata) {
2248                 ins.offset = ref->level;
2249                 ins.type = BTRFS_METADATA_ITEM_KEY;
2250         } else {
2251                 ins.offset = node->num_bytes;
2252                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2253         }
2254
2255         BUG_ON(node->ref_mod != 1);
2256         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2257                 BUG_ON(!extent_op || !extent_op->update_flags);
2258                 ret = alloc_reserved_tree_block(trans, root,
2259                                                 parent, ref_root,
2260                                                 extent_op->flags_to_set,
2261                                                 &extent_op->key,
2262                                                 ref->level, &ins,
2263                                                 node->no_quota);
2264         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2265                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2266                                              node->num_bytes, parent, ref_root,
2267                                              ref->level, 0, 1, node->no_quota,
2268                                              extent_op);
2269         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2270                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2271                                           node->num_bytes, parent, ref_root,
2272                                           ref->level, 0, 1, extent_op,
2273                                           node->no_quota);
2274         } else {
2275                 BUG();
2276         }
2277         return ret;
2278 }
2279
2280 /* helper function to actually process a single delayed ref entry */
2281 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2282                                struct btrfs_root *root,
2283                                struct btrfs_delayed_ref_node *node,
2284                                struct btrfs_delayed_extent_op *extent_op,
2285                                int insert_reserved)
2286 {
2287         int ret = 0;
2288
2289         if (trans->aborted) {
2290                 if (insert_reserved)
2291                         btrfs_pin_extent(root, node->bytenr,
2292                                          node->num_bytes, 1);
2293                 return 0;
2294         }
2295
2296         if (btrfs_delayed_ref_is_head(node)) {
2297                 struct btrfs_delayed_ref_head *head;
2298                 /*
2299                  * we've hit the end of the chain and we were supposed
2300                  * to insert this extent into the tree.  But, it got
2301                  * deleted before we ever needed to insert it, so all
2302                  * we have to do is clean up the accounting
2303                  */
2304                 BUG_ON(extent_op);
2305                 head = btrfs_delayed_node_to_head(node);
2306                 trace_run_delayed_ref_head(node, head, node->action);
2307
2308                 if (insert_reserved) {
2309                         btrfs_pin_extent(root, node->bytenr,
2310                                          node->num_bytes, 1);
2311                         if (head->is_data) {
2312                                 ret = btrfs_del_csums(trans, root,
2313                                                       node->bytenr,
2314                                                       node->num_bytes);
2315                         }
2316                 }
2317                 return ret;
2318         }
2319
2320         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2321             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2322                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2323                                            insert_reserved);
2324         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2325                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2326                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2327                                            insert_reserved);
2328         else
2329                 BUG();
2330         return ret;
2331 }
2332
2333 static noinline struct btrfs_delayed_ref_node *
2334 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2335 {
2336         struct rb_node *node;
2337         struct btrfs_delayed_ref_node *ref, *last = NULL;
2338
2339         /*
2340          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2341          * This prevents the ref count from going down to zero while
2342          * there are still pending delayed refs.
2343          */
2344         node = rb_first(&head->ref_root);
2345         while (node) {
2346                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2347                                 rb_node);
2348                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2349                         return ref;
2350                 else if (last == NULL)
2351                         last = ref;
2352                 node = rb_next(node);
2353         }
2354         return last;
2355 }
2356
2357 /*
2358  * Returns 0 on success or if called with an already aborted transaction.
2359  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2360  */
2361 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2362                                              struct btrfs_root *root,
2363                                              unsigned long nr)
2364 {
2365         struct btrfs_delayed_ref_root *delayed_refs;
2366         struct btrfs_delayed_ref_node *ref;
2367         struct btrfs_delayed_ref_head *locked_ref = NULL;
2368         struct btrfs_delayed_extent_op *extent_op;
2369         struct btrfs_fs_info *fs_info = root->fs_info;
2370         ktime_t start = ktime_get();
2371         int ret;
2372         unsigned long count = 0;
2373         unsigned long actual_count = 0;
2374         int must_insert_reserved = 0;
2375
2376         delayed_refs = &trans->transaction->delayed_refs;
2377         while (1) {
2378                 if (!locked_ref) {
2379                         if (count >= nr)
2380                                 break;
2381
2382                         spin_lock(&delayed_refs->lock);
2383                         locked_ref = btrfs_select_ref_head(trans);
2384                         if (!locked_ref) {
2385                                 spin_unlock(&delayed_refs->lock);
2386                                 break;
2387                         }
2388
2389                         /* grab the lock that says we are going to process
2390                          * all the refs for this head */
2391                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2392                         spin_unlock(&delayed_refs->lock);
2393                         /*
2394                          * we may have dropped the spin lock to get the head
2395                          * mutex lock, and that might have given someone else
2396                          * time to free the head.  If that's true, it has been
2397                          * removed from our list and we can move on.
2398                          */
2399                         if (ret == -EAGAIN) {
2400                                 locked_ref = NULL;
2401                                 count++;
2402                                 continue;
2403                         }
2404                 }
2405
2406                 /*
2407                  * We need to try and merge add/drops of the same ref since we
2408                  * can run into issues with relocate dropping the implicit ref
2409                  * and then it being added back again before the drop can
2410                  * finish.  If we merged anything we need to re-loop so we can
2411                  * get a good ref.
2412                  */
2413                 spin_lock(&locked_ref->lock);
2414                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2415                                          locked_ref);
2416
2417                 /*
2418                  * locked_ref is the head node, so we have to go one
2419                  * node back for any delayed ref updates
2420                  */
2421                 ref = select_delayed_ref(locked_ref);
2422
2423                 if (ref && ref->seq &&
2424                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2425                         spin_unlock(&locked_ref->lock);
2426                         btrfs_delayed_ref_unlock(locked_ref);
2427                         spin_lock(&delayed_refs->lock);
2428                         locked_ref->processing = 0;
2429                         delayed_refs->num_heads_ready++;
2430                         spin_unlock(&delayed_refs->lock);
2431                         locked_ref = NULL;
2432                         cond_resched();
2433                         count++;
2434                         continue;
2435                 }
2436
2437                 /*
2438                  * record the must insert reserved flag before we
2439                  * drop the spin lock.
2440                  */
2441                 must_insert_reserved = locked_ref->must_insert_reserved;
2442                 locked_ref->must_insert_reserved = 0;
2443
2444                 extent_op = locked_ref->extent_op;
2445                 locked_ref->extent_op = NULL;
2446
2447                 if (!ref) {
2448
2449
2450                         /* All delayed refs have been processed, go ahead
2451                          * and send the head node to run_one_delayed_ref,
2452                          * so that any accounting fixes can happen
2453                          */
2454                         ref = &locked_ref->node;
2455
2456                         if (extent_op && must_insert_reserved) {
2457                                 btrfs_free_delayed_extent_op(extent_op);
2458                                 extent_op = NULL;
2459                         }
2460
2461                         if (extent_op) {
2462                                 spin_unlock(&locked_ref->lock);
2463                                 ret = run_delayed_extent_op(trans, root,
2464                                                             ref, extent_op);
2465                                 btrfs_free_delayed_extent_op(extent_op);
2466
2467                                 if (ret) {
2468                                         /*
2469                                          * Need to reset must_insert_reserved if
2470                                          * there was an error so the abort stuff
2471                                          * can cleanup the reserved space
2472                                          * properly.
2473                                          */
2474                                         if (must_insert_reserved)
2475                                                 locked_ref->must_insert_reserved = 1;
2476                                         locked_ref->processing = 0;
2477                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2478                                         btrfs_delayed_ref_unlock(locked_ref);
2479                                         return ret;
2480                                 }
2481                                 continue;
2482                         }
2483
2484                         /*
2485                          * Need to drop our head ref lock and re-acquire the
2486                          * delayed ref lock and then re-check to make sure
2487                          * nobody got added.
2488                          */
2489                         spin_unlock(&locked_ref->lock);
2490                         spin_lock(&delayed_refs->lock);
2491                         spin_lock(&locked_ref->lock);
2492                         if (rb_first(&locked_ref->ref_root) ||
2493                             locked_ref->extent_op) {
2494                                 spin_unlock(&locked_ref->lock);
2495                                 spin_unlock(&delayed_refs->lock);
2496                                 continue;
2497                         }
2498                         ref->in_tree = 0;
2499                         delayed_refs->num_heads--;
2500                         rb_erase(&locked_ref->href_node,
2501                                  &delayed_refs->href_root);
2502                         spin_unlock(&delayed_refs->lock);
2503                 } else {
2504                         actual_count++;
2505                         ref->in_tree = 0;
2506                         rb_erase(&ref->rb_node, &locked_ref->ref_root);
2507                 }
2508                 atomic_dec(&delayed_refs->num_entries);
2509
2510                 if (!btrfs_delayed_ref_is_head(ref)) {
2511                         /*
2512                          * when we play the delayed ref, also correct the
2513                          * ref_mod on head
2514                          */
2515                         switch (ref->action) {
2516                         case BTRFS_ADD_DELAYED_REF:
2517                         case BTRFS_ADD_DELAYED_EXTENT:
2518                                 locked_ref->node.ref_mod -= ref->ref_mod;
2519                                 break;
2520                         case BTRFS_DROP_DELAYED_REF:
2521                                 locked_ref->node.ref_mod += ref->ref_mod;
2522                                 break;
2523                         default:
2524                                 WARN_ON(1);
2525                         }
2526                 }
2527                 spin_unlock(&locked_ref->lock);
2528
2529                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2530                                           must_insert_reserved);
2531
2532                 btrfs_free_delayed_extent_op(extent_op);
2533                 if (ret) {
2534                         locked_ref->processing = 0;
2535                         btrfs_delayed_ref_unlock(locked_ref);
2536                         btrfs_put_delayed_ref(ref);
2537                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2538                         return ret;
2539                 }
2540
2541                 /*
2542                  * If this node is a head, that means all the refs in this head
2543                  * have been dealt with, and we will pick the next head to deal
2544                  * with, so we must unlock the head and drop it from the cluster
2545                  * list before we release it.
2546                  */
2547                 if (btrfs_delayed_ref_is_head(ref)) {
2548                         btrfs_delayed_ref_unlock(locked_ref);
2549                         locked_ref = NULL;
2550                 }
2551                 btrfs_put_delayed_ref(ref);
2552                 count++;
2553                 cond_resched();
2554         }
2555
2556         /*
2557          * We don't want to include ref heads since we can have empty ref heads
2558          * and those will drastically skew our runtime down since we just do
2559          * accounting, no actual extent tree updates.
2560          */
2561         if (actual_count > 0) {
2562                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2563                 u64 avg;
2564
2565                 /*
2566                  * We weigh the current average higher than our current runtime
2567                  * to avoid large swings in the average.
2568                  */
2569                 spin_lock(&delayed_refs->lock);
2570                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2571                 avg = div64_u64(avg, 4);
2572                 fs_info->avg_delayed_ref_runtime = avg;
2573                 spin_unlock(&delayed_refs->lock);
2574         }
2575         return 0;
2576 }
2577
2578 #ifdef SCRAMBLE_DELAYED_REFS
2579 /*
2580  * Normally delayed refs get processed in ascending bytenr order. This
2581  * correlates in most cases to the order added. To expose dependencies on this
2582  * order, we start to process the tree in the middle instead of the beginning
2583  */
2584 static u64 find_middle(struct rb_root *root)
2585 {
2586         struct rb_node *n = root->rb_node;
2587         struct btrfs_delayed_ref_node *entry;
2588         int alt = 1;
2589         u64 middle;
2590         u64 first = 0, last = 0;
2591
2592         n = rb_first(root);
2593         if (n) {
2594                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2595                 first = entry->bytenr;
2596         }
2597         n = rb_last(root);
2598         if (n) {
2599                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2600                 last = entry->bytenr;
2601         }
2602         n = root->rb_node;
2603
2604         while (n) {
2605                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2606                 WARN_ON(!entry->in_tree);
2607
2608                 middle = entry->bytenr;
2609
2610                 if (alt)
2611                         n = n->rb_left;
2612                 else
2613                         n = n->rb_right;
2614
2615                 alt = 1 - alt;
2616         }
2617         return middle;
2618 }
2619 #endif
2620
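/*
 * Estimate how many extent tree leaves the given number of delayed ref
 * heads may touch, based on the size of an extent item plus one inline
 * ref (plus the tree block info on non-skinny filesystems).
 */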
2621 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2622 {
2623         u64 num_bytes;
2624
2625         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2626                              sizeof(struct btrfs_extent_inline_ref));
2627         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2628                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2629
2630         /*
2631          * We don't ever fill up leaves all the way so multiply by 2 just to be
2632          * closer to what we're really going to want to use.
2633          */
2634         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2635 }
2636
2637 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2638                                        struct btrfs_root *root)
2639 {
2640         struct btrfs_block_rsv *global_rsv;
2641         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2642         u64 num_bytes;
2643         int ret = 0;
2644
2645         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2646         num_heads = heads_to_leaves(root, num_heads);
2647         if (num_heads > 1)
2648                 num_bytes += (num_heads - 1) * root->nodesize;
2649         num_bytes <<= 1;
2650         global_rsv = &root->fs_info->global_block_rsv;
2651
2652         /*
2653          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2654          * wiggle room since running delayed refs can create more delayed refs.
2655          */
2656         if (global_rsv->space_info->full)
2657                 num_bytes <<= 1;
2658
2659         spin_lock(&global_rsv->lock);
2660         if (global_rsv->reserved <= num_bytes)
2661                 ret = 1;
2662         spin_unlock(&global_rsv->lock);
2663         return ret;
2664 }
2665
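     /*
      * Estimate how long running the pending delayed ref entries would take
      * from the recorded average per-ref runtime: 1 means over a second of
      * work is queued, 2 means over half a second, otherwise fall back to
      * the global reserve space check above.
      */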
2666 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2667                                        struct btrfs_root *root)
2668 {
2669         struct btrfs_fs_info *fs_info = root->fs_info;
2670         u64 num_entries =
2671                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2672         u64 avg_runtime;
2673         u64 val;
2674
2675         smp_mb();
2676         avg_runtime = fs_info->avg_delayed_ref_runtime;
2677         val = num_entries * avg_runtime;
2678         if (val >= NSEC_PER_SEC)
2679                 return 1;
2680         if (val >= NSEC_PER_SEC / 2)
2681                 return 2;
2682
2683         return btrfs_check_space_for_delayed_refs(trans, root);
2684 }
2685
2686 struct async_delayed_refs {
2687         struct btrfs_root *root;
2688         int count;
2689         int error;
2690         int sync;
2691         struct completion wait;
2692         struct btrfs_work work;
2693 };
2694
2695 static void delayed_ref_async_start(struct btrfs_work *work)
2696 {
2697         struct async_delayed_refs *async;
2698         struct btrfs_trans_handle *trans;
2699         int ret;
2700
2701         async = container_of(work, struct async_delayed_refs, work);
2702
2703         trans = btrfs_join_transaction(async->root);
2704         if (IS_ERR(trans)) {
2705                 async->error = PTR_ERR(trans);
2706                 goto done;
2707         }
2708
2709         /*
2710          * trans->sync means that when we call end_transaction, we won't
2711          * wait on delayed refs
2712          */
2713         trans->sync = true;
2714         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2715         if (ret)
2716                 async->error = ret;
2717
2718         ret = btrfs_end_transaction(trans, async->root);
2719         if (ret && !async->error)
2720                 async->error = ret;
2721 done:
2722         if (async->sync)
2723                 complete(&async->wait);
2724         else
2725                 kfree(async);
2726 }
2727
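     /*
      * Queue a worker that joins a transaction and runs up to @count delayed
      * refs.  If @wait is set, block until the worker finishes and return its
      * error; otherwise the worker frees the async struct itself and we
      * return immediately.
      */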
2728 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2729                                  unsigned long count, int wait)
2730 {
2731         struct async_delayed_refs *async;
2732         int ret;
2733
2734         async = kmalloc(sizeof(*async), GFP_NOFS);
2735         if (!async)
2736                 return -ENOMEM;
2737
2738         async->root = root->fs_info->tree_root;
2739         async->count = count;
2740         async->error = 0;
2741         if (wait)
2742                 async->sync = 1;
2743         else
2744                 async->sync = 0;
2745         init_completion(&async->wait);
2746
2747         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2748                         delayed_ref_async_start, NULL, NULL);
2749
2750         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2751
2752         if (wait) {
2753                 wait_for_completion(&async->wait);
2754                 ret = async->error;
2755                 kfree(async);
2756                 return ret;
2757         }
2758         return 0;
2759 }
2760
2761 /*
2762  * this starts processing the delayed reference count updates and
2763  * extent insertions we have queued up so far.  count can be
2764  * 0, which means to process everything in the tree at the start
2765  * of the run (but not newly added entries), or it can be some target
2766  * number you'd like to process.
2767  *
2768  * Returns 0 on success or if called with an aborted transaction
2769  * Returns <0 on error and aborts the transaction
2770  */
2771 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2772                            struct btrfs_root *root, unsigned long count)
2773 {
2774         struct rb_node *node;
2775         struct btrfs_delayed_ref_root *delayed_refs;
2776         struct btrfs_delayed_ref_head *head;
2777         int ret;
2778         int run_all = count == (unsigned long)-1;
2779         int run_most = 0;
2780
2781         /* We'll clean this up in btrfs_cleanup_transaction */
2782         if (trans->aborted)
2783                 return 0;
2784
2785         if (root == root->fs_info->extent_root)
2786                 root = root->fs_info->tree_root;
2787
2788         delayed_refs = &trans->transaction->delayed_refs;
2789         if (count == 0) {
2790                 count = atomic_read(&delayed_refs->num_entries) * 2;
2791                 run_most = 1;
2792         }
2793
2794 again:
2795 #ifdef SCRAMBLE_DELAYED_REFS
2796         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2797 #endif
2798         ret = __btrfs_run_delayed_refs(trans, root, count);
2799         if (ret < 0) {
2800                 btrfs_abort_transaction(trans, root, ret);
2801                 return ret;
2802         }
2803
2804         if (run_all) {
2805                 if (!list_empty(&trans->new_bgs))
2806                         btrfs_create_pending_block_groups(trans, root);
2807
2808                 spin_lock(&delayed_refs->lock);
2809                 node = rb_first(&delayed_refs->href_root);
2810                 if (!node) {
2811                         spin_unlock(&delayed_refs->lock);
2812                         goto out;
2813                 }
2814                 count = (unsigned long)-1;
2815
2816                 while (node) {
2817                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2818                                         href_node);
2819                         if (btrfs_delayed_ref_is_head(&head->node)) {
2820                                 struct btrfs_delayed_ref_node *ref;
2821
2822                                 ref = &head->node;
2823                                 atomic_inc(&ref->refs);
2824
2825                                 spin_unlock(&delayed_refs->lock);
2826                                 /*
2827                                  * Mutex was contended, block until it's
2828                                  * released and try again
2829                                  */
2830                                 mutex_lock(&head->mutex);
2831                                 mutex_unlock(&head->mutex);
2832
2833                                 btrfs_put_delayed_ref(ref);
2834                                 cond_resched();
2835                                 goto again;
2836                         } else {
2837                                 WARN_ON(1);
2838                         }
2839                         node = rb_next(node);
2840                 }
2841                 spin_unlock(&delayed_refs->lock);
2842                 cond_resched();
2843                 goto again;
2844         }
2845 out:
2846         ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
2847         if (ret)
2848                 return ret;
2849         assert_qgroups_uptodate(trans);
2850         return 0;
2851 }
2852
2853 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2854                                 struct btrfs_root *root,
2855                                 u64 bytenr, u64 num_bytes, u64 flags,
2856                                 int level, int is_data)
2857 {
2858         struct btrfs_delayed_extent_op *extent_op;
2859         int ret;
2860
2861         extent_op = btrfs_alloc_delayed_extent_op();
2862         if (!extent_op)
2863                 return -ENOMEM;
2864
2865         extent_op->flags_to_set = flags;
2866         extent_op->update_flags = 1;
2867         extent_op->update_key = 0;
2868         extent_op->is_data = is_data ? 1 : 0;
2869         extent_op->level = level;
2870
2871         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2872                                           num_bytes, extent_op);
2873         if (ret)
2874                 btrfs_free_delayed_extent_op(extent_op);
2875         return ret;
2876 }
2877
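     /*
      * Walk the delayed refs queued for @bytenr: returns 1 if any of them
      * reference the extent from somewhere other than the given root, inode
      * objectid and offset (a cross reference exists), 0 if not, or -EAGAIN
      * if the ref head mutex was contended and the caller should retry.
      */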
2878 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2879                                       struct btrfs_root *root,
2880                                       struct btrfs_path *path,
2881                                       u64 objectid, u64 offset, u64 bytenr)
2882 {
2883         struct btrfs_delayed_ref_head *head;
2884         struct btrfs_delayed_ref_node *ref;
2885         struct btrfs_delayed_data_ref *data_ref;
2886         struct btrfs_delayed_ref_root *delayed_refs;
2887         struct rb_node *node;
2888         int ret = 0;
2889
2890         delayed_refs = &trans->transaction->delayed_refs;
2891         spin_lock(&delayed_refs->lock);
2892         head = btrfs_find_delayed_ref_head(trans, bytenr);
2893         if (!head) {
2894                 spin_unlock(&delayed_refs->lock);
2895                 return 0;
2896         }
2897
2898         if (!mutex_trylock(&head->mutex)) {
2899                 atomic_inc(&head->node.refs);
2900                 spin_unlock(&delayed_refs->lock);
2901
2902                 btrfs_release_path(path);
2903
2904                 /*
2905                  * Mutex was contended, block until it's released and let
2906                  * caller try again
2907                  */
2908                 mutex_lock(&head->mutex);
2909                 mutex_unlock(&head->mutex);
2910                 btrfs_put_delayed_ref(&head->node);
2911                 return -EAGAIN;
2912         }
2913         spin_unlock(&delayed_refs->lock);
2914
2915         spin_lock(&head->lock);
2916         node = rb_first(&head->ref_root);
2917         while (node) {
2918                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2919                 node = rb_next(node);
2920
2921                 /* If it's a shared ref we know a cross reference exists */
2922                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2923                         ret = 1;
2924                         break;
2925                 }
2926
2927                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2928
2929                 /*
2930                  * If our ref doesn't match the one we're currently looking at
2931                  * then we have a cross reference.
2932                  */
2933                 if (data_ref->root != root->root_key.objectid ||
2934                     data_ref->objectid != objectid ||
2935                     data_ref->offset != offset) {
2936                         ret = 1;
2937                         break;
2938                 }
2939         }
2940         spin_unlock(&head->lock);
2941         mutex_unlock(&head->mutex);
2942         return ret;
2943 }
2944
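     /*
      * Check the committed extent tree: returns 0 only when the extent at
      * @bytenr is newer than the last snapshot and carries a single inline
      * data ref matching the given root, objectid and offset; returns 1 when
      * a cross reference may exist, -ENOENT when no matching extent item is
      * found, or a negative errno on search failure.
      */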
2945 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2946                                         struct btrfs_root *root,
2947                                         struct btrfs_path *path,
2948                                         u64 objectid, u64 offset, u64 bytenr)
2949 {
2950         struct btrfs_root *extent_root = root->fs_info->extent_root;
2951         struct extent_buffer *leaf;
2952         struct btrfs_extent_data_ref *ref;
2953         struct btrfs_extent_inline_ref *iref;
2954         struct btrfs_extent_item *ei;
2955         struct btrfs_key key;
2956         u32 item_size;
2957         int ret;
2958
2959         key.objectid = bytenr;
2960         key.offset = (u64)-1;
2961         key.type = BTRFS_EXTENT_ITEM_KEY;
2962
2963         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2964         if (ret < 0)
2965                 goto out;
2966         BUG_ON(ret == 0); /* Corruption */
2967
2968         ret = -ENOENT;
2969         if (path->slots[0] == 0)
2970                 goto out;
2971
2972         path->slots[0]--;
2973         leaf = path->nodes[0];
2974         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2975
2976         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2977                 goto out;
2978
2979         ret = 1;
2980         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2981 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2982         if (item_size < sizeof(*ei)) {
2983                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2984                 goto out;
2985         }
2986 #endif
2987         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2988
2989         if (item_size != sizeof(*ei) +
2990             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2991                 goto out;
2992
2993         if (btrfs_extent_generation(leaf, ei) <=
2994             btrfs_root_last_snapshot(&root->root_item))
2995                 goto out;
2996
2997         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2998         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2999             BTRFS_EXTENT_DATA_REF_KEY)
3000                 goto out;
3001
3002         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3003         if (btrfs_extent_refs(leaf, ei) !=
3004             btrfs_extent_data_ref_count(leaf, ref) ||
3005             btrfs_extent_data_ref_root(leaf, ref) !=
3006             root->root_key.objectid ||
3007             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3008             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3009                 goto out;
3010
3011         ret = 0;
3012 out:
3013         return ret;
3014 }
3015
3016 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3017                           struct btrfs_root *root,
3018                           u64 objectid, u64 offset, u64 bytenr)
3019 {
3020         struct btrfs_path *path;
3021         int ret;
3022         int ret2;
3023
3024         path = btrfs_alloc_path();
3025         if (!path)
3026                 return -ENOMEM;
3027
3028         do {
3029                 ret = check_committed_ref(trans, root, path, objectid,
3030                                           offset, bytenr);
3031                 if (ret && ret != -ENOENT)
3032                         goto out;
3033
3034                 ret2 = check_delayed_ref(trans, root, path, objectid,
3035                                          offset, bytenr);
3036         } while (ret2 == -EAGAIN);
3037
3038         if (ret2 && ret2 != -ENOENT) {
3039                 ret = ret2;
3040                 goto out;
3041         }
3042
3043         if (ret != -ENOENT || ret2 != -ENOENT)
3044                 ret = 0;
3045 out:
3046         btrfs_free_path(path);
3047         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3048                 WARN_ON(ret > 0);
3049         return ret;
3050 }
3051
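     /*
      * Walk every item in @buf and add (inc != 0) or drop one reference for
      * each extent it points to: file extent disk bytenrs in a leaf, child
      * block pointers in a node.
      */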
3052 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3053                            struct btrfs_root *root,
3054                            struct extent_buffer *buf,
3055                            int full_backref, int inc)
3056 {
3057         u64 bytenr;
3058         u64 num_bytes;
3059         u64 parent;
3060         u64 ref_root;
3061         u32 nritems;
3062         struct btrfs_key key;
3063         struct btrfs_file_extent_item *fi;
3064         int i;
3065         int level;
3066         int ret = 0;
3067         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3068                             u64, u64, u64, u64, u64, u64, int);
3069
3070
3071         if (btrfs_test_is_dummy_root(root))
3072                 return 0;
3073
3074         ref_root = btrfs_header_owner(buf);
3075         nritems = btrfs_header_nritems(buf);
3076         level = btrfs_header_level(buf);
3077
3078         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3079                 return 0;
3080
3081         if (inc)
3082                 process_func = btrfs_inc_extent_ref;
3083         else
3084                 process_func = btrfs_free_extent;
3085
3086         if (full_backref)
3087                 parent = buf->start;
3088         else
3089                 parent = 0;
3090
3091         for (i = 0; i < nritems; i++) {
3092                 if (level == 0) {
3093                         btrfs_item_key_to_cpu(buf, &key, i);
3094                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3095                                 continue;
3096                         fi = btrfs_item_ptr(buf, i,
3097                                             struct btrfs_file_extent_item);
3098                         if (btrfs_file_extent_type(buf, fi) ==
3099                             BTRFS_FILE_EXTENT_INLINE)
3100                                 continue;
3101                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3102                         if (bytenr == 0)
3103                                 continue;
3104
3105                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3106                         key.offset -= btrfs_file_extent_offset(buf, fi);
3107                         ret = process_func(trans, root, bytenr, num_bytes,
3108                                            parent, ref_root, key.objectid,
3109                                            key.offset, 1);
3110                         if (ret)
3111                                 goto fail;
3112                 } else {
3113                         bytenr = btrfs_node_blockptr(buf, i);
3114                         num_bytes = root->nodesize;
3115                         ret = process_func(trans, root, bytenr, num_bytes,
3116                                            parent, ref_root, level - 1, 0,
3117                                            1);
3118                         if (ret)
3119                                 goto fail;
3120                 }
3121         }
3122         return 0;
3123 fail:
3124         return ret;
3125 }
3126
3127 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3128                   struct extent_buffer *buf, int full_backref)
3129 {
3130         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3131 }
3132
3133 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3134                   struct extent_buffer *buf, int full_backref)
3135 {
3136         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3137 }
3138
3139 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3140                                  struct btrfs_root *root,
3141                                  struct btrfs_path *path,
3142                                  struct btrfs_block_group_cache *cache)
3143 {
3144         int ret;
3145         struct btrfs_root *extent_root = root->fs_info->extent_root;
3146         unsigned long bi;
3147         struct extent_buffer *leaf;
3148
3149         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3150         if (ret < 0)
3151                 goto fail;
3152         BUG_ON(ret); /* Corruption */
3153
3154         leaf = path->nodes[0];
3155         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3156         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3157         btrfs_mark_buffer_dirty(leaf);
3158         btrfs_release_path(path);
3159 fail:
3160         if (ret) {
3161                 btrfs_abort_transaction(trans, root, ret);
3162                 return ret;
3163         }
3164         return 0;
3165
3166 }
3167
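     /*
      * Return the block group that follows @cache in the block group cache
      * rbtree (or NULL at the end), dropping the caller's reference on @cache
      * and taking a reference on the returned group.
      */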
3168 static struct btrfs_block_group_cache *
3169 next_block_group(struct btrfs_root *root,
3170                  struct btrfs_block_group_cache *cache)
3171 {
3172         struct rb_node *node;
3173         spin_lock(&root->fs_info->block_group_cache_lock);
3174         node = rb_next(&cache->cache_node);
3175         btrfs_put_block_group(cache);
3176         if (node) {
3177                 cache = rb_entry(node, struct btrfs_block_group_cache,
3178                                  cache_node);
3179                 btrfs_get_block_group(cache);
3180         } else
3181                 cache = NULL;
3182         spin_unlock(&root->fs_info->block_group_cache_lock);
3183         return cache;
3184 }
3185
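     /*
      * Prepare the free space cache inode for this block group in the current
      * transaction: create or truncate it, preallocate room for the on-disk
      * cache and record the result in block_group->disk_cache_state.
      */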
3186 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3187                             struct btrfs_trans_handle *trans,
3188                             struct btrfs_path *path)
3189 {
3190         struct btrfs_root *root = block_group->fs_info->tree_root;
3191         struct inode *inode = NULL;
3192         u64 alloc_hint = 0;
3193         int dcs = BTRFS_DC_ERROR;
3194         int num_pages = 0;
3195         int retries = 0;
3196         int ret = 0;
3197
3198         /*
3199          * If this block group is smaller than 100 megs don't bother caching the
3200          * block group.
3201          */
3202         if (block_group->key.offset < (100 * 1024 * 1024)) {
3203                 spin_lock(&block_group->lock);
3204                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3205                 spin_unlock(&block_group->lock);
3206                 return 0;
3207         }
3208
3209 again:
3210         inode = lookup_free_space_inode(root, block_group, path);
3211         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3212                 ret = PTR_ERR(inode);
3213                 btrfs_release_path(path);
3214                 goto out;
3215         }
3216
3217         if (IS_ERR(inode)) {
3218                 BUG_ON(retries);
3219                 retries++;
3220
3221                 if (block_group->ro)
3222                         goto out_free;
3223
3224                 ret = create_free_space_inode(root, trans, block_group, path);
3225                 if (ret)
3226                         goto out_free;
3227                 goto again;
3228         }
3229
3230         /* We've already setup this transaction, go ahead and exit */
3231         if (block_group->cache_generation == trans->transid &&
3232             i_size_read(inode)) {
3233                 dcs = BTRFS_DC_SETUP;
3234                 goto out_put;
3235         }
3236
3237         /*
3238          * We want to set the generation to 0, that way if anything goes wrong
3239          * from here on out we know not to trust this cache when we load up next
3240          * time.
3241          */
3242         BTRFS_I(inode)->generation = 0;
3243         ret = btrfs_update_inode(trans, root, inode);
3244         WARN_ON(ret);
3245
3246         if (i_size_read(inode) > 0) {
3247                 ret = btrfs_check_trunc_cache_free_space(root,
3248                                         &root->fs_info->global_block_rsv);
3249                 if (ret)
3250                         goto out_put;
3251
3252                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3253                 if (ret)
3254                         goto out_put;
3255         }
3256
3257         spin_lock(&block_group->lock);
3258         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3259             !btrfs_test_opt(root, SPACE_CACHE) ||
3260             block_group->delalloc_bytes) {
3261                 /*
3262                  * don't bother trying to write stuff out _if_
3263                  * a) we're not cached,
3264                  * b) we're mounted with the nospace_cache option.
3265                  */
3266                 dcs = BTRFS_DC_WRITTEN;
3267                 spin_unlock(&block_group->lock);
3268                 goto out_put;
3269         }
3270         spin_unlock(&block_group->lock);
3271
3272         /*
3273          * Try to preallocate enough space based on how big the block group is.
3274          * Keep in mind this has to include any pinned space which could end up
3275          * taking up quite a bit since it's not folded into the other space
3276          * cache.
3277          */
3278         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3279         if (!num_pages)
3280                 num_pages = 1;
3281
3282         num_pages *= 16;
3283         num_pages *= PAGE_CACHE_SIZE;
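             /*
              * e.g. with 4K pages a 1GB block group preallocates
              * 4 * 16 = 64 pages, i.e. 256K of free space cache.
              */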
3284
3285         ret = btrfs_check_data_free_space(inode, num_pages);
3286         if (ret)
3287                 goto out_put;
3288
3289         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3290                                               num_pages, num_pages,
3291                                               &alloc_hint);
3292         if (!ret)
3293                 dcs = BTRFS_DC_SETUP;
3294         btrfs_free_reserved_data_space(inode, num_pages);
3295
3296 out_put:
3297         iput(inode);
3298 out_free:
3299         btrfs_release_path(path);
3300 out:
3301         spin_lock(&block_group->lock);
3302         if (!ret && dcs == BTRFS_DC_SETUP)
3303                 block_group->cache_generation = trans->transid;
3304         block_group->disk_cache_state = dcs;
3305         spin_unlock(&block_group->lock);
3306
3307         return ret;
3308 }
3309
3310 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3311                                    struct btrfs_root *root)
3312 {
3313         struct btrfs_block_group_cache *cache;
3314         int err = 0;
3315         struct btrfs_path *path;
3316         u64 last = 0;
3317
3318         path = btrfs_alloc_path();
3319         if (!path)
3320                 return -ENOMEM;
3321
3322 again:
3323         while (1) {
3324                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3325                 while (cache) {
3326                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3327                                 break;
3328                         cache = next_block_group(root, cache);
3329                 }
3330                 if (!cache) {
3331                         if (last == 0)
3332                                 break;
3333                         last = 0;
3334                         continue;
3335                 }
3336                 err = cache_save_setup(cache, trans, path);
3337                 last = cache->key.objectid + cache->key.offset;
3338                 btrfs_put_block_group(cache);
3339         }
3340
3341         while (1) {
3342                 if (last == 0) {
3343                         err = btrfs_run_delayed_refs(trans, root,
3344                                                      (unsigned long)-1);
3345                         if (err) /* File system offline */
3346                                 goto out;
3347                 }
3348
3349                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3350                 while (cache) {
3351                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3352                                 btrfs_put_block_group(cache);
3353                                 goto again;
3354                         }
3355
3356                         if (cache->dirty)
3357                                 break;
3358                         cache = next_block_group(root, cache);
3359                 }
3360                 if (!cache) {
3361                         if (last == 0)
3362                                 break;
3363                         last = 0;
3364                         continue;
3365                 }
3366
3367                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3368                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3369                 cache->dirty = 0;
3370                 last = cache->key.objectid + cache->key.offset;
3371
3372                 err = write_one_cache_group(trans, root, path, cache);
3373                 btrfs_put_block_group(cache);
3374                 if (err) /* File system offline */
3375                         goto out;
3376         }
3377
3378         while (1) {
3379                 /*
3380                  * I don't think this is needed since we're just marking our
3381                  * preallocated extent as written, but it can't hurt just in
3382                  * case.
3383                  */
3384                 if (last == 0) {
3385                         err = btrfs_run_delayed_refs(trans, root,
3386                                                      (unsigned long)-1);
3387                         if (err) /* File system offline */
3388                                 goto out;
3389                 }
3390
3391                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3392                 while (cache) {
3393                         /*
3394                          * Really this shouldn't happen, but it could if we
3395                          * couldn't write the entire preallocated extent and
3396                          * splitting the extent resulted in a new block.
3397                          */
3398                         if (cache->dirty) {
3399                                 btrfs_put_block_group(cache);
3400                                 goto again;
3401                         }
3402                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3403                                 break;
3404                         cache = next_block_group(root, cache);
3405                 }
3406                 if (!cache) {
3407                         if (last == 0)
3408                                 break;
3409                         last = 0;
3410                         continue;
3411                 }
3412
3413                 err = btrfs_write_out_cache(root, trans, cache, path);
3414
3415                 /*
3416                  * If we didn't have an error then the cache state is still
3417                  * NEED_WRITE, so we can set it to WRITTEN.
3418                  */
3419                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3420                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3421                 last = cache->key.objectid + cache->key.offset;
3422                 btrfs_put_block_group(cache);
3423         }
3424 out:
3425
3426         btrfs_free_path(path);
3427         return err;
3428 }
3429
3430 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3431 {
3432         struct btrfs_block_group_cache *block_group;
3433         int readonly = 0;
3434
3435         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3436         if (!block_group || block_group->ro)
3437                 readonly = 1;
3438         if (block_group)
3439                 btrfs_put_block_group(block_group);
3440         return readonly;
3441 }
3442
3443 static const char *alloc_name(u64 flags)
3444 {
3445         switch (flags) {
3446         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3447                 return "mixed";
3448         case BTRFS_BLOCK_GROUP_METADATA:
3449                 return "metadata";
3450         case BTRFS_BLOCK_GROUP_DATA:
3451                 return "data";
3452         case BTRFS_BLOCK_GROUP_SYSTEM:
3453                 return "system";
3454         default:
3455                 WARN_ON(1);
3456                 return "invalid-combination";
3457         }
3458 }
3459
3460 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3461                              u64 total_bytes, u64 bytes_used,
3462                              struct btrfs_space_info **space_info)
3463 {
3464         struct btrfs_space_info *found;
3465         int i;
3466         int factor;
3467         int ret;
3468
3469         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3470                      BTRFS_BLOCK_GROUP_RAID10))
3471                 factor = 2;
3472         else
3473                 factor = 1;
3474
3475         found = __find_space_info(info, flags);
3476         if (found) {
3477                 spin_lock(&found->lock);
3478                 found->total_bytes += total_bytes;
3479                 found->disk_total += total_bytes * factor;
3480                 found->bytes_used += bytes_used;
3481                 found->disk_used += bytes_used * factor;
3482                 found->full = 0;
3483                 spin_unlock(&found->lock);
3484                 *space_info = found;
3485                 return 0;
3486         }
3487         found = kzalloc(sizeof(*found), GFP_NOFS);
3488         if (!found)
3489                 return -ENOMEM;
3490
3491         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3492         if (ret) {
3493                 kfree(found);
3494                 return ret;
3495         }
3496
3497         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3498                 INIT_LIST_HEAD(&found->block_groups[i]);
3499         init_rwsem(&found->groups_sem);
3500         spin_lock_init(&found->lock);
3501         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3502         found->total_bytes = total_bytes;
3503         found->disk_total = total_bytes * factor;
3504         found->bytes_used = bytes_used;
3505         found->disk_used = bytes_used * factor;
3506         found->bytes_pinned = 0;
3507         found->bytes_reserved = 0;
3508         found->bytes_readonly = 0;
3509         found->bytes_may_use = 0;
3510         found->full = 0;
3511         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3512         found->chunk_alloc = 0;
3513         found->flush = 0;
3514         init_waitqueue_head(&found->wait);
3515
3516         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3517                                     info->space_info_kobj, "%s",
3518                                     alloc_name(found->flags));
3519         if (ret) {
3520                 kfree(found);
3521                 return ret;
3522         }
3523
3524         *space_info = found;
3525         list_add_rcu(&found->list, &info->space_info);
3526         if (flags & BTRFS_BLOCK_GROUP_DATA)
3527                 info->data_sinfo = found;
3528
3529         return ret;
3530 }
3531
3532 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3533 {
3534         u64 extra_flags = chunk_to_extended(flags) &
3535                                 BTRFS_EXTENDED_PROFILE_MASK;
3536
3537         write_seqlock(&fs_info->profiles_lock);
3538         if (flags & BTRFS_BLOCK_GROUP_DATA)
3539                 fs_info->avail_data_alloc_bits |= extra_flags;
3540         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3541                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3542         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3543                 fs_info->avail_system_alloc_bits |= extra_flags;
3544         write_sequnlock(&fs_info->profiles_lock);
3545 }
3546
3547 /*
3548  * returns target flags in extended format or 0 if restripe for this
3549  * chunk_type is not in progress
3550  *
3551  * should be called with either volume_mutex or balance_lock held
3552  */
3553 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3554 {
3555         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3556         u64 target = 0;
3557
3558         if (!bctl)
3559                 return 0;
3560
3561         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3562             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3563                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3564         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3565                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3566                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3567         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3568                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3569                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3570         }
3571
3572         return target;
3573 }
3574
3575 /*
3576  * @flags: available profiles in extended format (see ctree.h)
3577  *
3578  * Returns reduced profile in chunk format.  If profile changing is in
3579  * progress (either running or paused) picks the target profile (if it's
3580  * already available), otherwise falls back to plain reducing.
3581  */
3582 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3583 {
3584         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3585         u64 target;
3586         u64 tmp;
3587
3588         /*
3589          * see if restripe for this chunk_type is in progress, if so
3590          * try to reduce to the target profile
3591          */
3592         spin_lock(&root->fs_info->balance_lock);
3593         target = get_restripe_target(root->fs_info, flags);
3594         if (target) {
3595                 /* pick target profile only if it's already available */
3596                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3597                         spin_unlock(&root->fs_info->balance_lock);
3598                         return extended_to_chunk(target);
3599                 }
3600         }
3601         spin_unlock(&root->fs_info->balance_lock);
3602
3603         /* First, mask out the RAID levels which aren't possible */
3604         if (num_devices == 1)
3605                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3606                            BTRFS_BLOCK_GROUP_RAID5);
3607         if (num_devices < 3)
3608                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3609         if (num_devices < 4)
3610                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3611
3612         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3613                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3614                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3615         flags &= ~tmp;
3616
3617         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3618                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3619         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3620                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3621         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3622                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3623         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3624                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3625         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3626                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3627
3628         return extended_to_chunk(flags | tmp);
3629 }
3630
3631 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3632 {
3633         unsigned seq;
3634         u64 flags;
3635
3636         do {
3637                 flags = orig_flags;
3638                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3639
3640                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3641                         flags |= root->fs_info->avail_data_alloc_bits;
3642                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3643                         flags |= root->fs_info->avail_system_alloc_bits;
3644                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3645                         flags |= root->fs_info->avail_metadata_alloc_bits;
3646         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3647
3648         return btrfs_reduce_alloc_profile(root, flags);
3649 }
3650
3651 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3652 {
3653         u64 flags;
3654         u64 ret;
3655
3656         if (data)
3657                 flags = BTRFS_BLOCK_GROUP_DATA;
3658         else if (root == root->fs_info->chunk_root)
3659                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3660         else
3661                 flags = BTRFS_BLOCK_GROUP_METADATA;
3662
3663         ret = get_alloc_profile(root, flags);
3664         return ret;
3665 }
3666
3667 /*
3668  * This will check the space that the inode allocates from to make sure we have
3669  * enough space for bytes.
3670  */
3671 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3672 {
3673         struct btrfs_space_info *data_sinfo;
3674         struct btrfs_root *root = BTRFS_I(inode)->root;
3675         struct btrfs_fs_info *fs_info = root->fs_info;
3676         u64 used;
3677         int ret = 0, committed = 0, alloc_chunk = 1;
3678
3679         /* make sure bytes are sectorsize aligned */
3680         bytes = ALIGN(bytes, root->sectorsize);
3681
3682         if (btrfs_is_free_space_inode(inode)) {
3683                 committed = 1;
3684                 ASSERT(current->journal_info);
3685         }
3686
3687         data_sinfo = fs_info->data_sinfo;
3688         if (!data_sinfo)
3689                 goto alloc;
3690
3691 again:
3692         /* make sure we have enough space to handle the data first */
3693         spin_lock(&data_sinfo->lock);
3694         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3695                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3696                 data_sinfo->bytes_may_use;
3697
3698         if (used + bytes > data_sinfo->total_bytes) {
3699                 struct btrfs_trans_handle *trans;
3700
3701                 /*
3702                  * if we don't have enough free bytes in this space then we need
3703                  * to alloc a new chunk.
3704                  */
3705                 if (!data_sinfo->full && alloc_chunk) {
3706                         u64 alloc_target;
3707
3708                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3709                         spin_unlock(&data_sinfo->lock);
3710 alloc:
3711                         alloc_target = btrfs_get_alloc_profile(root, 1);
3712                         /*
3713                          * It is ugly that we don't do a nolock join of the
3714                          * transaction for the free space inode case here.
3715                          * But it is safe because we only do the data space
3716                          * reservation for the free space cache in a
3717                          * transaction context, and the common join only
3718                          * increases the use count of the current transaction
3719                          * handle; it doesn't try to acquire the trans_lock of
3720                          * the fs.
3721                          */
3722                         trans = btrfs_join_transaction(root);
3723                         if (IS_ERR(trans))
3724                                 return PTR_ERR(trans);
3725
3726                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3727                                              alloc_target,
3728                                              CHUNK_ALLOC_NO_FORCE);
3729                         btrfs_end_transaction(trans, root);
3730                         if (ret < 0) {
3731                                 if (ret != -ENOSPC)
3732                                         return ret;
3733                                 else
3734                                         goto commit_trans;
3735                         }
3736
3737                         if (!data_sinfo)
3738                                 data_sinfo = fs_info->data_sinfo;
3739
3740                         goto again;
3741                 }
3742
3743                 /*
3744                  * If we don't have enough pinned space to deal with this
3745                  * allocation don't bother committing the transaction.
3746                  */
3747                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3748                                            bytes) < 0)
3749                         committed = 1;
3750                 spin_unlock(&data_sinfo->lock);
3751
3752                 /* commit the current transaction and try again */
3753 commit_trans:
3754                 if (!committed &&
3755                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3756                         committed = 1;
3757
3758                         trans = btrfs_join_transaction(root);
3759                         if (IS_ERR(trans))
3760                                 return PTR_ERR(trans);
3761                         ret = btrfs_commit_transaction(trans, root);
3762                         if (ret)
3763                                 return ret;
3764                         goto again;
3765                 }
3766
3767                 trace_btrfs_space_reservation(root->fs_info,
3768                                               "space_info:enospc",
3769                                               data_sinfo->flags, bytes, 1);
3770                 return -ENOSPC;
3771         }
3772         data_sinfo->bytes_may_use += bytes;
3773         trace_btrfs_space_reservation(root->fs_info, "space_info",
3774                                       data_sinfo->flags, bytes, 1);
3775         spin_unlock(&data_sinfo->lock);
3776
3777         return 0;
3778 }
3779
3780 /*
3781  * Called if we need to clear a data reservation for this inode.
3782  */
3783 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3784 {
3785         struct btrfs_root *root = BTRFS_I(inode)->root;
3786         struct btrfs_space_info *data_sinfo;
3787
3788         /* make sure bytes are sectorsize aligned */
3789         bytes = ALIGN(bytes, root->sectorsize);
3790
3791         data_sinfo = root->fs_info->data_sinfo;
3792         spin_lock(&data_sinfo->lock);
3793         WARN_ON(data_sinfo->bytes_may_use < bytes);
3794         data_sinfo->bytes_may_use -= bytes;
3795         trace_btrfs_space_reservation(root->fs_info, "space_info",
3796                                       data_sinfo->flags, bytes, 0);
3797         spin_unlock(&data_sinfo->lock);
3798 }
3799
3800 static void force_metadata_allocation(struct btrfs_fs_info *info)
3801 {
3802         struct list_head *head = &info->space_info;
3803         struct btrfs_space_info *found;
3804
3805         rcu_read_lock();
3806         list_for_each_entry_rcu(found, head, list) {
3807                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3808                         found->force_alloc = CHUNK_ALLOC_FORCE;
3809         }
3810         rcu_read_unlock();
3811 }
3812
3813 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3814 {
3815         return (global->size << 1);
3816 }
3817
3818 static int should_alloc_chunk(struct btrfs_root *root,
3819                               struct btrfs_space_info *sinfo, int force)
3820 {
3821         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3822         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3823         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3824         u64 thresh;
3825
3826         if (force == CHUNK_ALLOC_FORCE)
3827                 return 1;
3828
3829         /*
3830          * We need to take into account the global rsv because for all intents
3831          * and purposes it's used space.  Don't worry about locking the
3832          * global_rsv, it doesn't change except when the transaction commits.
3833          */
3834         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3835                 num_allocated += calc_global_rsv_need_space(global_rsv);
3836
3837         /*
3838          * in limited mode, we want to have some free space up to
3839          * about 1% of the FS size.
3840          */
3841         if (force == CHUNK_ALLOC_LIMITED) {
3842                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3843                 thresh = max_t(u64, 64 * 1024 * 1024,
3844                                div_factor_fine(thresh, 1));
3845
3846                 if (num_bytes - num_allocated < thresh)
3847                         return 1;
3848         }
3849
3850         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3851                 return 0;
3852         return 1;
3853 }
3854
3855 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3856 {
3857         u64 num_dev;
3858
3859         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3860                     BTRFS_BLOCK_GROUP_RAID0 |
3861                     BTRFS_BLOCK_GROUP_RAID5 |
3862                     BTRFS_BLOCK_GROUP_RAID6))
3863                 num_dev = root->fs_info->fs_devices->rw_devices;
3864         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3865                 num_dev = 2;
3866         else
3867                 num_dev = 1;    /* DUP or single */
3868
3869         /* metadata for updating devices and the chunk tree */
3870         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3871 }
3872
3873 static void check_system_chunk(struct btrfs_trans_handle *trans,
3874                                struct btrfs_root *root, u64 type)
3875 {
3876         struct btrfs_space_info *info;
3877         u64 left;
3878         u64 thresh;
3879
3880         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3881         spin_lock(&info->lock);
3882         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3883                 info->bytes_reserved - info->bytes_readonly;
3884         spin_unlock(&info->lock);
3885
3886         thresh = get_system_chunk_thresh(root, type);
3887         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3888                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3889                         left, thresh, type);
3890                 dump_space_info(info, 0, 0);
3891         }
3892
3893         if (left < thresh) {
3894                 u64 flags;
3895
3896                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3897                 btrfs_alloc_chunk(trans, root, flags);
3898         }
3899 }
3900
3901 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3902                           struct btrfs_root *extent_root, u64 flags, int force)
3903 {
3904         struct btrfs_space_info *space_info;
3905         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3906         int wait_for_alloc = 0;
3907         int ret = 0;
3908
3909         /* Don't re-enter if we're already allocating a chunk */
3910         if (trans->allocating_chunk)
3911                 return -ENOSPC;
3912
3913         space_info = __find_space_info(extent_root->fs_info, flags);
3914         if (!space_info) {
3915                 ret = update_space_info(extent_root->fs_info, flags,
3916                                         0, 0, &space_info);
3917                 BUG_ON(ret); /* -ENOMEM */
3918         }
3919         BUG_ON(!space_info); /* Logic error */
3920
3921 again:
3922         spin_lock(&space_info->lock);
3923         if (force < space_info->force_alloc)
3924                 force = space_info->force_alloc;
3925         if (space_info->full) {
3926                 if (should_alloc_chunk(extent_root, space_info, force))
3927                         ret = -ENOSPC;
3928                 else
3929                         ret = 0;
3930                 spin_unlock(&space_info->lock);
3931                 return ret;
3932         }
3933
3934         if (!should_alloc_chunk(extent_root, space_info, force)) {
3935                 spin_unlock(&space_info->lock);
3936                 return 0;
3937         } else if (space_info->chunk_alloc) {
3938                 wait_for_alloc = 1;
3939         } else {
3940                 space_info->chunk_alloc = 1;
3941         }
3942
3943         spin_unlock(&space_info->lock);
3944
3945         mutex_lock(&fs_info->chunk_mutex);
3946
3947         /*
3948          * The chunk_mutex is held throughout the entirety of a chunk
3949          * allocation, so once we've acquired the chunk_mutex we know that the
3950          * other guy is done and we need to recheck and see if we should
3951          * allocate.
3952          */
3953         if (wait_for_alloc) {
3954                 mutex_unlock(&fs_info->chunk_mutex);
3955                 wait_for_alloc = 0;
3956                 goto again;
3957         }
3958
3959         trans->allocating_chunk = true;
3960
3961         /*
3962          * If we have mixed data/metadata chunks we want to make sure we keep
3963          * allocating mixed chunks instead of individual chunks.
3964          */
3965         if (btrfs_mixed_space_info(space_info))
3966                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3967
3968         /*
3969          * if we're doing a data chunk, go ahead and make sure that
3970          * we keep a reasonable number of metadata chunks allocated in the
3971          * FS as well.
3972          */
3973         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3974                 fs_info->data_chunk_allocations++;
3975                 if (!(fs_info->data_chunk_allocations %
3976                       fs_info->metadata_ratio))
3977                         force_metadata_allocation(fs_info);
3978         }
3979
3980         /*
3981          * Check if we have enough space in SYSTEM chunk because we may need
3982          * to update devices.
3983          */
3984         check_system_chunk(trans, extent_root, flags);
3985
3986         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3987         trans->allocating_chunk = false;
3988
3989         spin_lock(&space_info->lock);
3990         if (ret < 0 && ret != -ENOSPC)
3991                 goto out;
3992         if (ret)
3993                 space_info->full = 1;
3994         else
3995                 ret = 1;
3996
3997         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3998 out:
3999         space_info->chunk_alloc = 0;
4000         spin_unlock(&space_info->lock);
4001         mutex_unlock(&fs_info->chunk_mutex);
4002         return ret;
4003 }
4004
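     /*
      * Decide whether a reservation of @bytes may overcommit @space_info: the
      * global reserve must still fit, and the unallocated chunk space (halved
      * for DUP/RAID1/RAID10 profiles and scaled by how hard we can flush)
      * bounds how far past total_bytes we are allowed to go.
      */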
4005 static int can_overcommit(struct btrfs_root *root,
4006                           struct btrfs_space_info *space_info, u64 bytes,
4007                           enum btrfs_reserve_flush_enum flush)
4008 {
4009         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4010         u64 profile = btrfs_get_alloc_profile(root, 0);
4011         u64 space_size;
4012         u64 avail;
4013         u64 used;
4014
4015         used = space_info->bytes_used + space_info->bytes_reserved +
4016                 space_info->bytes_pinned + space_info->bytes_readonly;
4017
4018         /*
4019          * We only want to allow over committing if we have lots of actual space
4020          * free, but if we don't have enough space to handle the global reserve
4021          * space then we could end up having a real enospc problem when trying
4022          * to allocate a chunk or some other such important allocation.
4023          */
4024         spin_lock(&global_rsv->lock);
4025         space_size = calc_global_rsv_need_space(global_rsv);
4026         spin_unlock(&global_rsv->lock);
4027         if (used + space_size >= space_info->total_bytes)
4028                 return 0;
4029
4030         used += space_info->bytes_may_use;
4031
4032         spin_lock(&root->fs_info->free_chunk_lock);
4033         avail = root->fs_info->free_chunk_space;
4034         spin_unlock(&root->fs_info->free_chunk_lock);
4035
4036         /*
4037          * If we have dup, raid1 or raid10 then only half of the free
4038          * space is actually usable.  For raid56, the space info used
4039          * doesn't include the parity drive, so we don't have to
4040          * change the math.
4041          */
4042         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4043                        BTRFS_BLOCK_GROUP_RAID1 |
4044                        BTRFS_BLOCK_GROUP_RAID10))
4045                 avail >>= 1;
4046
4047         /*
4048          * If we aren't flushing all things, let us overcommit up to
4049          * half of the space. If we can flush everything, don't let us
4050          * overcommit too much; only allow up to 1/8 of the space.
4051          */
4052         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4053                 avail >>= 3;
4054         else
4055                 avail >>= 1;
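             /*
              * e.g. with 8G of unallocated chunk space on a RAID1 profile,
              * FLUSH_ALL allows overcommitting by at most 8G / 2 / 8 = 512M.
              */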
4056
4057         if (used + bytes < space_info->total_bytes + avail)
4058                 return 1;
4059         return 0;
4060 }
4061
4062 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4063                                          unsigned long nr_pages, int nr_items)
4064 {
4065         struct super_block *sb = root->fs_info->sb;
4066
4067         if (down_read_trylock(&sb->s_umount)) {
4068                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4069                 up_read(&sb->s_umount);
4070         } else {
4071                 /*
4072                  * We needn't worry about the filesystem going from r/w to r/o even
4073                  * though we don't acquire the ->s_umount mutex, because the
4074                  * filesystem should guarantee that the delalloc inode list is empty
4075                  * after the filesystem becomes read-only (all dirty pages having
4076                  * been written to disk).
4077                  */
4078                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4079                 if (!current->journal_info)
4080                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4081         }
4082 }
4083
4084 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4085 {
4086         u64 bytes;
4087         int nr;
4088
4089         bytes = btrfs_calc_trans_metadata_size(root, 1);
4090         nr = (int)div64_u64(to_reclaim, bytes);
4091         if (!nr)
4092                 nr = 1;
4093         return nr;
4094 }
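/*
 * Illustrative example (values assumed): if to_reclaim is 1 MiB and
 * btrfs_calc_trans_metadata_size(root, 1) happens to be 256 KiB on this
 * filesystem, calc_reclaim_items_nr() returns 4; for anything smaller than
 * one item's worth of metadata it still returns 1.
 */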
4095
4096 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4097
4098 /*
4099  * shrink metadata reservation for delalloc
4100  */
4101 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4102                             bool wait_ordered)
4103 {
4104         struct btrfs_block_rsv *block_rsv;
4105         struct btrfs_space_info *space_info;
4106         struct btrfs_trans_handle *trans;
4107         u64 delalloc_bytes;
4108         u64 max_reclaim;
4109         long time_left;
4110         unsigned long nr_pages;
4111         int loops;
4112         int items;
4113         enum btrfs_reserve_flush_enum flush;
4114
4115         /* Calc the number of items we need to flush for this space reservation */
4116         items = calc_reclaim_items_nr(root, to_reclaim);
4117         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4118
4119         trans = (struct btrfs_trans_handle *)current->journal_info;
4120         block_rsv = &root->fs_info->delalloc_block_rsv;
4121         space_info = block_rsv->space_info;
4122
4123         delalloc_bytes = percpu_counter_sum_positive(
4124                                                 &root->fs_info->delalloc_bytes);
4125         if (delalloc_bytes == 0) {
4126                 if (trans)
4127                         return;
4128                 if (wait_ordered)
4129                         btrfs_wait_ordered_roots(root->fs_info, items);
4130                 return;
4131         }
4132
4133         loops = 0;
4134         while (delalloc_bytes && loops < 3) {
4135                 max_reclaim = min(delalloc_bytes, to_reclaim);
4136                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4137                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4138                 /*
4139                  * We need to wait for the async pages to actually start before
4140                  * we do anything.
4141                  */
4142                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4143                 if (!max_reclaim)
4144                         goto skip_async;
4145
4146                 if (max_reclaim <= nr_pages)
4147                         max_reclaim = 0;
4148                 else
4149                         max_reclaim -= nr_pages;
4150
4151                 wait_event(root->fs_info->async_submit_wait,
4152                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4153                            (int)max_reclaim);
4154 skip_async:
4155                 if (!trans)
4156                         flush = BTRFS_RESERVE_FLUSH_ALL;
4157                 else
4158                         flush = BTRFS_RESERVE_NO_FLUSH;
4159                 spin_lock(&space_info->lock);
4160                 if (can_overcommit(root, space_info, orig, flush)) {
4161                         spin_unlock(&space_info->lock);
4162                         break;
4163                 }
4164                 spin_unlock(&space_info->lock);
4165
4166                 loops++;
4167                 if (wait_ordered && !trans) {
4168                         btrfs_wait_ordered_roots(root->fs_info, items);
4169                 } else {
4170                         time_left = schedule_timeout_killable(1);
4171                         if (time_left)
4172                                 break;
4173                 }
4174                 delalloc_bytes = percpu_counter_sum_positive(
4175                                                 &root->fs_info->delalloc_bytes);
4176         }
4177 }
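/*
 * Example of one shrink_delalloc() iteration (numbers assumed): with
 * to_reclaim = 2 MiB (8 items * EXTENT_SIZE_PER_ITEM) and 10 MiB of dirty
 * delalloc, max_reclaim = 2 MiB and nr_pages = 512 with 4 KiB pages, so up
 * to 512 pages of delalloc writeback get kicked before we re-check whether
 * can_overcommit() is now satisfied for the original reservation.
 */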
4178
4179 /**
4180  * may_commit_transaction - possibly commit the transaction if it's ok to
4181  * @root - the root we're allocating for
4182  * @bytes - the number of bytes we want to reserve
4183  * @force - force the commit
4184  *
4185  * This will check to make sure that committing the transaction will actually
4186  * get us somewhere and then commit the transaction if it does.  Otherwise it
4187  * will return -ENOSPC.
4188  */
4189 static int may_commit_transaction(struct btrfs_root *root,
4190                                   struct btrfs_space_info *space_info,
4191                                   u64 bytes, int force)
4192 {
4193         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4194         struct btrfs_trans_handle *trans;
4195
4196         trans = (struct btrfs_trans_handle *)current->journal_info;
4197         if (trans)
4198                 return -EAGAIN;
4199
4200         if (force)
4201                 goto commit;
4202
4203         /* See if there is enough pinned space to make this reservation */
4204         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4205                                    bytes) >= 0)
4206                 goto commit;
4207
4208         /*
4209          * See if there is some space in the delayed insertion reservation for
4210          * this reservation.
4211          */
4212         if (space_info != delayed_rsv->space_info)
4213                 return -ENOSPC;
4214
4215         spin_lock(&delayed_rsv->lock);
4216         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4217                                    bytes - delayed_rsv->size) >= 0) {
4218                 spin_unlock(&delayed_rsv->lock);
4219                 return -ENOSPC;
4220         }
4221         spin_unlock(&delayed_rsv->lock);
4222
4223 commit:
4224         trans = btrfs_join_transaction(root);
4225         if (IS_ERR(trans))
4226                 return -ENOSPC;
4227
4228         return btrfs_commit_transaction(trans, root);
4229 }
4230
4231 enum flush_state {
4232         FLUSH_DELAYED_ITEMS_NR  =       1,
4233         FLUSH_DELAYED_ITEMS     =       2,
4234         FLUSH_DELALLOC          =       3,
4235         FLUSH_DELALLOC_WAIT     =       4,
4236         ALLOC_CHUNK             =       5,
4237         COMMIT_TRANS            =       6,
4238 };
4239
4240 static int flush_space(struct btrfs_root *root,
4241                        struct btrfs_space_info *space_info, u64 num_bytes,
4242                        u64 orig_bytes, int state)
4243 {
4244         struct btrfs_trans_handle *trans;
4245         int nr;
4246         int ret = 0;
4247
4248         switch (state) {
4249         case FLUSH_DELAYED_ITEMS_NR:
4250         case FLUSH_DELAYED_ITEMS:
4251                 if (state == FLUSH_DELAYED_ITEMS_NR)
4252                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4253                 else
4254                         nr = -1;
4255
4256                 trans = btrfs_join_transaction(root);
4257                 if (IS_ERR(trans)) {
4258                         ret = PTR_ERR(trans);
4259                         break;
4260                 }
4261                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4262                 btrfs_end_transaction(trans, root);
4263                 break;
4264         case FLUSH_DELALLOC:
4265         case FLUSH_DELALLOC_WAIT:
4266                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4267                                 state == FLUSH_DELALLOC_WAIT);
4268                 break;
4269         case ALLOC_CHUNK:
4270                 trans = btrfs_join_transaction(root);
4271                 if (IS_ERR(trans)) {
4272                         ret = PTR_ERR(trans);
4273                         break;
4274                 }
4275                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4276                                      btrfs_get_alloc_profile(root, 0),
4277                                      CHUNK_ALLOC_NO_FORCE);
4278                 btrfs_end_transaction(trans, root);
4279                 if (ret == -ENOSPC)
4280                         ret = 0;
4281                 break;
4282         case COMMIT_TRANS:
4283                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4284                 break;
4285         default:
4286                 ret = -ENOSPC;
4287                 break;
4288         }
4289
4290         return ret;
4291 }
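/*
 * Usage sketch (simplified; reserve_metadata_bytes() below is the real call
 * site, and "reservation_failed" is just a placeholder condition): callers
 * escalate through the flush states in order until the reservation succeeds:
 *
 *	int state = FLUSH_DELAYED_ITEMS_NR;
 *
 *	while (reservation_failed && state <= COMMIT_TRANS) {
 *		flush_space(root, space_info, num_bytes, orig_bytes, state);
 *		state++;
 *	}
 */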
4292
4293 static inline u64
4294 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4295                                  struct btrfs_space_info *space_info)
4296 {
4297         u64 used;
4298         u64 expected;
4299         u64 to_reclaim;
4300
4301         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4302                                 16 * 1024 * 1024);
4303         spin_lock(&space_info->lock);
4304         if (can_overcommit(root, space_info, to_reclaim,
4305                            BTRFS_RESERVE_FLUSH_ALL)) {
4306                 to_reclaim = 0;
4307                 goto out;
4308         }
4309
4310         used = space_info->bytes_used + space_info->bytes_reserved +
4311                space_info->bytes_pinned + space_info->bytes_readonly +
4312                space_info->bytes_may_use;
4313         if (can_overcommit(root, space_info, 1024 * 1024,
4314                            BTRFS_RESERVE_FLUSH_ALL))
4315                 expected = div_factor_fine(space_info->total_bytes, 95);
4316         else
4317                 expected = div_factor_fine(space_info->total_bytes, 90);
4318
4319         if (used > expected)
4320                 to_reclaim = used - expected;
4321         else
4322                 to_reclaim = 0;
4323         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4324                                      space_info->bytes_reserved);
4325 out:
4326         spin_unlock(&space_info->lock);
4327
4328         return to_reclaim;
4329 }
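/*
 * Worked example (numbers assumed): on a 4-CPU machine the initial target is
 * min(4 * 1 MiB, 16 MiB) = 4 MiB.  If we cannot overcommit by that much, we
 * compare used against 90% of total_bytes (95% if a 1 MiB overcommit still
 * fits); with total_bytes = 10 GiB and used = 9.5 GiB the 90% threshold is
 * 9 GiB, so to_reclaim becomes 0.5 GiB, capped by bytes_may_use +
 * bytes_reserved.
 */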
4330
4331 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4332                                         struct btrfs_fs_info *fs_info, u64 used)
4333 {
4334         return (used >= div_factor_fine(space_info->total_bytes, 98) &&
4335                 !btrfs_fs_closing(fs_info) &&
4336                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4337 }
4338
4339 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4340                                        struct btrfs_fs_info *fs_info,
4341                                        int flush_state)
4342 {
4343         u64 used;
4344
4345         spin_lock(&space_info->lock);
4346         /*
4347          * We have run out of space and have not been able to get any free
4348          * space via flush_space, so don't bother doing async reclaim.
4349          */
4350         if (flush_state > COMMIT_TRANS && space_info->full) {
4351                 spin_unlock(&space_info->lock);
4352                 return 0;
4353         }
4354
4355         used = space_info->bytes_used + space_info->bytes_reserved +
4356                space_info->bytes_pinned + space_info->bytes_readonly +
4357                space_info->bytes_may_use;
4358         if (need_do_async_reclaim(space_info, fs_info, used)) {
4359                 spin_unlock(&space_info->lock);
4360                 return 1;
4361         }
4362         spin_unlock(&space_info->lock);
4363
4364         return 0;
4365 }
4366
4367 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4368 {
4369         struct btrfs_fs_info *fs_info;
4370         struct btrfs_space_info *space_info;
4371         u64 to_reclaim;
4372         int flush_state;
4373
4374         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4375         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4376
4377         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4378                                                       space_info);
4379         if (!to_reclaim)
4380                 return;
4381
4382         flush_state = FLUSH_DELAYED_ITEMS_NR;
4383         do {
4384                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4385                             to_reclaim, flush_state);
4386                 flush_state++;
4387                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4388                                                  flush_state))
4389                         return;
4390         } while (flush_state <= COMMIT_TRANS);
4391
4392         if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
4393                 queue_work(system_unbound_wq, work);
4394 }
4395
4396 void btrfs_init_async_reclaim_work(struct work_struct *work)
4397 {
4398         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4399 }
4400
4401 /**
4402  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4403  * @root - the root we're allocating for
4404  * @block_rsv - the block_rsv we're allocating for
4405  * @orig_bytes - the number of bytes we want
4406  * @flush - whether or not we can flush to make our reservation
4407  *
4408  * This will reserve orig_bytes number of bytes from the space info associated
4409  * with the block_rsv.  If there is not enough space it will make an attempt to
4410  * flush out space to make room.  It will do this by flushing delalloc if
4411  * possible or committing the transaction.  If flush is 0 then no attempts to
4412  * regain reservations will be made and this will fail if there is not enough
4413  * space already.
4414  */
4415 static int reserve_metadata_bytes(struct btrfs_root *root,
4416                                   struct btrfs_block_rsv *block_rsv,
4417                                   u64 orig_bytes,
4418                                   enum btrfs_reserve_flush_enum flush)
4419 {
4420         struct btrfs_space_info *space_info = block_rsv->space_info;
4421         u64 used;
4422         u64 num_bytes = orig_bytes;
4423         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4424         int ret = 0;
4425         bool flushing = false;
4426
4427 again:
4428         ret = 0;
4429         spin_lock(&space_info->lock);
4430         /*
4431          * We only want to wait if somebody other than us is flushing and we
4432          * are actually allowed to flush all things.
4433          */
4434         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4435                space_info->flush) {
4436                 spin_unlock(&space_info->lock);
4437                 /*
4438                  * If we have a trans handle we can't wait because the flusher
4439                  * may have to commit the transaction, which would mean we would
4440                  * deadlock since we are waiting for the flusher to finish, but
4441                  * hold the current transaction open.
4442                  */
4443                 if (current->journal_info)
4444                         return -EAGAIN;
4445                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4446                 /* Must have been killed, return */
4447                 if (ret)
4448                         return -EINTR;
4449
4450                 spin_lock(&space_info->lock);
4451         }
4452
4453         ret = -ENOSPC;
4454         used = space_info->bytes_used + space_info->bytes_reserved +
4455                 space_info->bytes_pinned + space_info->bytes_readonly +
4456                 space_info->bytes_may_use;
4457
4458         /*
4459          * The idea here is that if we haven't already over-reserved the block
4460          * group then we can go ahead and save our reservation first and then
4461          * start flushing if we need to.  Otherwise, if we've already
4462          * overcommitted, let's start flushing stuff first and then come back
4463          * and try to make our reservation.
4464          */
4465         if (used <= space_info->total_bytes) {
4466                 if (used + orig_bytes <= space_info->total_bytes) {
4467                         space_info->bytes_may_use += orig_bytes;
4468                         trace_btrfs_space_reservation(root->fs_info,
4469                                 "space_info", space_info->flags, orig_bytes, 1);
4470                         ret = 0;
4471                 } else {
4472                         /*
4473                          * Ok, set num_bytes to orig_bytes since we aren't
4474                          * overcommitted; this way we only try to reclaim what
4475                          * we need.
4476                          */
4477                         num_bytes = orig_bytes;
4478                 }
4479         } else {
4480                 /*
4481                  * Ok, we're overcommitted, so set num_bytes to the overcommitted
4482                  * amount plus the amount of bytes that we need for this
4483                  * reservation.
4484                  */
4485                 num_bytes = used - space_info->total_bytes +
4486                         (orig_bytes * 2);
4487         }
4488
4489         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4490                 space_info->bytes_may_use += orig_bytes;
4491                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4492                                               space_info->flags, orig_bytes,
4493                                               1);
4494                 ret = 0;
4495         }
4496
4497         /*
4498          * Couldn't make our reservation, save our place so while we're trying
4499          * to reclaim space we can actually use it instead of somebody else
4500          * stealing it from us.
4501          *
4502          * We make the other tasks wait for the flush only when we can flush
4503          * all things.
4504          */
4505         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4506                 flushing = true;
4507                 space_info->flush = 1;
4508         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4509                 used += orig_bytes;
4510                 /*
4511                  * We will do the space reservation dance during log replay,
4512                  * which means we won't have fs_info->fs_root set, so don't do
4513                  * the async reclaim as we will panic.
4514                  */
4515                 if (!root->fs_info->log_root_recovering &&
4516                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4517                     !work_busy(&root->fs_info->async_reclaim_work))
4518                         queue_work(system_unbound_wq,
4519                                    &root->fs_info->async_reclaim_work);
4520         }
4521         spin_unlock(&space_info->lock);
4522
4523         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4524                 goto out;
4525
4526         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4527                           flush_state);
4528         flush_state++;
4529
4530         /*
4531          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4532          * could happen, so skip the delalloc flush states.
4533          */
4534         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4535             (flush_state == FLUSH_DELALLOC ||
4536              flush_state == FLUSH_DELALLOC_WAIT))
4537                 flush_state = ALLOC_CHUNK;
4538
4539         if (!ret)
4540                 goto again;
4541         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4542                  flush_state < COMMIT_TRANS)
4543                 goto again;
4544         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4545                  flush_state <= COMMIT_TRANS)
4546                 goto again;
4547
4548 out:
4549         if (ret == -ENOSPC &&
4550             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4551                 struct btrfs_block_rsv *global_rsv =
4552                         &root->fs_info->global_block_rsv;
4553
4554                 if (block_rsv != global_rsv &&
4555                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4556                         ret = 0;
4557         }
4558         if (ret == -ENOSPC)
4559                 trace_btrfs_space_reservation(root->fs_info,
4560                                               "space_info:enospc",
4561                                               space_info->flags, orig_bytes, 1);
4562         if (flushing) {
4563                 spin_lock(&space_info->lock);
4564                 space_info->flush = 0;
4565                 wake_up_all(&space_info->wait);
4566                 spin_unlock(&space_info->lock);
4567         }
4568         return ret;
4569 }
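/*
 * Reservation sizing example (values assumed): with total_bytes = 10 GiB,
 * used = 10.5 GiB and orig_bytes = 4 MiB we are already overcommitted, so
 * num_bytes = used - total_bytes + 2 * orig_bytes = 520 MiB, and that larger
 * amount is what the flush states above try to reclaim before the
 * reservation is retried.
 */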
4570
4571 static struct btrfs_block_rsv *get_block_rsv(
4572                                         const struct btrfs_trans_handle *trans,
4573                                         const struct btrfs_root *root)
4574 {
4575         struct btrfs_block_rsv *block_rsv = NULL;
4576
4577         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4578                 block_rsv = trans->block_rsv;
4579
4580         if (root == root->fs_info->csum_root && trans->adding_csums)
4581                 block_rsv = trans->block_rsv;
4582
4583         if (root == root->fs_info->uuid_root)
4584                 block_rsv = trans->block_rsv;
4585
4586         if (!block_rsv)
4587                 block_rsv = root->block_rsv;
4588
4589         if (!block_rsv)
4590                 block_rsv = &root->fs_info->empty_block_rsv;
4591
4592         return block_rsv;
4593 }
4594
4595 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4596                                u64 num_bytes)
4597 {
4598         int ret = -ENOSPC;
4599         spin_lock(&block_rsv->lock);
4600         if (block_rsv->reserved >= num_bytes) {
4601                 block_rsv->reserved -= num_bytes;
4602                 if (block_rsv->reserved < block_rsv->size)
4603                         block_rsv->full = 0;
4604                 ret = 0;
4605         }
4606         spin_unlock(&block_rsv->lock);
4607         return ret;
4608 }
4609
4610 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4611                                 u64 num_bytes, int update_size)
4612 {
4613         spin_lock(&block_rsv->lock);
4614         block_rsv->reserved += num_bytes;
4615         if (update_size)
4616                 block_rsv->size += num_bytes;
4617         else if (block_rsv->reserved >= block_rsv->size)
4618                 block_rsv->full = 1;
4619         spin_unlock(&block_rsv->lock);
4620 }
4621
4622 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4623                              struct btrfs_block_rsv *dest, u64 num_bytes,
4624                              int min_factor)
4625 {
4626         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4627         u64 min_bytes;
4628
4629         if (global_rsv->space_info != dest->space_info)
4630                 return -ENOSPC;
4631
4632         spin_lock(&global_rsv->lock);
4633         min_bytes = div_factor(global_rsv->size, min_factor);
4634         if (global_rsv->reserved < min_bytes + num_bytes) {
4635                 spin_unlock(&global_rsv->lock);
4636                 return -ENOSPC;
4637         }
4638         global_rsv->reserved -= num_bytes;
4639         if (global_rsv->reserved < global_rsv->size)
4640                 global_rsv->full = 0;
4641         spin_unlock(&global_rsv->lock);
4642
4643         block_rsv_add_bytes(dest, num_bytes, 1);
4644         return 0;
4645 }
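/*
 * Example of the min_factor check (values assumed): with a 512 MiB global
 * reserve and min_factor = 5, div_factor() gives min_bytes = 256 MiB, so
 * migrating 16 MiB into the destination rsv only succeeds while
 * global_rsv->reserved is at least 272 MiB.
 */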
4646
4647 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4648                                     struct btrfs_block_rsv *block_rsv,
4649                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4650 {
4651         struct btrfs_space_info *space_info = block_rsv->space_info;
4652
4653         spin_lock(&block_rsv->lock);
4654         if (num_bytes == (u64)-1)
4655                 num_bytes = block_rsv->size;
4656         block_rsv->size -= num_bytes;
4657         if (block_rsv->reserved >= block_rsv->size) {
4658                 num_bytes = block_rsv->reserved - block_rsv->size;
4659                 block_rsv->reserved = block_rsv->size;
4660                 block_rsv->full = 1;
4661         } else {
4662                 num_bytes = 0;
4663         }
4664         spin_unlock(&block_rsv->lock);
4665
4666         if (num_bytes > 0) {
4667                 if (dest) {
4668                         spin_lock(&dest->lock);
4669                         if (!dest->full) {
4670                                 u64 bytes_to_add;
4671
4672                                 bytes_to_add = dest->size - dest->reserved;
4673                                 bytes_to_add = min(num_bytes, bytes_to_add);
4674                                 dest->reserved += bytes_to_add;
4675                                 if (dest->reserved >= dest->size)
4676                                         dest->full = 1;
4677                                 num_bytes -= bytes_to_add;
4678                         }
4679                         spin_unlock(&dest->lock);
4680                 }
4681                 if (num_bytes) {
4682                         spin_lock(&space_info->lock);
4683                         space_info->bytes_may_use -= num_bytes;
4684                         trace_btrfs_space_reservation(fs_info, "space_info",
4685                                         space_info->flags, num_bytes, 0);
4686                         spin_unlock(&space_info->lock);
4687                 }
4688         }
4689 }
4690
4691 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4692                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4693 {
4694         int ret;
4695
4696         ret = block_rsv_use_bytes(src, num_bytes);
4697         if (ret)
4698                 return ret;
4699
4700         block_rsv_add_bytes(dst, num_bytes, 1);
4701         return 0;
4702 }
4703
4704 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4705 {
4706         memset(rsv, 0, sizeof(*rsv));
4707         spin_lock_init(&rsv->lock);
4708         rsv->type = type;
4709 }
4710
4711 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4712                                               unsigned short type)
4713 {
4714         struct btrfs_block_rsv *block_rsv;
4715         struct btrfs_fs_info *fs_info = root->fs_info;
4716
4717         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4718         if (!block_rsv)
4719                 return NULL;
4720
4721         btrfs_init_block_rsv(block_rsv, type);
4722         block_rsv->space_info = __find_space_info(fs_info,
4723                                                   BTRFS_BLOCK_GROUP_METADATA);
4724         return block_rsv;
4725 }
4726
4727 void btrfs_free_block_rsv(struct btrfs_root *root,
4728                           struct btrfs_block_rsv *rsv)
4729 {
4730         if (!rsv)
4731                 return;
4732         btrfs_block_rsv_release(root, rsv, (u64)-1);
4733         kfree(rsv);
4734 }
4735
4736 int btrfs_block_rsv_add(struct btrfs_root *root,
4737                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4738                         enum btrfs_reserve_flush_enum flush)
4739 {
4740         int ret;
4741
4742         if (num_bytes == 0)
4743                 return 0;
4744
4745         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4746         if (!ret) {
4747                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4748                 return 0;
4749         }
4750
4751         return ret;
4752 }
4753
4754 int btrfs_block_rsv_check(struct btrfs_root *root,
4755                           struct btrfs_block_rsv *block_rsv, int min_factor)
4756 {
4757         u64 num_bytes = 0;
4758         int ret = -ENOSPC;
4759
4760         if (!block_rsv)
4761                 return 0;
4762
4763         spin_lock(&block_rsv->lock);
4764         num_bytes = div_factor(block_rsv->size, min_factor);
4765         if (block_rsv->reserved >= num_bytes)
4766                 ret = 0;
4767         spin_unlock(&block_rsv->lock);
4768
4769         return ret;
4770 }
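/*
 * Example (values assumed): min_factor = 5 asks whether the rsv is at least
 * half full; with size = 64 MiB the check passes only while reserved >=
 * div_factor(64 MiB, 5) = 32 MiB, otherwise -ENOSPC tells the caller to top
 * the reservation back up.
 */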
4771
4772 int btrfs_block_rsv_refill(struct btrfs_root *root,
4773                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4774                            enum btrfs_reserve_flush_enum flush)
4775 {
4776         u64 num_bytes = 0;
4777         int ret = -ENOSPC;
4778
4779         if (!block_rsv)
4780                 return 0;
4781
4782         spin_lock(&block_rsv->lock);
4783         num_bytes = min_reserved;
4784         if (block_rsv->reserved >= num_bytes)
4785                 ret = 0;
4786         else
4787                 num_bytes -= block_rsv->reserved;
4788         spin_unlock(&block_rsv->lock);
4789
4790         if (!ret)
4791                 return 0;
4792
4793         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4794         if (!ret) {
4795                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4796                 return 0;
4797         }
4798
4799         return ret;
4800 }
4801
4802 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4803                             struct btrfs_block_rsv *dst_rsv,
4804                             u64 num_bytes)
4805 {
4806         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4807 }
4808
4809 void btrfs_block_rsv_release(struct btrfs_root *root,
4810                              struct btrfs_block_rsv *block_rsv,
4811                              u64 num_bytes)
4812 {
4813         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4814         if (global_rsv == block_rsv ||
4815             block_rsv->space_info != global_rsv->space_info)
4816                 global_rsv = NULL;
4817         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4818                                 num_bytes);
4819 }
4820
4821 /*
4822  * Helper to calculate the size of the global block reservation.
4823  * The desired value is the sum of the space used by the extent tree,
4824  * checksum tree and root tree.
4825  */
4826 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4827 {
4828         struct btrfs_space_info *sinfo;
4829         u64 num_bytes;
4830         u64 meta_used;
4831         u64 data_used;
4832         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4833
4834         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4835         spin_lock(&sinfo->lock);
4836         data_used = sinfo->bytes_used;
4837         spin_unlock(&sinfo->lock);
4838
4839         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4840         spin_lock(&sinfo->lock);
4841         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4842                 data_used = 0;
4843         meta_used = sinfo->bytes_used;
4844         spin_unlock(&sinfo->lock);
4845
4846         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4847                     csum_size * 2;
4848         num_bytes += div64_u64(data_used + meta_used, 50);
4849
4850         if (num_bytes * 3 > meta_used)
4851                 num_bytes = div64_u64(meta_used, 3);
4852
4853         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
4854 }
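/*
 * Worked example (all values assumed): with 100 GiB of data, 2 GiB of
 * metadata used, 4 KiB blocks and a 4-byte csum, the csum term is
 * (100 GiB / 4 KiB) * 4 * 2 = 200 MiB, plus (100 GiB + 2 GiB) / 50 ~= 2 GiB,
 * about 2.2 GiB in total.  That exceeds meta_used / 3 (~683 MiB), so the
 * result is clamped to meta_used / 3 and rounded up to a multiple of
 * nodesize << 10.
 */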
4855
4856 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4857 {
4858         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4859         struct btrfs_space_info *sinfo = block_rsv->space_info;
4860         u64 num_bytes;
4861
4862         num_bytes = calc_global_metadata_size(fs_info);
4863
4864         spin_lock(&sinfo->lock);
4865         spin_lock(&block_rsv->lock);
4866
4867         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4868
4869         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4870                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4871                     sinfo->bytes_may_use;
4872
4873         if (sinfo->total_bytes > num_bytes) {
4874                 num_bytes = sinfo->total_bytes - num_bytes;
4875                 block_rsv->reserved += num_bytes;
4876                 sinfo->bytes_may_use += num_bytes;
4877                 trace_btrfs_space_reservation(fs_info, "space_info",
4878                                       sinfo->flags, num_bytes, 1);
4879         }
4880
4881         if (block_rsv->reserved >= block_rsv->size) {
4882                 num_bytes = block_rsv->reserved - block_rsv->size;
4883                 sinfo->bytes_may_use -= num_bytes;
4884                 trace_btrfs_space_reservation(fs_info, "space_info",
4885                                       sinfo->flags, num_bytes, 0);
4886                 block_rsv->reserved = block_rsv->size;
4887                 block_rsv->full = 1;
4888         }
4889
4890         spin_unlock(&block_rsv->lock);
4891         spin_unlock(&sinfo->lock);
4892 }
4893
4894 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4895 {
4896         struct btrfs_space_info *space_info;
4897
4898         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4899         fs_info->chunk_block_rsv.space_info = space_info;
4900
4901         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4902         fs_info->global_block_rsv.space_info = space_info;
4903         fs_info->delalloc_block_rsv.space_info = space_info;
4904         fs_info->trans_block_rsv.space_info = space_info;
4905         fs_info->empty_block_rsv.space_info = space_info;
4906         fs_info->delayed_block_rsv.space_info = space_info;
4907
4908         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4909         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4910         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4911         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4912         if (fs_info->quota_root)
4913                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4914         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4915
4916         update_global_block_rsv(fs_info);
4917 }
4918
4919 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4920 {
4921         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4922                                 (u64)-1);
4923         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4924         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4925         WARN_ON(fs_info->trans_block_rsv.size > 0);
4926         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4927         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4928         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4929         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4930         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4931 }
4932
4933 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4934                                   struct btrfs_root *root)
4935 {
4936         if (!trans->block_rsv)
4937                 return;
4938
4939         if (!trans->bytes_reserved)
4940                 return;
4941
4942         trace_btrfs_space_reservation(root->fs_info, "transaction",
4943                                       trans->transid, trans->bytes_reserved, 0);
4944         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4945         trans->bytes_reserved = 0;
4946 }
4947
4948 /* Can only return 0 or -ENOSPC */
4949 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4950                                   struct inode *inode)
4951 {
4952         struct btrfs_root *root = BTRFS_I(inode)->root;
4953         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4954         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4955
4956         /*
4957          * We need to hold space in order to delete our orphan item once we've
4958          * added it, so this takes the reservation so we can release it later
4959          * when we are truly done with the orphan item.
4960          */
4961         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4962         trace_btrfs_space_reservation(root->fs_info, "orphan",
4963                                       btrfs_ino(inode), num_bytes, 1);
4964         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4965 }
4966
4967 void btrfs_orphan_release_metadata(struct inode *inode)
4968 {
4969         struct btrfs_root *root = BTRFS_I(inode)->root;
4970         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4971         trace_btrfs_space_reservation(root->fs_info, "orphan",
4972                                       btrfs_ino(inode), num_bytes, 0);
4973         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4974 }
4975
4976 /*
4977  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4978  * root: the root of the parent directory
4979  * rsv: block reservation
4980  * items: the number of items that we need to reserve
4981  * qgroup_reserved: used to return the reserved size in qgroup
4982  *
4983  * This function is used to reserve the space for snapshot/subvolume
4984  * creation and deletion.  Those operations differ from the common
4985  * file/directory operations: they change two fs/file trees
4986  * and the root tree, and the number of items that the qgroup reserves
4987  * differs from the free space reservation.  So we cannot use
4988  * the space reservation mechanism in start_transaction().
4989  */
4990 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4991                                      struct btrfs_block_rsv *rsv,
4992                                      int items,
4993                                      u64 *qgroup_reserved,
4994                                      bool use_global_rsv)
4995 {
4996         u64 num_bytes;
4997         int ret;
4998         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4999
5000         if (root->fs_info->quota_enabled) {
5001                 /* One for parent inode, two for dir entries */
5002                 num_bytes = 3 * root->nodesize;
5003                 ret = btrfs_qgroup_reserve(root, num_bytes);
5004                 if (ret)
5005                         return ret;
5006         } else {
5007                 num_bytes = 0;
5008         }
5009
5010         *qgroup_reserved = num_bytes;
5011
5012         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5013         rsv->space_info = __find_space_info(root->fs_info,
5014                                             BTRFS_BLOCK_GROUP_METADATA);
5015         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5016                                   BTRFS_RESERVE_FLUSH_ALL);
5017
5018         if (ret == -ENOSPC && use_global_rsv)
5019                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5020
5021         if (ret) {
5022                 if (*qgroup_reserved)
5023                         btrfs_qgroup_free(root, *qgroup_reserved);
5024         }
5025
5026         return ret;
5027 }
5028
5029 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5030                                       struct btrfs_block_rsv *rsv,
5031                                       u64 qgroup_reserved)
5032 {
5033         btrfs_block_rsv_release(root, rsv, (u64)-1);
5034         if (qgroup_reserved)
5035                 btrfs_qgroup_free(root, qgroup_reserved);
5036 }
5037
5038 /**
5039  * drop_outstanding_extent - drop an outstanding extent
5040  * @inode: the inode we're dropping the extent for
5041  *
5042  * This is called when we are freeing up an outstanding extent, either
5043  * after an error or after an extent is written.  This will return the number of
5044  * reserved extents that need to be freed.  This must be called with
5045  * BTRFS_I(inode)->lock held.
5046  */
5047 static unsigned drop_outstanding_extent(struct inode *inode)
5048 {
5049         unsigned drop_inode_space = 0;
5050         unsigned dropped_extents = 0;
5051
5052         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
5053         BTRFS_I(inode)->outstanding_extents--;
5054
5055         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5056             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5057                                &BTRFS_I(inode)->runtime_flags))
5058                 drop_inode_space = 1;
5059
5060         /*
5061          * If we have at least as many outstanding extents as we have
5062          * reserved then we need to leave the reserved extents count alone.
5063          */
5064         if (BTRFS_I(inode)->outstanding_extents >=
5065             BTRFS_I(inode)->reserved_extents)
5066                 return drop_inode_space;
5067
5068         dropped_extents = BTRFS_I(inode)->reserved_extents -
5069                 BTRFS_I(inode)->outstanding_extents;
5070         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5071         return dropped_extents + drop_inode_space;
5072 }
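/*
 * Example (illustrative state): an inode with outstanding_extents = 1,
 * reserved_extents = 3 and the DELALLOC_META_RESERVED bit set drops to 0
 * outstanding extents here, so the function returns (3 - 0) + 1 = 4
 * reservations for the caller to release.  If outstanding were still >=
 * reserved, only the single inode-update reservation (if any) would be
 * returned.
 */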
5073
5074 /**
5075  * calc_csum_metadata_size - return the amount of metadata space that must be
5076  *      reserved/free'd for the given bytes.
5077  * @inode: the inode we're manipulating
5078  * @num_bytes: the number of bytes in question
5079  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5080  *
5081  * This adjusts the number of csum_bytes in the inode and then returns the
5082  * correct amount of metadata that must either be reserved or freed.  We
5083  * calculate how many checksums we can fit into one leaf and then divide the
5084  * number of bytes that will need to be checksummed by this value to figure out
5085  * how many checksums will be required.  If we are adding bytes then the number
5086  * may go up and we will return the number of additional bytes that must be
5087  * reserved.  If it is going down we will return the number of bytes that must
5088  * be freed.
5089  *
5090  * This must be called with BTRFS_I(inode)->lock held.
5091  */
5092 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5093                                    int reserve)
5094 {
5095         struct btrfs_root *root = BTRFS_I(inode)->root;
5096         u64 csum_size;
5097         int num_csums_per_leaf;
5098         int num_csums;
5099         int old_csums;
5100
5101         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5102             BTRFS_I(inode)->csum_bytes == 0)
5103                 return 0;
5104
5105         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5106         if (reserve)
5107                 BTRFS_I(inode)->csum_bytes += num_bytes;
5108         else
5109                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5110         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
5111         num_csums_per_leaf = (int)div64_u64(csum_size,
5112                                             sizeof(struct btrfs_csum_item) +
5113                                             sizeof(struct btrfs_disk_key));
5114         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
5115         num_csums = num_csums + num_csums_per_leaf - 1;
5116         num_csums = num_csums / num_csums_per_leaf;
5117
5118         old_csums = old_csums + num_csums_per_leaf - 1;
5119         old_csums = old_csums / num_csums_per_leaf;
5120
5121         /* No change, no need to reserve more */
5122         if (old_csums == num_csums)
5123                 return 0;
5124
5125         if (reserve)
5126                 return btrfs_calc_trans_metadata_size(root,
5127                                                       num_csums - old_csums);
5128
5129         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5130 }
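/*
 * Worked example (per-leaf capacity assumed for illustration): with 4 KiB
 * sectors, suppose the computation above yields roughly 800 csums per leaf.
 * If the inode's csum_bytes grows from 1 MiB (256 csums, 1 leaf) to 17 MiB
 * (4352 csums, 6 leaves) while reserving, the function returns
 * btrfs_calc_trans_metadata_size(root, 5) of additional metadata to reserve.
 */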
5131
5132 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5133 {
5134         struct btrfs_root *root = BTRFS_I(inode)->root;
5135         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5136         u64 to_reserve = 0;
5137         u64 csum_bytes;
5138         unsigned nr_extents = 0;
5139         int extra_reserve = 0;
5140         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5141         int ret = 0;
5142         bool delalloc_lock = true;
5143         u64 to_free = 0;
5144         unsigned dropped;
5145
5146         /* If we are a free space inode we need to not flush since we will be in
5147          * the middle of a transaction commit.  We also don't need the delalloc
5148          * mutex since we won't race with anybody.  We need this mostly to make
5149          * lockdep shut its filthy mouth.
5150          */
5151         if (btrfs_is_free_space_inode(inode)) {
5152                 flush = BTRFS_RESERVE_NO_FLUSH;
5153                 delalloc_lock = false;
5154         }
5155
5156         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5157             btrfs_transaction_in_commit(root->fs_info))
5158                 schedule_timeout(1);
5159
5160         if (delalloc_lock)
5161                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5162
5163         num_bytes = ALIGN(num_bytes, root->sectorsize);
5164
5165         spin_lock(&BTRFS_I(inode)->lock);
5166         BTRFS_I(inode)->outstanding_extents++;
5167
5168         if (BTRFS_I(inode)->outstanding_extents >
5169             BTRFS_I(inode)->reserved_extents)
5170                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5171                         BTRFS_I(inode)->reserved_extents;
5172
5173         /*
5174          * Add an item to reserve for updating the inode when we complete the
5175          * delalloc io.
5176          */
5177         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5178                       &BTRFS_I(inode)->runtime_flags)) {
5179                 nr_extents++;
5180                 extra_reserve = 1;
5181         }
5182
5183         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5184         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5185         csum_bytes = BTRFS_I(inode)->csum_bytes;
5186         spin_unlock(&BTRFS_I(inode)->lock);
5187
5188         if (root->fs_info->quota_enabled) {
5189                 ret = btrfs_qgroup_reserve(root, num_bytes +
5190                                            nr_extents * root->nodesize);
5191                 if (ret)
5192                         goto out_fail;
5193         }
5194
5195         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5196         if (unlikely(ret)) {
5197                 if (root->fs_info->quota_enabled)
5198                         btrfs_qgroup_free(root, num_bytes +
5199                                                 nr_extents * root->nodesize);
5200                 goto out_fail;
5201         }
5202
5203         spin_lock(&BTRFS_I(inode)->lock);
5204         if (extra_reserve) {
5205                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5206                         &BTRFS_I(inode)->runtime_flags);
5207                 nr_extents--;
5208         }
5209         BTRFS_I(inode)->reserved_extents += nr_extents;
5210         spin_unlock(&BTRFS_I(inode)->lock);
5211
5212         if (delalloc_lock)
5213                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5214
5215         if (to_reserve)
5216                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5217                                               btrfs_ino(inode), to_reserve, 1);
5218         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5219
5220         return 0;
5221
5222 out_fail:
5223         spin_lock(&BTRFS_I(inode)->lock);
5224         dropped = drop_outstanding_extent(inode);
5225         /*
5226          * If the inode's csum_bytes is the same as the original
5227          * csum_bytes then we know we haven't raced with any free()ers
5228          * so we can just reduce the inode's csum_bytes and carry on.
5229          */
5230         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5231                 calc_csum_metadata_size(inode, num_bytes, 0);
5232         } else {
5233                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5234                 u64 bytes;
5235
5236                 /*
5237                  * This is tricky, but first we need to figure out how much we
5238                  * free'd from any free-ers that occurred during this
5239                  * reservation, so we reset ->csum_bytes to the csum_bytes
5240                  * before we dropped our lock, and then call the free for the
5241                  * number of bytes that were freed while we were trying our
5242                  * reservation.
5243                  */
5244                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5245                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5246                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5247
5248
5249                 /*
5250                  * Now we need to see how much we would have freed had we not
5251                  * been making this reservation and our ->csum_bytes were not
5252                  * artificially inflated.
5253                  */
5254                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5255                 bytes = csum_bytes - orig_csum_bytes;
5256                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5257
5258                 /*
5259                  * Now reset ->csum_bytes to what it should be.  If bytes is
5260                  * more than to_free then we would have free'd more space had we
5261                  * not had an artificially high ->csum_bytes, so we need to free
5262                  * the remainder.  If bytes is the same or less then we don't
5263                  * need to do anything, the other free-ers did the correct
5264                  * thing.
5265                  */
5266                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5267                 if (bytes > to_free)
5268                         to_free = bytes - to_free;
5269                 else
5270                         to_free = 0;
5271         }
5272         spin_unlock(&BTRFS_I(inode)->lock);
5273         if (dropped)
5274                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5275
5276         if (to_free) {
5277                 btrfs_block_rsv_release(root, block_rsv, to_free);
5278                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5279                                               btrfs_ino(inode), to_free, 0);
5280         }
5281         if (delalloc_lock)
5282                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5283         return ret;
5284 }
5285
5286 /**
5287  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5288  * @inode: the inode to release the reservation for
5289  * @num_bytes: the number of bytes we're releasing
5290  *
5291  * This will release the metadata reservation for an inode.  This can be called
5292  * once we complete IO for a given set of bytes to release their metadata
5293  * reservations.
5294  */
5295 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5296 {
5297         struct btrfs_root *root = BTRFS_I(inode)->root;
5298         u64 to_free = 0;
5299         unsigned dropped;
5300
5301         num_bytes = ALIGN(num_bytes, root->sectorsize);
5302         spin_lock(&BTRFS_I(inode)->lock);
5303         dropped = drop_outstanding_extent(inode);
5304
5305         if (num_bytes)
5306                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5307         spin_unlock(&BTRFS_I(inode)->lock);
5308         if (dropped > 0)
5309                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5310
5311         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5312                                       btrfs_ino(inode), to_free, 0);
5313         if (root->fs_info->quota_enabled) {
5314                 btrfs_qgroup_free(root, num_bytes +
5315                                         dropped * root->nodesize);
5316         }
5317
5318         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5319                                 to_free);
5320 }
5321
5322 /**
5323  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5324  * @inode: inode we're writing to
5325  * @num_bytes: the number of bytes we want to allocate
5326  *
5327  * This will do the following things
5328  *
5329  * o reserve space in the data space info for num_bytes
5330  * o reserve space in the metadata space info based on number of outstanding
5331  *   extents and how many csums will be needed
5332  * o add to the inodes ->delalloc_bytes
5333  * o add it to the fs_info's delalloc inodes list.
5334  *
5335  * This will return 0 for success and -ENOSPC if there is no space left.
5336  */
5337 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5338 {
5339         int ret;
5340
5341         ret = btrfs_check_data_free_space(inode, num_bytes);
5342         if (ret)
5343                 return ret;
5344
5345         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5346         if (ret) {
5347                 btrfs_free_reserved_data_space(inode, num_bytes);
5348                 return ret;
5349         }
5350
5351         return 0;
5352 }
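/*
 * Typical pairing (simplified sketch of a caller; write_the_data() is just a
 * placeholder, not a real helper):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = write_the_data(inode, num_bytes);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *
 * On success the metadata half is released later, when the delalloc IO
 * completes, via btrfs_delalloc_release_metadata().
 */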
5353
5354 /**
5355  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5356  * @inode: inode we're releasing space for
5357  * @num_bytes: the number of bytes we want to free up
5358  *
5359  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5360  * called in the case that we don't need the metadata AND data reservations
5361  * anymore, e.g. if there is an error or we insert an inline extent.
5362  *
5363  * This function will release the metadata space that was not used and will
5364  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5365  * list if there are no delalloc bytes left.
5366  */
5367 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5368 {
5369         btrfs_delalloc_release_metadata(inode, num_bytes);
5370         btrfs_free_reserved_data_space(inode, num_bytes);
5371 }
5372
5373 static int update_block_group(struct btrfs_root *root,
5374                               u64 bytenr, u64 num_bytes, int alloc)
5375 {
5376         struct btrfs_block_group_cache *cache = NULL;
5377         struct btrfs_fs_info *info = root->fs_info;
5378         u64 total = num_bytes;
5379         u64 old_val;
5380         u64 byte_in_group;
5381         int factor;
5382
5383         /* block accounting for super block */
5384         spin_lock(&info->delalloc_root_lock);
5385         old_val = btrfs_super_bytes_used(info->super_copy);
5386         if (alloc)
5387                 old_val += num_bytes;
5388         else
5389                 old_val -= num_bytes;
5390         btrfs_set_super_bytes_used(info->super_copy, old_val);
5391         spin_unlock(&info->delalloc_root_lock);
5392
5393         while (total) {
5394                 cache = btrfs_lookup_block_group(info, bytenr);
5395                 if (!cache)
5396                         return -ENOENT;
5397                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5398                                     BTRFS_BLOCK_GROUP_RAID1 |
5399                                     BTRFS_BLOCK_GROUP_RAID10))
5400                         factor = 2;
5401                 else
5402                         factor = 1;
5403                 /*
5404                  * If this block group has free space cache written out, we
5405                  * need to make sure to load it if we are removing space.  This
5406                  * is because we need the unpinning stage to actually add the
5407                  * space back to the block group, otherwise we will leak space.
5408                  */
5409                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5410                         cache_block_group(cache, 1);
5411
5412                 byte_in_group = bytenr - cache->key.objectid;
5413                 WARN_ON(byte_in_group > cache->key.offset);
5414
5415                 spin_lock(&cache->space_info->lock);
5416                 spin_lock(&cache->lock);
5417
5418                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5419                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5420                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5421
5422                 cache->dirty = 1;
5423                 old_val = btrfs_block_group_used(&cache->item);
5424                 num_bytes = min(total, cache->key.offset - byte_in_group);
5425                 if (alloc) {
5426                         old_val += num_bytes;
5427                         btrfs_set_block_group_used(&cache->item, old_val);
5428                         cache->reserved -= num_bytes;
5429                         cache->space_info->bytes_reserved -= num_bytes;
5430                         cache->space_info->bytes_used += num_bytes;
5431                         cache->space_info->disk_used += num_bytes * factor;
5432                         spin_unlock(&cache->lock);
5433                         spin_unlock(&cache->space_info->lock);
5434                 } else {
5435                         old_val -= num_bytes;
5436
5437                         /*
5438                          * No longer have used bytes in this block group, queue
5439                          * it for deletion.
5440                          */
5441                         if (old_val == 0) {
5442                                 spin_lock(&info->unused_bgs_lock);
5443                                 if (list_empty(&cache->bg_list)) {
5444                                         btrfs_get_block_group(cache);
5445                                         list_add_tail(&cache->bg_list,
5446                                                       &info->unused_bgs);
5447                                 }
5448                                 spin_unlock(&info->unused_bgs_lock);
5449                         }
5450                         btrfs_set_block_group_used(&cache->item, old_val);
5451                         cache->pinned += num_bytes;
5452                         cache->space_info->bytes_pinned += num_bytes;
5453                         cache->space_info->bytes_used -= num_bytes;
5454                         cache->space_info->disk_used -= num_bytes * factor;
5455                         spin_unlock(&cache->lock);
5456                         spin_unlock(&cache->space_info->lock);
5457
5458                         set_extent_dirty(info->pinned_extents,
5459                                          bytenr, bytenr + num_bytes - 1,
5460                                          GFP_NOFS | __GFP_NOFAIL);
5461                 }
5462                 btrfs_put_block_group(cache);
5463                 total -= num_bytes;
5464                 bytenr += num_bytes;
5465         }
5466         return 0;
5467 }
5468
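/*
 * Return the logical address of the first block group.  Use the cached
 * fs_info->first_logical_byte when it is valid, otherwise fall back to
 * looking up the first block group at or after @search_start.
 */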
5469 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5470 {
5471         struct btrfs_block_group_cache *cache;
5472         u64 bytenr;
5473
5474         spin_lock(&root->fs_info->block_group_cache_lock);
5475         bytenr = root->fs_info->first_logical_byte;
5476         spin_unlock(&root->fs_info->block_group_cache_lock);
5477
5478         if (bytenr < (u64)-1)
5479                 return bytenr;
5480
5481         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5482         if (!cache)
5483                 return 0;
5484
5485         bytenr = cache->key.objectid;
5486         btrfs_put_block_group(cache);
5487
5488         return bytenr;
5489 }
5490
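/*
 * Account @num_bytes at @bytenr as pinned in @cache and its space_info
 * (moving the bytes out of the reserved counters when @reserved is set)
 * and mark the range dirty in the pinned_extents tree so it gets
 * unpinned at transaction commit.
 */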
5491 static int pin_down_extent(struct btrfs_root *root,
5492                            struct btrfs_block_group_cache *cache,
5493                            u64 bytenr, u64 num_bytes, int reserved)
5494 {
5495         spin_lock(&cache->space_info->lock);
5496         spin_lock(&cache->lock);
5497         cache->pinned += num_bytes;
5498         cache->space_info->bytes_pinned += num_bytes;
5499         if (reserved) {
5500                 cache->reserved -= num_bytes;
5501                 cache->space_info->bytes_reserved -= num_bytes;
5502         }
5503         spin_unlock(&cache->lock);
5504         spin_unlock(&cache->space_info->lock);
5505
5506         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5507                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5508         if (reserved)
5509                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5510         return 0;
5511 }
5512
5513 /*
5514  * this function must be called within a transaction
5515  */
5516 int btrfs_pin_extent(struct btrfs_root *root,
5517                      u64 bytenr, u64 num_bytes, int reserved)
5518 {
5519         struct btrfs_block_group_cache *cache;
5520
5521         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5522         BUG_ON(!cache); /* Logic error */
5523
5524         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5525
5526         btrfs_put_block_group(cache);
5527         return 0;
5528 }
5529
5530 /*
5531  * this function must be called within a transaction
5532  */
5533 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5534                                     u64 bytenr, u64 num_bytes)
5535 {
5536         struct btrfs_block_group_cache *cache;
5537         int ret;
5538
5539         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5540         if (!cache)
5541                 return -EINVAL;
5542
5543         /*
5544          * pull in the free space cache (if any) so that our pin
5545          * removes the free space from the cache.  We pass load_only == 1
5546          * because the slow path that reads in the free extents from disk
5547          * already checks the pinned extents.
5548          */
5549         cache_block_group(cache, 1);
5550
5551         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5552
5553         /* remove us from the free space cache (if we're there at all) */
5554         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5555         btrfs_put_block_group(cache);
5556         return ret;
5557 }
5558
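/*
 * Make sure the range [start, start + num_bytes) is not handed out by the
 * allocator: remove it from the free space cache for the part the caching
 * thread has already scanned, and add it to the excluded extents for the
 * part it has not reached yet.
 */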
5559 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5560 {
5561         int ret;
5562         struct btrfs_block_group_cache *block_group;
5563         struct btrfs_caching_control *caching_ctl;
5564
5565         block_group = btrfs_lookup_block_group(root->fs_info, start);
5566         if (!block_group)
5567                 return -EINVAL;
5568
5569         cache_block_group(block_group, 0);
5570         caching_ctl = get_caching_control(block_group);
5571
5572         if (!caching_ctl) {
5573                 /* Logic error */
5574                 BUG_ON(!block_group_cache_done(block_group));
5575                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5576         } else {
5577                 mutex_lock(&caching_ctl->mutex);
5578
5579                 if (start >= caching_ctl->progress) {
5580                         ret = add_excluded_extent(root, start, num_bytes);
5581                 } else if (start + num_bytes <= caching_ctl->progress) {
5582                         ret = btrfs_remove_free_space(block_group,
5583                                                       start, num_bytes);
5584                 } else {
5585                         num_bytes = caching_ctl->progress - start;
5586                         ret = btrfs_remove_free_space(block_group,
5587                                                       start, num_bytes);
5588                         if (ret)
5589                                 goto out_lock;
5590
5591                         num_bytes = (start + num_bytes) -
5592                                 caching_ctl->progress;
5593                         start = caching_ctl->progress;
5594                         ret = add_excluded_extent(root, start, num_bytes);
5595                 }
5596 out_lock:
5597                 mutex_unlock(&caching_ctl->mutex);
5598                 put_caching_control(caching_ctl);
5599         }
5600         btrfs_put_block_group(block_group);
5601         return ret;
5602 }
5603
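/*
 * Walk a logged leaf during tree-log replay and exclude the data extents
 * it references from the free space accounting, so the allocator cannot
 * hand them out while the log is being replayed.  Only relevant for
 * mixed block groups, where data shares block groups with metadata.
 */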
5604 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5605                                  struct extent_buffer *eb)
5606 {
5607         struct btrfs_file_extent_item *item;
5608         struct btrfs_key key;
5609         int found_type;
5610         int i;
5611
5612         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5613                 return 0;
5614
5615         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5616                 btrfs_item_key_to_cpu(eb, &key, i);
5617                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5618                         continue;
5619                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5620                 found_type = btrfs_file_extent_type(eb, item);
5621                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5622                         continue;
5623                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5624                         continue;
5625                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5626                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5627                 __exclude_logged_extent(log, key.objectid, key.offset);
5628         }
5629
5630         return 0;
5631 }
5632
5633 /**
5634  * btrfs_update_reserved_bytes - update the block_group and space info counters
5635  * @cache:      The cache we are manipulating
5636  * @num_bytes:  The number of bytes in question
5637  * @reserve:    One of the reservation enums
5638  * @delalloc:   Whether the blocks are allocated for a delalloc write
5639  *
5640  * This is called by the allocator when it reserves space, or by somebody who is
5641  * freeing space that was never actually used on disk.  For example if you
5642  * reserve some space for a new leaf in transaction A and before transaction A
5643  * commits you free that leaf, you call this with reserve set to RESERVE_FREE
5644  * in order to clear the reservation.
5645  *
5646  * Metadata reservations should use RESERVE_ALLOC so we do the proper
5647  * ENOSPC accounting.  For data we handle the reservation through clearing the
5648  * delalloc bits in the io_tree.  We have to do this since we could end up
5649  * allocating less disk space for the amount of data we have reserved in the
5650  * case of compression.
5651  *
5652  * If this is a reservation and the block group has become read only we cannot
5653  * make the reservation and return -EAGAIN, otherwise this function always
5654  * succeeds.
5655  */
5656 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5657                                        u64 num_bytes, int reserve, int delalloc)
5658 {
5659         struct btrfs_space_info *space_info = cache->space_info;
5660         int ret = 0;
5661
5662         spin_lock(&space_info->lock);
5663         spin_lock(&cache->lock);
5664         if (reserve != RESERVE_FREE) {
5665                 if (cache->ro) {
5666                         ret = -EAGAIN;
5667                 } else {
5668                         cache->reserved += num_bytes;
5669                         space_info->bytes_reserved += num_bytes;
5670                         if (reserve == RESERVE_ALLOC) {
5671                                 trace_btrfs_space_reservation(cache->fs_info,
5672                                                 "space_info", space_info->flags,
5673                                                 num_bytes, 0);
5674                                 space_info->bytes_may_use -= num_bytes;
5675                         }
5676
5677                         if (delalloc)
5678                                 cache->delalloc_bytes += num_bytes;
5679                 }
5680         } else {
5681                 if (cache->ro)
5682                         space_info->bytes_readonly += num_bytes;
5683                 cache->reserved -= num_bytes;
5684                 space_info->bytes_reserved -= num_bytes;
5685
5686                 if (delalloc)
5687                         cache->delalloc_bytes -= num_bytes;
5688         }
5689         spin_unlock(&cache->lock);
5690         spin_unlock(&space_info->lock);
5691         return ret;
5692 }
5693
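/*
 * Called around transaction commit: record how far each in-progress
 * caching block group has scanned (last_byte_to_unpin) and switch
 * pinned_extents to the other freed_extents tree, so extents pinned by
 * the next transaction are kept separate from the ones about to be
 * unpinned.
 */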
5694 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5695                                 struct btrfs_root *root)
5696 {
5697         struct btrfs_fs_info *fs_info = root->fs_info;
5698         struct btrfs_caching_control *next;
5699         struct btrfs_caching_control *caching_ctl;
5700         struct btrfs_block_group_cache *cache;
5701
5702         down_write(&fs_info->commit_root_sem);
5703
5704         list_for_each_entry_safe(caching_ctl, next,
5705                                  &fs_info->caching_block_groups, list) {
5706                 cache = caching_ctl->block_group;
5707                 if (block_group_cache_done(cache)) {
5708                         cache->last_byte_to_unpin = (u64)-1;
5709                         list_del_init(&caching_ctl->list);
5710                         put_caching_control(caching_ctl);
5711                 } else {
5712                         cache->last_byte_to_unpin = caching_ctl->progress;
5713                 }
5714         }
5715
5716         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5717                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5718         else
5719                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5720
5721         up_write(&fs_info->commit_root_sem);
5722
5723         update_global_block_rsv(fs_info);
5724 }
5725
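/*
 * Return the byte range [start, end] to the free space caches of the
 * block groups it spans and drop the pinned counters.  Unpinned
 * metadata space is used to refill the global block reservation while
 * that reservation is not full.
 */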
5726 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5727 {
5728         struct btrfs_fs_info *fs_info = root->fs_info;
5729         struct btrfs_block_group_cache *cache = NULL;
5730         struct btrfs_space_info *space_info;
5731         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5732         u64 len;
5733         bool readonly;
5734
5735         while (start <= end) {
5736                 readonly = false;
5737                 if (!cache ||
5738                     start >= cache->key.objectid + cache->key.offset) {
5739                         if (cache)
5740                                 btrfs_put_block_group(cache);
5741                         cache = btrfs_lookup_block_group(fs_info, start);
5742                         BUG_ON(!cache); /* Logic error */
5743                 }
5744
5745                 len = cache->key.objectid + cache->key.offset - start;
5746                 len = min(len, end + 1 - start);
5747
5748                 if (start < cache->last_byte_to_unpin) {
5749                         len = min(len, cache->last_byte_to_unpin - start);
5750                         btrfs_add_free_space(cache, start, len);
5751                 }
5752
5753                 start += len;
5754                 space_info = cache->space_info;
5755
5756                 spin_lock(&space_info->lock);
5757                 spin_lock(&cache->lock);
5758                 cache->pinned -= len;
5759                 space_info->bytes_pinned -= len;
5760                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
5761                 if (cache->ro) {
5762                         space_info->bytes_readonly += len;
5763                         readonly = true;
5764                 }
5765                 spin_unlock(&cache->lock);
5766                 if (!readonly && global_rsv->space_info == space_info) {
5767                         spin_lock(&global_rsv->lock);
5768                         if (!global_rsv->full) {
5769                                 len = min(len, global_rsv->size -
5770                                           global_rsv->reserved);
5771                                 global_rsv->reserved += len;
5772                                 space_info->bytes_may_use += len;
5773                                 if (global_rsv->reserved >= global_rsv->size)
5774                                         global_rsv->full = 1;
5775                         }
5776                         spin_unlock(&global_rsv->lock);
5777                 }
5778                 spin_unlock(&space_info->lock);
5779         }
5780
5781         if (cache)
5782                 btrfs_put_block_group(cache);
5783         return 0;
5784 }
5785
5786 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5787                                struct btrfs_root *root)
5788 {
5789         struct btrfs_fs_info *fs_info = root->fs_info;
5790         struct extent_io_tree *unpin;
5791         u64 start;
5792         u64 end;
5793         int ret;
5794
5795         if (trans->aborted)
5796                 return 0;
5797
5798         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5799                 unpin = &fs_info->freed_extents[1];
5800         else
5801                 unpin = &fs_info->freed_extents[0];
5802
5803         while (1) {
5804                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5805                                             EXTENT_DIRTY, NULL);
5806                 if (ret)
5807                         break;
5808
5809                 if (btrfs_test_opt(root, DISCARD))
5810                         ret = btrfs_discard_extent(root, start,
5811                                                    end + 1 - start, NULL);
5812
5813                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5814                 unpin_extent_range(root, start, end);
5815                 cond_resched();
5816         }
5817
5818         return 0;
5819 }
5820
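/*
 * Adjust the total_bytes_pinned counter of the space_info that this
 * extent belongs to (system or metadata for tree blocks, data
 * otherwise).  Callers pass a negative @num_bytes to undo a previous
 * addition.
 */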
5821 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5822                              u64 owner, u64 root_objectid)
5823 {
5824         struct btrfs_space_info *space_info;
5825         u64 flags;
5826
5827         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5828                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5829                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5830                 else
5831                         flags = BTRFS_BLOCK_GROUP_METADATA;
5832         } else {
5833                 flags = BTRFS_BLOCK_GROUP_DATA;
5834         }
5835
5836         space_info = __find_space_info(fs_info, flags);
5837         BUG_ON(!space_info); /* Logic bug */
5838         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5839 }
5840
5841
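/*
 * Drop @refs_to_drop references to the extent at @bytenr: locate the
 * backref and the extent item, decrement the reference count, and once
 * it reaches zero delete the extent item (and csum items for data),
 * update the block group accounting and record the qgroup operation.
 */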
5842 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5843                                 struct btrfs_root *root,
5844                                 u64 bytenr, u64 num_bytes, u64 parent,
5845                                 u64 root_objectid, u64 owner_objectid,
5846                                 u64 owner_offset, int refs_to_drop,
5847                                 struct btrfs_delayed_extent_op *extent_op,
5848                                 int no_quota)
5849 {
5850         struct btrfs_key key;
5851         struct btrfs_path *path;
5852         struct btrfs_fs_info *info = root->fs_info;
5853         struct btrfs_root *extent_root = info->extent_root;
5854         struct extent_buffer *leaf;
5855         struct btrfs_extent_item *ei;
5856         struct btrfs_extent_inline_ref *iref;
5857         int ret;
5858         int is_data;
5859         int extent_slot = 0;
5860         int found_extent = 0;
5861         int num_to_del = 1;
5862         u32 item_size;
5863         u64 refs;
5864         int last_ref = 0;
5865         enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
5866         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5867                                                  SKINNY_METADATA);
5868
5869         if (!info->quota_enabled || !is_fstree(root_objectid))
5870                 no_quota = 1;
5871
5872         path = btrfs_alloc_path();
5873         if (!path)
5874                 return -ENOMEM;
5875
5876         path->reada = 1;
5877         path->leave_spinning = 1;
5878
5879         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5880         BUG_ON(!is_data && refs_to_drop != 1);
5881
5882         if (is_data)
5883                 skinny_metadata = 0;
5884
5885         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5886                                     bytenr, num_bytes, parent,
5887                                     root_objectid, owner_objectid,
5888                                     owner_offset);
5889         if (ret == 0) {
5890                 extent_slot = path->slots[0];
5891                 while (extent_slot >= 0) {
5892                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5893                                               extent_slot);
5894                         if (key.objectid != bytenr)
5895                                 break;
5896                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5897                             key.offset == num_bytes) {
5898                                 found_extent = 1;
5899                                 break;
5900                         }
5901                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5902                             key.offset == owner_objectid) {
5903                                 found_extent = 1;
5904                                 break;
5905                         }
5906                         if (path->slots[0] - extent_slot > 5)
5907                                 break;
5908                         extent_slot--;
5909                 }
5910 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5911                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5912                 if (found_extent && item_size < sizeof(*ei))
5913                         found_extent = 0;
5914 #endif
5915                 if (!found_extent) {
5916                         BUG_ON(iref);
5917                         ret = remove_extent_backref(trans, extent_root, path,
5918                                                     NULL, refs_to_drop,
5919                                                     is_data, &last_ref);
5920                         if (ret) {
5921                                 btrfs_abort_transaction(trans, extent_root, ret);
5922                                 goto out;
5923                         }
5924                         btrfs_release_path(path);
5925                         path->leave_spinning = 1;
5926
5927                         key.objectid = bytenr;
5928                         key.type = BTRFS_EXTENT_ITEM_KEY;
5929                         key.offset = num_bytes;
5930
5931                         if (!is_data && skinny_metadata) {
5932                                 key.type = BTRFS_METADATA_ITEM_KEY;
5933                                 key.offset = owner_objectid;
5934                         }
5935
5936                         ret = btrfs_search_slot(trans, extent_root,
5937                                                 &key, path, -1, 1);
5938                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5939                                 /*
5940                                  * Couldn't find our skinny metadata item,
5941                                  * see if we have ye olde extent item.
5942                                  */
5943                                 path->slots[0]--;
5944                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5945                                                       path->slots[0]);
5946                                 if (key.objectid == bytenr &&
5947                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5948                                     key.offset == num_bytes)
5949                                         ret = 0;
5950                         }
5951
5952                         if (ret > 0 && skinny_metadata) {
5953                                 skinny_metadata = false;
5954                                 key.objectid = bytenr;
5955                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5956                                 key.offset = num_bytes;
5957                                 btrfs_release_path(path);
5958                                 ret = btrfs_search_slot(trans, extent_root,
5959                                                         &key, path, -1, 1);
5960                         }
5961
5962                         if (ret) {
5963                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5964                                         ret, bytenr);
5965                                 if (ret > 0)
5966                                         btrfs_print_leaf(extent_root,
5967                                                          path->nodes[0]);
5968                         }
5969                         if (ret < 0) {
5970                                 btrfs_abort_transaction(trans, extent_root, ret);
5971                                 goto out;
5972                         }
5973                         extent_slot = path->slots[0];
5974                 }
5975         } else if (WARN_ON(ret == -ENOENT)) {
5976                 btrfs_print_leaf(extent_root, path->nodes[0]);
5977                 btrfs_err(info,
5978                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5979                         bytenr, parent, root_objectid, owner_objectid,
5980                         owner_offset);
5981                 btrfs_abort_transaction(trans, extent_root, ret);
5982                 goto out;
5983         } else {
5984                 btrfs_abort_transaction(trans, extent_root, ret);
5985                 goto out;
5986         }
5987
5988         leaf = path->nodes[0];
5989         item_size = btrfs_item_size_nr(leaf, extent_slot);
5990 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5991         if (item_size < sizeof(*ei)) {
5992                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5993                 ret = convert_extent_item_v0(trans, extent_root, path,
5994                                              owner_objectid, 0);
5995                 if (ret < 0) {
5996                         btrfs_abort_transaction(trans, extent_root, ret);
5997                         goto out;
5998                 }
5999
6000                 btrfs_release_path(path);
6001                 path->leave_spinning = 1;
6002
6003                 key.objectid = bytenr;
6004                 key.type = BTRFS_EXTENT_ITEM_KEY;
6005                 key.offset = num_bytes;
6006
6007                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6008                                         -1, 1);
6009                 if (ret) {
6010                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6011                                 ret, bytenr);
6012                         btrfs_print_leaf(extent_root, path->nodes[0]);
6013                 }
6014                 if (ret < 0) {
6015                         btrfs_abort_transaction(trans, extent_root, ret);
6016                         goto out;
6017                 }
6018
6019                 extent_slot = path->slots[0];
6020                 leaf = path->nodes[0];
6021                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6022         }
6023 #endif
6024         BUG_ON(item_size < sizeof(*ei));
6025         ei = btrfs_item_ptr(leaf, extent_slot,
6026                             struct btrfs_extent_item);
6027         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6028             key.type == BTRFS_EXTENT_ITEM_KEY) {
6029                 struct btrfs_tree_block_info *bi;
6030                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6031                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6032                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6033         }
6034
6035         refs = btrfs_extent_refs(leaf, ei);
6036         if (refs < refs_to_drop) {
6037                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6038                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6039                 ret = -EINVAL;
6040                 btrfs_abort_transaction(trans, extent_root, ret);
6041                 goto out;
6042         }
6043         refs -= refs_to_drop;
6044
6045         if (refs > 0) {
6046                 type = BTRFS_QGROUP_OPER_SUB_SHARED;
6047                 if (extent_op)
6048                         __run_delayed_extent_op(extent_op, leaf, ei);
6049                 /*
6050                  * In the case of inline back ref, reference count will
6051                  * be updated by remove_extent_backref
6052                  */
6053                 if (iref) {
6054                         BUG_ON(!found_extent);
6055                 } else {
6056                         btrfs_set_extent_refs(leaf, ei, refs);
6057                         btrfs_mark_buffer_dirty(leaf);
6058                 }
6059                 if (found_extent) {
6060                         ret = remove_extent_backref(trans, extent_root, path,
6061                                                     iref, refs_to_drop,
6062                                                     is_data, &last_ref);
6063                         if (ret) {
6064                                 btrfs_abort_transaction(trans, extent_root, ret);
6065                                 goto out;
6066                         }
6067                 }
6068                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6069                                  root_objectid);
6070         } else {
6071                 if (found_extent) {
6072                         BUG_ON(is_data && refs_to_drop !=
6073                                extent_data_ref_count(root, path, iref));
6074                         if (iref) {
6075                                 BUG_ON(path->slots[0] != extent_slot);
6076                         } else {
6077                                 BUG_ON(path->slots[0] != extent_slot + 1);
6078                                 path->slots[0] = extent_slot;
6079                                 num_to_del = 2;
6080                         }
6081                 }
6082
6083                 last_ref = 1;
6084                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6085                                       num_to_del);
6086                 if (ret) {
6087                         btrfs_abort_transaction(trans, extent_root, ret);
6088                         goto out;
6089                 }
6090                 btrfs_release_path(path);
6091
6092                 if (is_data) {
6093                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6094                         if (ret) {
6095                                 btrfs_abort_transaction(trans, extent_root, ret);
6096                                 goto out;
6097                         }
6098                 }
6099
6100                 ret = update_block_group(root, bytenr, num_bytes, 0);
6101                 if (ret) {
6102                         btrfs_abort_transaction(trans, extent_root, ret);
6103                         goto out;
6104                 }
6105         }
6106         btrfs_release_path(path);
6107
6108         /* Deal with the quota accounting */
6109         if (!ret && last_ref && !no_quota) {
6110                 int mod_seq = 0;
6111
6112                 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
6113                     type == BTRFS_QGROUP_OPER_SUB_SHARED)
6114                         mod_seq = 1;
6115
6116                 ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
6117                                               bytenr, num_bytes, type,
6118                                               mod_seq);
6119         }
6120 out:
6121         btrfs_free_path(path);
6122         return ret;
6123 }
6124
6125 /*
6126  * when we free a block, it is possible (and likely) that we free the last
6127  * delayed ref for that extent as well.  This searches the delayed ref tree for
6128  * a given extent, and if there are no other delayed refs to be processed, it
6129  * removes it from the tree.
6130  */
6131 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6132                                       struct btrfs_root *root, u64 bytenr)
6133 {
6134         struct btrfs_delayed_ref_head *head;
6135         struct btrfs_delayed_ref_root *delayed_refs;
6136         int ret = 0;
6137
6138         delayed_refs = &trans->transaction->delayed_refs;
6139         spin_lock(&delayed_refs->lock);
6140         head = btrfs_find_delayed_ref_head(trans, bytenr);
6141         if (!head)
6142                 goto out_delayed_unlock;
6143
6144         spin_lock(&head->lock);
6145         if (rb_first(&head->ref_root))
6146                 goto out;
6147
6148         if (head->extent_op) {
6149                 if (!head->must_insert_reserved)
6150                         goto out;
6151                 btrfs_free_delayed_extent_op(head->extent_op);
6152                 head->extent_op = NULL;
6153         }
6154
6155         /*
6156          * waiting for the lock here would deadlock.  If someone else has it
6157          * locked they are already in the process of dropping it anyway
6158          */
6159         if (!mutex_trylock(&head->mutex))
6160                 goto out;
6161
6162         /*
6163          * at this point we have a head with no other entries.  Go
6164          * ahead and process it.
6165          */
6166         head->node.in_tree = 0;
6167         rb_erase(&head->href_node, &delayed_refs->href_root);
6168
6169         atomic_dec(&delayed_refs->num_entries);
6170
6171         /*
6172          * we don't take a ref on the node because we're removing it from the
6173          * tree, so we just steal the ref the tree was holding.
6174          */
6175         delayed_refs->num_heads--;
6176         if (head->processing == 0)
6177                 delayed_refs->num_heads_ready--;
6178         head->processing = 0;
6179         spin_unlock(&head->lock);
6180         spin_unlock(&delayed_refs->lock);
6181
6182         BUG_ON(head->extent_op);
6183         if (head->must_insert_reserved)
6184                 ret = 1;
6185
6186         mutex_unlock(&head->mutex);
6187         btrfs_put_delayed_ref(&head->node);
6188         return ret;
6189 out:
6190         spin_unlock(&head->lock);
6191
6192 out_delayed_unlock:
6193         spin_unlock(&delayed_refs->lock);
6194         return 0;
6195 }
6196
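/*
 * Free a tree block.  For non-log trees a delayed ref drop is queued; if
 * this was the last reference and the block was allocated in the current
 * transaction and never written, it goes straight back to the free space
 * cache, otherwise the block ends up pinned until the transaction
 * commits.
 */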
6197 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6198                            struct btrfs_root *root,
6199                            struct extent_buffer *buf,
6200                            u64 parent, int last_ref)
6201 {
6202         struct btrfs_block_group_cache *cache = NULL;
6203         int pin = 1;
6204         int ret;
6205
6206         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6207                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6208                                         buf->start, buf->len,
6209                                         parent, root->root_key.objectid,
6210                                         btrfs_header_level(buf),
6211                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6212                 BUG_ON(ret); /* -ENOMEM */
6213         }
6214
6215         if (!last_ref)
6216                 return;
6217
6218         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6219
6220         if (btrfs_header_generation(buf) == trans->transid) {
6221                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6222                         ret = check_ref_cleanup(trans, root, buf->start);
6223                         if (!ret)
6224                                 goto out;
6225                 }
6226
6227                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6228                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6229                         goto out;
6230                 }
6231
6232                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6233
6234                 btrfs_add_free_space(cache, buf->start, buf->len);
6235                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6236                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6237                 pin = 0;
6238         }
6239 out:
6240         if (pin)
6241                 add_pinned_bytes(root->fs_info, buf->len,
6242                                  btrfs_header_level(buf),
6243                                  root->root_key.objectid);
6244
6245         /*
6246          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6247          * anymore.
6248          */
6249         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6250         btrfs_put_block_group(cache);
6251 }
6252
6253 /* Can return -ENOMEM */
6254 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6255                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6256                       u64 owner, u64 offset, int no_quota)
6257 {
6258         int ret;
6259         struct btrfs_fs_info *fs_info = root->fs_info;
6260
6261         if (btrfs_test_is_dummy_root(root))
6262                 return 0;
6263
6264         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6265
6266         /*
6267          * tree log blocks never actually go into the extent allocation
6268          * tree, just update pinning info and exit early.
6269          */
6270         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6271                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6272                 /* unlocks the pinned mutex */
6273                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6274                 ret = 0;
6275         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6276                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6277                                         num_bytes,
6278                                         parent, root_objectid, (int)owner,
6279                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6280         } else {
6281                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6282                                                 num_bytes,
6283                                                 parent, root_objectid, owner,
6284                                                 offset, BTRFS_DROP_DELAYED_REF,
6285                                                 NULL, no_quota);
6286         }
6287         return ret;
6288 }
6289
6290 /*
6291  * when we wait for progress in the block group caching, it's because
6292  * our allocation attempt failed at least once.  So, we must sleep
6293  * and let some progress happen before we try again.
6294  *
6295  * This function will sleep at least once waiting for new free space to
6296  * show up, and then it will check the block group free space numbers
6297  * for our min num_bytes.  Another option is to have it go ahead
6298  * and look in the rbtree for a free extent of a given size, but this
6299  * is a good start.
6300  *
6301  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6302  * any of the information in this block group.
6303  */
6304 static noinline void
6305 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6306                                 u64 num_bytes)
6307 {
6308         struct btrfs_caching_control *caching_ctl;
6309
6310         caching_ctl = get_caching_control(cache);
6311         if (!caching_ctl)
6312                 return;
6313
6314         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6315                    (cache->free_space_ctl->free_space >= num_bytes));
6316
6317         put_caching_control(caching_ctl);
6318 }
6319
6320 static noinline int
6321 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6322 {
6323         struct btrfs_caching_control *caching_ctl;
6324         int ret = 0;
6325
6326         caching_ctl = get_caching_control(cache);
6327         if (!caching_ctl)
6328                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6329
6330         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6331         if (cache->cached == BTRFS_CACHE_ERROR)
6332                 ret = -EIO;
6333         put_caching_control(caching_ctl);
6334         return ret;
6335 }
6336
6337 int __get_raid_index(u64 flags)
6338 {
6339         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6340                 return BTRFS_RAID_RAID10;
6341         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6342                 return BTRFS_RAID_RAID1;
6343         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6344                 return BTRFS_RAID_DUP;
6345         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6346                 return BTRFS_RAID_RAID0;
6347         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6348                 return BTRFS_RAID_RAID5;
6349         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6350                 return BTRFS_RAID_RAID6;
6351
6352         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6353 }
6354
6355 int get_block_group_index(struct btrfs_block_group_cache *cache)
6356 {
6357         return __get_raid_index(cache->flags);
6358 }
6359
6360 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6361         [BTRFS_RAID_RAID10]     = "raid10",
6362         [BTRFS_RAID_RAID1]      = "raid1",
6363         [BTRFS_RAID_DUP]        = "dup",
6364         [BTRFS_RAID_RAID0]      = "raid0",
6365         [BTRFS_RAID_SINGLE]     = "single",
6366         [BTRFS_RAID_RAID5]      = "raid5",
6367         [BTRFS_RAID_RAID6]      = "raid6",
6368 };
6369
6370 static const char *get_raid_name(enum btrfs_raid_types type)
6371 {
6372         if (type >= BTRFS_NR_RAID_TYPES)
6373                 return NULL;
6374
6375         return btrfs_raid_type_names[type];
6376 }
6377
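/*
 * find_free_extent() escalates through these levels when it cannot
 * satisfy an allocation: first only already-cached block groups are
 * used, then we wait for caching to progress, then a new chunk may be
 * allocated, and finally we retry without the empty_size/empty_cluster
 * padding.
 */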
6378 enum btrfs_loop_type {
6379         LOOP_CACHING_NOWAIT = 0,
6380         LOOP_CACHING_WAIT = 1,
6381         LOOP_ALLOC_CHUNK = 2,
6382         LOOP_NO_EMPTY_SIZE = 3,
6383 };
6384
6385 static inline void
6386 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6387                        int delalloc)
6388 {
6389         if (delalloc)
6390                 down_read(&cache->data_rwsem);
6391 }
6392
6393 static inline void
6394 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6395                        int delalloc)
6396 {
6397         btrfs_get_block_group(cache);
6398         if (delalloc)
6399                 down_read(&cache->data_rwsem);
6400 }
6401
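/*
 * Take the cluster's refill_lock and grab the block group it currently
 * points at, holding a reference and, for delalloc allocations, its
 * data_rwsem for read.  Returns with refill_lock held; if we have to
 * sleep on data_rwsem the lock is dropped and the lookup is retried.
 */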
6402 static struct btrfs_block_group_cache *
6403 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6404                    struct btrfs_free_cluster *cluster,
6405                    int delalloc)
6406 {
6407         struct btrfs_block_group_cache *used_bg;
6408         bool locked = false;
6409 again:
6410         spin_lock(&cluster->refill_lock);
6411         if (locked) {
6412                 if (used_bg == cluster->block_group)
6413                         return used_bg;
6414
6415                 up_read(&used_bg->data_rwsem);
6416                 btrfs_put_block_group(used_bg);
6417         }
6418
6419         used_bg = cluster->block_group;
6420         if (!used_bg)
6421                 return NULL;
6422
6423         if (used_bg == block_group)
6424                 return used_bg;
6425
6426         btrfs_get_block_group(used_bg);
6427
6428         if (!delalloc)
6429                 return used_bg;
6430
6431         if (down_read_trylock(&used_bg->data_rwsem))
6432                 return used_bg;
6433
6434         spin_unlock(&cluster->refill_lock);
6435         down_read(&used_bg->data_rwsem);
6436         locked = true;
6437         goto again;
6438 }
6439
6440 static inline void
6441 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6442                          int delalloc)
6443 {
6444         if (delalloc)
6445                 up_read(&cache->data_rwsem);
6446         btrfs_put_block_group(cache);
6447 }
6448
6449 /*
6450  * walks the btree of allocated extents and finds a hole of a given size.
6451  * The key ins is changed to record the hole:
6452  * ins->objectid == start position
6453  * ins->type == BTRFS_EXTENT_ITEM_KEY
6454  * ins->offset == the size of the hole.
6455  * Any available blocks before search_start are skipped.
6456  *
6457  * If there is no suitable free space, we will record the max size of
6458  * the free space extents we examined.
6459  */
6460 static noinline int find_free_extent(struct btrfs_root *orig_root,
6461                                      u64 num_bytes, u64 empty_size,
6462                                      u64 hint_byte, struct btrfs_key *ins,
6463                                      u64 flags, int delalloc)
6464 {
6465         int ret = 0;
6466         struct btrfs_root *root = orig_root->fs_info->extent_root;
6467         struct btrfs_free_cluster *last_ptr = NULL;
6468         struct btrfs_block_group_cache *block_group = NULL;
6469         u64 search_start = 0;
6470         u64 max_extent_size = 0;
6471         int empty_cluster = 2 * 1024 * 1024;
6472         struct btrfs_space_info *space_info;
6473         int loop = 0;
6474         int index = __get_raid_index(flags);
6475         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6476                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6477         bool failed_cluster_refill = false;
6478         bool failed_alloc = false;
6479         bool use_cluster = true;
6480         bool have_caching_bg = false;
6481
6482         WARN_ON(num_bytes < root->sectorsize);
6483         ins->type = BTRFS_EXTENT_ITEM_KEY;
6484         ins->objectid = 0;
6485         ins->offset = 0;
6486
6487         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6488
6489         space_info = __find_space_info(root->fs_info, flags);
6490         if (!space_info) {
6491                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6492                 return -ENOSPC;
6493         }
6494
6495         /*
6496          * If the space info is for both data and metadata it means we have a
6497          * small filesystem and we can't use the clustering stuff.
6498          */
6499         if (btrfs_mixed_space_info(space_info))
6500                 use_cluster = false;
6501
6502         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6503                 last_ptr = &root->fs_info->meta_alloc_cluster;
6504                 if (!btrfs_test_opt(root, SSD))
6505                         empty_cluster = 64 * 1024;
6506         }
6507
6508         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6509             btrfs_test_opt(root, SSD)) {
6510                 last_ptr = &root->fs_info->data_alloc_cluster;
6511         }
6512
6513         if (last_ptr) {
6514                 spin_lock(&last_ptr->lock);
6515                 if (last_ptr->block_group)
6516                         hint_byte = last_ptr->window_start;
6517                 spin_unlock(&last_ptr->lock);
6518         }
6519
6520         search_start = max(search_start, first_logical_byte(root, 0));
6521         search_start = max(search_start, hint_byte);
6522
6523         if (!last_ptr)
6524                 empty_cluster = 0;
6525
6526         if (search_start == hint_byte) {
6527                 block_group = btrfs_lookup_block_group(root->fs_info,
6528                                                        search_start);
6529                 /*
6530                  * we don't want to use the block group if it doesn't match our
6531                  * allocation bits, or if it's not cached.
6532                  *
6533                  * However if we are re-searching with an ideal block group
6534                  * picked out then we don't care that the block group is cached.
6535                  */
6536                 if (block_group && block_group_bits(block_group, flags) &&
6537                     block_group->cached != BTRFS_CACHE_NO) {
6538                         down_read(&space_info->groups_sem);
6539                         if (list_empty(&block_group->list) ||
6540                             block_group->ro) {
6541                                 /*
6542                                  * someone is removing this block group,
6543                                  * we can't jump into the have_block_group
6544                                  * target because our list pointers are not
6545                                  * valid
6546                                  */
6547                                 btrfs_put_block_group(block_group);
6548                                 up_read(&space_info->groups_sem);
6549                         } else {
6550                                 index = get_block_group_index(block_group);
6551                                 btrfs_lock_block_group(block_group, delalloc);
6552                                 goto have_block_group;
6553                         }
6554                 } else if (block_group) {
6555                         btrfs_put_block_group(block_group);
6556                 }
6557         }
6558 search:
6559         have_caching_bg = false;
6560         down_read(&space_info->groups_sem);
6561         list_for_each_entry(block_group, &space_info->block_groups[index],
6562                             list) {
6563                 u64 offset;
6564                 int cached;
6565
6566                 btrfs_grab_block_group(block_group, delalloc);
6567                 search_start = block_group->key.objectid;
6568
6569                 /*
6570                  * this can happen if we end up cycling through all the
6571                  * raid types, but we want to make sure we only allocate
6572                  * for the proper type.
6573                  */
6574                 if (!block_group_bits(block_group, flags)) {
6575                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6576                                     BTRFS_BLOCK_GROUP_RAID1 |
6577                                     BTRFS_BLOCK_GROUP_RAID5 |
6578                                     BTRFS_BLOCK_GROUP_RAID6 |
6579                                     BTRFS_BLOCK_GROUP_RAID10;
6580
6581                         /*
6582                          * if they asked for extra copies and this block group
6583                          * doesn't provide them, bail.  This does allow us to
6584                          * fill raid0 from raid1.
6585                          */
6586                         if ((flags & extra) && !(block_group->flags & extra))
6587                                 goto loop;
6588                 }
6589
6590 have_block_group:
6591                 cached = block_group_cache_done(block_group);
6592                 if (unlikely(!cached)) {
6593                         ret = cache_block_group(block_group, 0);
6594                         BUG_ON(ret < 0);
6595                         ret = 0;
6596                 }
6597
6598                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6599                         goto loop;
6600                 if (unlikely(block_group->ro))
6601                         goto loop;
6602
6603                 /*
6604                  * Ok we want to try and use the cluster allocator, so
6605                  * let's look there
6606                  */
6607                 if (last_ptr) {
6608                         struct btrfs_block_group_cache *used_block_group;
6609                         unsigned long aligned_cluster;
6610                         /*
6611                          * the refill lock keeps out other
6612                          * people trying to start a new cluster
6613                          */
6614                         used_block_group = btrfs_lock_cluster(block_group,
6615                                                               last_ptr,
6616                                                               delalloc);
6617                         if (!used_block_group)
6618                                 goto refill_cluster;
6619
6620                         if (used_block_group != block_group &&
6621                             (used_block_group->ro ||
6622                              !block_group_bits(used_block_group, flags)))
6623                                 goto release_cluster;
6624
6625                         offset = btrfs_alloc_from_cluster(used_block_group,
6626                                                 last_ptr,
6627                                                 num_bytes,
6628                                                 used_block_group->key.objectid,
6629                                                 &max_extent_size);
6630                         if (offset) {
6631                                 /* we have a block, we're done */
6632                                 spin_unlock(&last_ptr->refill_lock);
6633                                 trace_btrfs_reserve_extent_cluster(root,
6634                                                 used_block_group,
6635                                                 search_start, num_bytes);
6636                                 if (used_block_group != block_group) {
6637                                         btrfs_release_block_group(block_group,
6638                                                                   delalloc);
6639                                         block_group = used_block_group;
6640                                 }
6641                                 goto checks;
6642                         }
6643
6644                         WARN_ON(last_ptr->block_group != used_block_group);
6645 release_cluster:
6646                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6647                          * set up a new cluster, so let's just skip it
6648                          * and let the allocator find whatever block
6649                          * it can find.  If we reach this point, we
6650                          * will have tried the cluster allocator
6651                          * plenty of times and not have found
6652                          * anything, so we are likely way too
6653                          * fragmented for the clustering stuff to find
6654                          * anything.
6655                          *
6656                          * However, if the cluster is taken from the
6657                          * current block group, release the cluster
6658                          * first, so that we stand a better chance of
6659                          * succeeding in the unclustered
6660                          * allocation.  */
6661                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6662                             used_block_group != block_group) {
6663                                 spin_unlock(&last_ptr->refill_lock);
6664                                 btrfs_release_block_group(used_block_group,
6665                                                           delalloc);
6666                                 goto unclustered_alloc;
6667                         }
6668
6669                         /*
6670                          * this cluster didn't work out, free it and
6671                          * start over
6672                          */
6673                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6674
6675                         if (used_block_group != block_group)
6676                                 btrfs_release_block_group(used_block_group,
6677                                                           delalloc);
6678 refill_cluster:
6679                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6680                                 spin_unlock(&last_ptr->refill_lock);
6681                                 goto unclustered_alloc;
6682                         }
6683
6684                         aligned_cluster = max_t(unsigned long,
6685                                                 empty_cluster + empty_size,
6686                                               block_group->full_stripe_len);
6687
6688                         /* allocate a cluster in this block group */
6689                         ret = btrfs_find_space_cluster(root, block_group,
6690                                                        last_ptr, search_start,
6691                                                        num_bytes,
6692                                                        aligned_cluster);
6693                         if (ret == 0) {
6694                                 /*
6695                                  * now pull our allocation out of this
6696                                  * cluster
6697                                  */
6698                                 offset = btrfs_alloc_from_cluster(block_group,
6699                                                         last_ptr,
6700                                                         num_bytes,
6701                                                         search_start,
6702                                                         &max_extent_size);
6703                                 if (offset) {
6704                                         /* we found one, proceed */
6705                                         spin_unlock(&last_ptr->refill_lock);
6706                                         trace_btrfs_reserve_extent_cluster(root,
6707                                                 block_group, search_start,
6708                                                 num_bytes);
6709                                         goto checks;
6710                                 }
6711                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6712                                    && !failed_cluster_refill) {
6713                                 spin_unlock(&last_ptr->refill_lock);
6714
6715                                 failed_cluster_refill = true;
6716                                 wait_block_group_cache_progress(block_group,
6717                                        num_bytes + empty_cluster + empty_size);
6718                                 goto have_block_group;
6719                         }
6720
6721                         /*
6722                          * at this point we either didn't find a cluster
6723                          * or we weren't able to allocate a block from our
6724                          * cluster.  Free the cluster we've been trying
6725                          * to use, and go to the next block group
6726                          */
6727                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6728                         spin_unlock(&last_ptr->refill_lock);
6729                         goto loop;
6730                 }
6731
6732 unclustered_alloc:
6733                 spin_lock(&block_group->free_space_ctl->tree_lock);
6734                 if (cached &&
6735                     block_group->free_space_ctl->free_space <
6736                     num_bytes + empty_cluster + empty_size) {
6737                         if (block_group->free_space_ctl->free_space >
6738                             max_extent_size)
6739                                 max_extent_size =
6740                                         block_group->free_space_ctl->free_space;
6741                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6742                         goto loop;
6743                 }
6744                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6745
6746                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6747                                                     num_bytes, empty_size,
6748                                                     &max_extent_size);
6749                 /*
6750                  * If we didn't find a chunk, and we haven't failed on this
6751                  * block group before, and this block group is in the middle of
6752                  * caching and we are ok with waiting, then go ahead and wait
6753                  * for progress to be made, and set failed_alloc to true.
6754                  *
6755                  * If failed_alloc is true then we've already waited on this
6756                  * block group once and should move on to the next block group.
6757                  */
6758                 if (!offset && !failed_alloc && !cached &&
6759                     loop > LOOP_CACHING_NOWAIT) {
6760                         wait_block_group_cache_progress(block_group,
6761                                                 num_bytes + empty_size);
6762                         failed_alloc = true;
6763                         goto have_block_group;
6764                 } else if (!offset) {
6765                         if (!cached)
6766                                 have_caching_bg = true;
6767                         goto loop;
6768                 }
6769 checks:
6770                 search_start = ALIGN(offset, root->stripesize);
6771
6772                 /* move on to the next group */
6773                 if (search_start + num_bytes >
6774                     block_group->key.objectid + block_group->key.offset) {
6775                         btrfs_add_free_space(block_group, offset, num_bytes);
6776                         goto loop;
6777                 }
6778
6779                 if (offset < search_start)
6780                         btrfs_add_free_space(block_group, offset,
6781                                              search_start - offset);
6782                 BUG_ON(offset > search_start);
6783
6784                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
6785                                                   alloc_type, delalloc);
6786                 if (ret == -EAGAIN) {
6787                         btrfs_add_free_space(block_group, offset, num_bytes);
6788                         goto loop;
6789                 }
6790
6791                 /* we are all good, let's return */
6792                 ins->objectid = search_start;
6793                 ins->offset = num_bytes;
6794
6795                 trace_btrfs_reserve_extent(orig_root, block_group,
6796                                            search_start, num_bytes);
6797                 btrfs_release_block_group(block_group, delalloc);
6798                 break;
6799 loop:
6800                 failed_cluster_refill = false;
6801                 failed_alloc = false;
6802                 BUG_ON(index != get_block_group_index(block_group));
6803                 btrfs_release_block_group(block_group, delalloc);
6804         }
6805         up_read(&space_info->groups_sem);
6806
6807         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6808                 goto search;
6809
6810         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6811                 goto search;
6812
6813         /*
6814          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6815          *                      caching kthreads as we move along
6816          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6817          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6818          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6819          *                      again
6820          */
6821         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6822                 index = 0;
6823                 loop++;
6824                 if (loop == LOOP_ALLOC_CHUNK) {
6825                         struct btrfs_trans_handle *trans;
6826                         int exist = 0;
6827
6828                         trans = current->journal_info;
6829                         if (trans)
6830                                 exist = 1;
6831                         else
6832                                 trans = btrfs_join_transaction(root);
6833
6834                         if (IS_ERR(trans)) {
6835                                 ret = PTR_ERR(trans);
6836                                 goto out;
6837                         }
6838
6839                         ret = do_chunk_alloc(trans, root, flags,
6840                                              CHUNK_ALLOC_FORCE);
6841                         /*
6842                          * Do not bail out on ENOSPC since we
6843                          * may still be able to make progress on a later loop pass.
6844                          */
6845                         if (ret < 0 && ret != -ENOSPC)
6846                                 btrfs_abort_transaction(trans,
6847                                                         root, ret);
6848                         else
6849                                 ret = 0;
6850                         if (!exist)
6851                                 btrfs_end_transaction(trans, root);
6852                         if (ret)
6853                                 goto out;
6854                 }
6855
6856                 if (loop == LOOP_NO_EMPTY_SIZE) {
6857                         empty_size = 0;
6858                         empty_cluster = 0;
6859                 }
6860
6861                 goto search;
6862         } else if (!ins->objectid) {
6863                 ret = -ENOSPC;
6864         } else if (ins->objectid) {
6865                 ret = 0;
6866         }
6867 out:
6868         if (ret == -ENOSPC)
6869                 ins->offset = max_extent_size;
6870         return ret;
6871 }
6872
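/*
 * Print the counters of a space_info and, when dump_block_groups is set,
 * the usage of each of its block groups, for ENOSPC debugging.
 */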
6873 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6874                             int dump_block_groups)
6875 {
6876         struct btrfs_block_group_cache *cache;
6877         int index = 0;
6878
6879         spin_lock(&info->lock);
6880         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
6881                info->flags,
6882                info->total_bytes - info->bytes_used - info->bytes_pinned -
6883                info->bytes_reserved - info->bytes_readonly,
6884                (info->full) ? "" : "not ");
6885         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
6886                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6887                info->total_bytes, info->bytes_used, info->bytes_pinned,
6888                info->bytes_reserved, info->bytes_may_use,
6889                info->bytes_readonly);
6890         spin_unlock(&info->lock);
6891
6892         if (!dump_block_groups)
6893                 return;
6894
6895         down_read(&info->groups_sem);
6896 again:
6897         list_for_each_entry(cache, &info->block_groups[index], list) {
6898                 spin_lock(&cache->lock);
6899                 printk(KERN_INFO "BTRFS: "
6900                            "block group %llu has %llu bytes, "
6901                            "%llu used %llu pinned %llu reserved %s\n",
6902                        cache->key.objectid, cache->key.offset,
6903                        btrfs_block_group_used(&cache->item), cache->pinned,
6904                        cache->reserved, cache->ro ? "[readonly]" : "");
6905                 btrfs_dump_free_space(cache, bytes);
6906                 spin_unlock(&cache->lock);
6907         }
6908         if (++index < BTRFS_NR_RAID_TYPES)
6909                 goto again;
6910         up_read(&info->groups_sem);
6911 }
6912
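/*
 * Reserve an extent of num_bytes for the caller.  On ENOSPC the request is
 * halved (capped at the largest free extent seen and rounded down to the
 * sectorsize) until it reaches min_alloc_size before giving up.  The
 * reserved range is returned through ins: objectid is the start and offset
 * is the length.
 */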
6913 int btrfs_reserve_extent(struct btrfs_root *root,
6914                          u64 num_bytes, u64 min_alloc_size,
6915                          u64 empty_size, u64 hint_byte,
6916                          struct btrfs_key *ins, int is_data, int delalloc)
6917 {
6918         bool final_tried = false;
6919         u64 flags;
6920         int ret;
6921
6922         flags = btrfs_get_alloc_profile(root, is_data);
6923 again:
6924         WARN_ON(num_bytes < root->sectorsize);
6925         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6926                                flags, delalloc);
6927
6928         if (ret == -ENOSPC) {
6929                 if (!final_tried && ins->offset) {
6930                         num_bytes = min(num_bytes >> 1, ins->offset);
6931                         num_bytes = round_down(num_bytes, root->sectorsize);
6932                         num_bytes = max(num_bytes, min_alloc_size);
6933                         if (num_bytes == min_alloc_size)
6934                                 final_tried = true;
6935                         goto again;
6936                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6937                         struct btrfs_space_info *sinfo;
6938
6939                         sinfo = __find_space_info(root->fs_info, flags);
6940                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6941                                 flags, num_bytes);
6942                         if (sinfo)
6943                                 dump_space_info(sinfo, num_bytes, 1);
6944                 }
6945         }
6946
6947         return ret;
6948 }
6949
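/*
 * Return a reserved-but-unused extent to its block group.  When 'pin' is
 * set the range is pinned (so it is only reclaimed at transaction commit)
 * instead of being added straight back to the free space cache; the range
 * is also discarded if the DISCARD mount option is enabled.
 */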
6950 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6951                                         u64 start, u64 len,
6952                                         int pin, int delalloc)
6953 {
6954         struct btrfs_block_group_cache *cache;
6955         int ret = 0;
6956
6957         cache = btrfs_lookup_block_group(root->fs_info, start);
6958         if (!cache) {
6959                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6960                         start);
6961                 return -ENOSPC;
6962         }
6963
6964         if (btrfs_test_opt(root, DISCARD))
6965                 ret = btrfs_discard_extent(root, start, len, NULL);
6966
6967         if (pin)
6968                 pin_down_extent(root, cache, start, len, 1);
6969         else {
6970                 btrfs_add_free_space(cache, start, len);
6971                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6972         }
6973         btrfs_put_block_group(cache);
6974
6975         trace_btrfs_reserved_extent_free(root, start, len);
6976
6977         return ret;
6978 }
6979
6980 int btrfs_free_reserved_extent(struct btrfs_root *root,
6981                                u64 start, u64 len, int delalloc)
6982 {
6983         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
6984 }
6985
6986 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6987                                        u64 start, u64 len)
6988 {
6989         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
6990 }
6991
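/*
 * Insert the extent item for a newly allocated data extent together with a
 * single inline backref (shared or keyed, depending on whether a parent
 * bytenr was supplied), then record the qgroup reference and update the
 * block group accounting.
 */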
6992 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6993                                       struct btrfs_root *root,
6994                                       u64 parent, u64 root_objectid,
6995                                       u64 flags, u64 owner, u64 offset,
6996                                       struct btrfs_key *ins, int ref_mod)
6997 {
6998         int ret;
6999         struct btrfs_fs_info *fs_info = root->fs_info;
7000         struct btrfs_extent_item *extent_item;
7001         struct btrfs_extent_inline_ref *iref;
7002         struct btrfs_path *path;
7003         struct extent_buffer *leaf;
7004         int type;
7005         u32 size;
7006
7007         if (parent > 0)
7008                 type = BTRFS_SHARED_DATA_REF_KEY;
7009         else
7010                 type = BTRFS_EXTENT_DATA_REF_KEY;
7011
7012         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7013
7014         path = btrfs_alloc_path();
7015         if (!path)
7016                 return -ENOMEM;
7017
7018         path->leave_spinning = 1;
7019         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7020                                       ins, size);
7021         if (ret) {
7022                 btrfs_free_path(path);
7023                 return ret;
7024         }
7025
7026         leaf = path->nodes[0];
7027         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7028                                      struct btrfs_extent_item);
7029         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7030         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7031         btrfs_set_extent_flags(leaf, extent_item,
7032                                flags | BTRFS_EXTENT_FLAG_DATA);
7033
7034         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7035         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7036         if (parent > 0) {
7037                 struct btrfs_shared_data_ref *ref;
7038                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7039                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7040                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7041         } else {
7042                 struct btrfs_extent_data_ref *ref;
7043                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7044                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7045                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7046                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7047                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7048         }
7049
7050         btrfs_mark_buffer_dirty(path->nodes[0]);
7051         btrfs_free_path(path);
7052
7053         /* Always set parent to 0 here since it's exclusive anyway. */
7054         ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7055                                       ins->objectid, ins->offset,
7056                                       BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7057         if (ret)
7058                 return ret;
7059
7060         ret = update_block_group(root, ins->objectid, ins->offset, 1);
7061         if (ret) { /* -ENOENT, logic error */
7062                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7063                         ins->objectid, ins->offset);
7064                 BUG();
7065         }
7066         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7067         return ret;
7068 }
7069
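/*
 * Like alloc_reserved_file_extent() but for a tree block.  With the
 * SKINNY_METADATA feature the key already encodes the level, so the
 * tree_block_info is omitted and the inline backref directly follows the
 * extent item.
 */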
7070 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7071                                      struct btrfs_root *root,
7072                                      u64 parent, u64 root_objectid,
7073                                      u64 flags, struct btrfs_disk_key *key,
7074                                      int level, struct btrfs_key *ins,
7075                                      int no_quota)
7076 {
7077         int ret;
7078         struct btrfs_fs_info *fs_info = root->fs_info;
7079         struct btrfs_extent_item *extent_item;
7080         struct btrfs_tree_block_info *block_info;
7081         struct btrfs_extent_inline_ref *iref;
7082         struct btrfs_path *path;
7083         struct extent_buffer *leaf;
7084         u32 size = sizeof(*extent_item) + sizeof(*iref);
7085         u64 num_bytes = ins->offset;
7086         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7087                                                  SKINNY_METADATA);
7088
7089         if (!skinny_metadata)
7090                 size += sizeof(*block_info);
7091
7092         path = btrfs_alloc_path();
7093         if (!path) {
7094                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7095                                                    root->nodesize);
7096                 return -ENOMEM;
7097         }
7098
7099         path->leave_spinning = 1;
7100         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7101                                       ins, size);
7102         if (ret) {
7103                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7104                                                    root->nodesize);
7105                 btrfs_free_path(path);
7106                 return ret;
7107         }
7108
7109         leaf = path->nodes[0];
7110         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7111                                      struct btrfs_extent_item);
7112         btrfs_set_extent_refs(leaf, extent_item, 1);
7113         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7114         btrfs_set_extent_flags(leaf, extent_item,
7115                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7116
7117         if (skinny_metadata) {
7118                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7119                 num_bytes = root->nodesize;
7120         } else {
7121                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7122                 btrfs_set_tree_block_key(leaf, block_info, key);
7123                 btrfs_set_tree_block_level(leaf, block_info, level);
7124                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7125         }
7126
7127         if (parent > 0) {
7128                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7129                 btrfs_set_extent_inline_ref_type(leaf, iref,
7130                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7131                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7132         } else {
7133                 btrfs_set_extent_inline_ref_type(leaf, iref,
7134                                                  BTRFS_TREE_BLOCK_REF_KEY);
7135                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7136         }
7137
7138         btrfs_mark_buffer_dirty(leaf);
7139         btrfs_free_path(path);
7140
7141         if (!no_quota) {
7142                 ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
7143                                               ins->objectid, num_bytes,
7144                                               BTRFS_QGROUP_OPER_ADD_EXCL, 0);
7145                 if (ret)
7146                         return ret;
7147         }
7148
7149         ret = update_block_group(root, ins->objectid, root->nodesize, 1);
7150         if (ret) { /* -ENOENT, logic error */
7151                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7152                         ins->objectid, ins->offset);
7153                 BUG();
7154         }
7155
7156         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7157         return ret;
7158 }
7159
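/*
 * Queue a delayed ref for a data extent that was just reserved; the
 * extent item itself is inserted later when the delayed refs are run.
 */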
7160 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7161                                      struct btrfs_root *root,
7162                                      u64 root_objectid, u64 owner,
7163                                      u64 offset, struct btrfs_key *ins)
7164 {
7165         int ret;
7166
7167         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7168
7169         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7170                                          ins->offset, 0,
7171                                          root_objectid, owner, offset,
7172                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7173         return ret;
7174 }
7175
7176 /*
7177  * this is used by the tree logging recovery code.  It records that
7178  * an extent has been allocated and makes sure to clear the free
7179  * space cache bits as well
7180  */
7181 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7182                                    struct btrfs_root *root,
7183                                    u64 root_objectid, u64 owner, u64 offset,
7184                                    struct btrfs_key *ins)
7185 {
7186         int ret;
7187         struct btrfs_block_group_cache *block_group;
7188
7189         /*
7190          * Mixed block groups will exclude before processing the log so we only
7191          * need to do the exclude dance if this fs isn't mixed.
7192          */
7193         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7194                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7195                 if (ret)
7196                         return ret;
7197         }
7198
7199         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7200         if (!block_group)
7201                 return -EINVAL;
7202
7203         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7204                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7205         BUG_ON(ret); /* logic error */
7206         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7207                                          0, owner, offset, ins, 1);
7208         btrfs_put_block_group(block_group);
7209         return ret;
7210 }
7211
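/*
 * Set up the extent buffer for a freshly allocated tree block: stamp the
 * transaction generation, lock the buffer and track it in either the log
 * root's dirty_log_pages or the transaction's dirty_pages tree.  Returns
 * the buffer locked for blocking, or an ERR_PTR on failure.
 */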
7212 static struct extent_buffer *
7213 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7214                       u64 bytenr, u32 blocksize, int level)
7215 {
7216         struct extent_buffer *buf;
7217
7218         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
7219         if (!buf)
7220                 return ERR_PTR(-ENOMEM);
7221         btrfs_set_header_generation(buf, trans->transid);
7222         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7223         btrfs_tree_lock(buf);
7224         clean_tree_block(trans, root, buf);
7225         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7226
7227         btrfs_set_lock_blocking(buf);
7228         btrfs_set_buffer_uptodate(buf);
7229
7230         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7231                 buf->log_index = root->log_transid % 2;
7232                 /*
7233                  * we allow two log transactions at a time, use different
7234                  * EXTENT bits to differentiate dirty pages.
7235                  */
7236                 if (buf->log_index == 0)
7237                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7238                                         buf->start + buf->len - 1, GFP_NOFS);
7239                 else
7240                         set_extent_new(&root->dirty_log_pages, buf->start,
7241                                         buf->start + buf->len - 1, GFP_NOFS);
7242         } else {
7243                 buf->log_index = -1;
7244                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7245                          buf->start + buf->len - 1, GFP_NOFS);
7246         }
7247         trans->blocks_used++;
7248         /* this returns a buffer locked for blocking */
7249         return buf;
7250 }
7251
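/*
 * Pick the block reserve that will back a new tree block allocation: try
 * the root's reserve first, then fall back to reserving fresh metadata
 * space and, as a last resort, to the global reserve when it shares the
 * same space_info.
 */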
7252 static struct btrfs_block_rsv *
7253 use_block_rsv(struct btrfs_trans_handle *trans,
7254               struct btrfs_root *root, u32 blocksize)
7255 {
7256         struct btrfs_block_rsv *block_rsv;
7257         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7258         int ret;
7259         bool global_updated = false;
7260
7261         block_rsv = get_block_rsv(trans, root);
7262
7263         if (unlikely(block_rsv->size == 0))
7264                 goto try_reserve;
7265 again:
7266         ret = block_rsv_use_bytes(block_rsv, blocksize);
7267         if (!ret)
7268                 return block_rsv;
7269
7270         if (block_rsv->failfast)
7271                 return ERR_PTR(ret);
7272
7273         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7274                 global_updated = true;
7275                 update_global_block_rsv(root->fs_info);
7276                 goto again;
7277         }
7278
7279         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7280                 static DEFINE_RATELIMIT_STATE(_rs,
7281                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7282                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7283                 if (__ratelimit(&_rs))
7284                         WARN(1, KERN_DEBUG
7285                                 "BTRFS: block rsv returned %d\n", ret);
7286         }
7287 try_reserve:
7288         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7289                                      BTRFS_RESERVE_NO_FLUSH);
7290         if (!ret)
7291                 return block_rsv;
7292         /*
7293          * If we couldn't reserve metadata bytes, try to use some from
7294          * the global reserve, provided our block rsv shares the global
7295          * reserve's space_info.
7296          */
7297         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7298             block_rsv->space_info == global_rsv->space_info) {
7299                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7300                 if (!ret)
7301                         return global_rsv;
7302         }
7303         return ERR_PTR(ret);
7304 }
7305
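/* Give back the bytes taken by use_block_rsv() when the block wasn't used. */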
7306 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7307                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7308 {
7309         block_rsv_add_bytes(block_rsv, blocksize, 0);
7310         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7311 }
7312
7313 /*
7314  * finds a free extent and does all the dirty work required for allocation
7315  * and initializes a tree buffer for the first block of the extent,
7316  * locked for blocking.
7317  *
7318  * returns the tree buffer or an ERR_PTR on failure.
7319  */
7320 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7321                                         struct btrfs_root *root,
7322                                         u64 parent, u64 root_objectid,
7323                                         struct btrfs_disk_key *key, int level,
7324                                         u64 hint, u64 empty_size)
7325 {
7326         struct btrfs_key ins;
7327         struct btrfs_block_rsv *block_rsv;
7328         struct extent_buffer *buf;
7329         u64 flags = 0;
7330         int ret;
7331         u32 blocksize = root->nodesize;
7332         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7333                                                  SKINNY_METADATA);
7334
7335         if (btrfs_test_is_dummy_root(root)) {
7336                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7337                                             blocksize, level);
7338                 if (!IS_ERR(buf))
7339                         root->alloc_bytenr += blocksize;
7340                 return buf;
7341         }
7342
7343         block_rsv = use_block_rsv(trans, root, blocksize);
7344         if (IS_ERR(block_rsv))
7345                 return ERR_CAST(block_rsv);
7346
7347         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7348                                    empty_size, hint, &ins, 0, 0);
7349         if (ret) {
7350                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7351                 return ERR_PTR(ret);
7352         }
7353
7354         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
7355                                     blocksize, level);
7356         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
7357
7358         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7359                 if (parent == 0)
7360                         parent = ins.objectid;
7361                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7362         } else
7363                 BUG_ON(parent > 0);
7364
7365         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7366                 struct btrfs_delayed_extent_op *extent_op;
7367                 extent_op = btrfs_alloc_delayed_extent_op();
7368                 BUG_ON(!extent_op); /* -ENOMEM */
7369                 if (key)
7370                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7371                 else
7372                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7373                 extent_op->flags_to_set = flags;
7374                 if (skinny_metadata)
7375                         extent_op->update_key = 0;
7376                 else
7377                         extent_op->update_key = 1;
7378                 extent_op->update_flags = 1;
7379                 extent_op->is_data = 0;
7380                 extent_op->level = level;
7381
7382                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7383                                         ins.objectid,
7384                                         ins.offset, parent, root_objectid,
7385                                         level, BTRFS_ADD_DELAYED_EXTENT,
7386                                         extent_op, 0);
7387                 BUG_ON(ret); /* -ENOMEM */
7388         }
7389         return buf;
7390 }
7391
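/*
 * State shared by the walk_*_proc()/walk_*_tree() helpers below while
 * walking a subtree (e.g. when dropping a snapshot).  'stage' is either
 * DROP_REFERENCE, where references on the visited blocks are dropped, or
 * UPDATE_BACKREF, where back refs for a shared subtree are updated first.
 */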
7392 struct walk_control {
7393         u64 refs[BTRFS_MAX_LEVEL];
7394         u64 flags[BTRFS_MAX_LEVEL];
7395         struct btrfs_key update_progress;
7396         int stage;
7397         int level;
7398         int shared_level;
7399         int update_ref;
7400         int keep_locks;
7401         int reada_slot;
7402         int reada_count;
7403         int for_reloc;
7404 };
7405
7406 #define DROP_REFERENCE  1
7407 #define UPDATE_BACKREF  2
7408
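/*
 * Issue readahead for the child blocks we are about to visit while walking
 * down a node; reada_count is scaled up or down depending on whether the
 * previous readahead window was consumed.
 */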
7409 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7410                                      struct btrfs_root *root,
7411                                      struct walk_control *wc,
7412                                      struct btrfs_path *path)
7413 {
7414         u64 bytenr;
7415         u64 generation;
7416         u64 refs;
7417         u64 flags;
7418         u32 nritems;
7419         u32 blocksize;
7420         struct btrfs_key key;
7421         struct extent_buffer *eb;
7422         int ret;
7423         int slot;
7424         int nread = 0;
7425
7426         if (path->slots[wc->level] < wc->reada_slot) {
7427                 wc->reada_count = wc->reada_count * 2 / 3;
7428                 wc->reada_count = max(wc->reada_count, 2);
7429         } else {
7430                 wc->reada_count = wc->reada_count * 3 / 2;
7431                 wc->reada_count = min_t(int, wc->reada_count,
7432                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7433         }
7434
7435         eb = path->nodes[wc->level];
7436         nritems = btrfs_header_nritems(eb);
7437         blocksize = root->nodesize;
7438
7439         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7440                 if (nread >= wc->reada_count)
7441                         break;
7442
7443                 cond_resched();
7444                 bytenr = btrfs_node_blockptr(eb, slot);
7445                 generation = btrfs_node_ptr_generation(eb, slot);
7446
7447                 if (slot == path->slots[wc->level])
7448                         goto reada;
7449
7450                 if (wc->stage == UPDATE_BACKREF &&
7451                     generation <= root->root_key.offset)
7452                         continue;
7453
7454                 /* We don't lock the tree block, it's OK to be racy here */
7455                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7456                                                wc->level - 1, 1, &refs,
7457                                                &flags);
7458                 /* We don't care about errors in readahead. */
7459                 if (ret < 0)
7460                         continue;
7461                 BUG_ON(refs == 0);
7462
7463                 if (wc->stage == DROP_REFERENCE) {
7464                         if (refs == 1)
7465                                 goto reada;
7466
7467                         if (wc->level == 1 &&
7468                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7469                                 continue;
7470                         if (!wc->update_ref ||
7471                             generation <= root->root_key.offset)
7472                                 continue;
7473                         btrfs_node_key_to_cpu(eb, &key, slot);
7474                         ret = btrfs_comp_cpu_keys(&key,
7475                                                   &wc->update_progress);
7476                         if (ret < 0)
7477                                 continue;
7478                 } else {
7479                         if (wc->level == 1 &&
7480                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7481                                 continue;
7482                 }
7483 reada:
7484                 readahead_tree_block(root, bytenr, blocksize);
7485                 nread++;
7486         }
7487         wc->reada_slot = slot;
7488 }
7489
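/*
 * Record a qgroup operation for every on-disk file extent referenced by
 * this leaf so qgroup accounting stays consistent when a shared subtree
 * is dropped.
 */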
7490 static int account_leaf_items(struct btrfs_trans_handle *trans,
7491                               struct btrfs_root *root,
7492                               struct extent_buffer *eb)
7493 {
7494         int nr = btrfs_header_nritems(eb);
7495         int i, extent_type, ret;
7496         struct btrfs_key key;
7497         struct btrfs_file_extent_item *fi;
7498         u64 bytenr, num_bytes;
7499
7500         for (i = 0; i < nr; i++) {
7501                 btrfs_item_key_to_cpu(eb, &key, i);
7502
7503                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7504                         continue;
7505
7506                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7507                 /* filter out non-qgroup-accountable extents */
7508                 extent_type = btrfs_file_extent_type(eb, fi);
7509
7510                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7511                         continue;
7512
7513                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7514                 if (!bytenr)
7515                         continue;
7516
7517                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7518
7519                 ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7520                                               root->objectid,
7521                                               bytenr, num_bytes,
7522                                               BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
7523                 if (ret)
7524                         return ret;
7525         }
7526         return 0;
7527 }
7528
7529 /*
7530  * Walk up the tree from the bottom, freeing leaves and any interior
7531  * nodes which have had all slots visited. If a node (leaf or
7532  * interior) is freed, the node above it will have its slot
7533  * incremented. The root node will never be freed.
7534  *
7535  * At the end of this function, we should have a path which has all
7536  * slots incremented to the next position for a search. If we need to
7537  * read a new node it will be NULL and the node above it will have the
7538  * correct slot selected for a later read.
7539  *
7540  * If we increment the root node's slot counter past the number of
7541  * elements, 1 is returned to signal completion of the search.
7542  */
7543 static int adjust_slots_upwards(struct btrfs_root *root,
7544                                 struct btrfs_path *path, int root_level)
7545 {
7546         int level = 0;
7547         int nr, slot;
7548         struct extent_buffer *eb;
7549
7550         if (root_level == 0)
7551                 return 1;
7552
7553         while (level <= root_level) {
7554                 eb = path->nodes[level];
7555                 nr = btrfs_header_nritems(eb);
7556                 path->slots[level]++;
7557                 slot = path->slots[level];
7558                 if (slot >= nr || level == 0) {
7559                         /*
7560                          * Don't free the root -  we will detect this
7561                          * condition after our loop and return a
7562                          * positive value for caller to stop walking the tree.
7563                          */
7564                         if (level != root_level) {
7565                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7566                                 path->locks[level] = 0;
7567
7568                                 free_extent_buffer(eb);
7569                                 path->nodes[level] = NULL;
7570                                 path->slots[level] = 0;
7571                         }
7572                 } else {
7573                         /*
7574                          * We have a valid slot to walk back down
7575                          * from. Stop here so caller can process these
7576                          * new nodes.
7577                          */
7578                         break;
7579                 }
7580
7581                 level++;
7582         }
7583
7584         eb = path->nodes[root_level];
7585         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7586                 return 1;
7587
7588         return 0;
7589 }
7590
7591 /*
7592  * root_eb is the subtree root and is locked before this function is called.
7593  */
7594 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7595                                   struct btrfs_root *root,
7596                                   struct extent_buffer *root_eb,
7597                                   u64 root_gen,
7598                                   int root_level)
7599 {
7600         int ret = 0;
7601         int level;
7602         struct extent_buffer *eb = root_eb;
7603         struct btrfs_path *path = NULL;
7604
7605         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7606         BUG_ON(root_eb == NULL);
7607
7608         if (!root->fs_info->quota_enabled)
7609                 return 0;
7610
7611         if (!extent_buffer_uptodate(root_eb)) {
7612                 ret = btrfs_read_buffer(root_eb, root_gen);
7613                 if (ret)
7614                         goto out;
7615         }
7616
7617         if (root_level == 0) {
7618                 ret = account_leaf_items(trans, root, root_eb);
7619                 goto out;
7620         }
7621
7622         path = btrfs_alloc_path();
7623         if (!path)
7624                 return -ENOMEM;
7625
7626         /*
7627          * Walk down the tree.  Missing extent blocks are filled in as
7628          * we go. Metadata is accounted every time we read a new
7629          * extent block.
7630          *
7631          * When we reach a leaf, we account for file extent items in it,
7632          * walk back up the tree (adjusting slot pointers as we go)
7633          * and restart the search process.
7634          */
7635         extent_buffer_get(root_eb); /* For path */
7636         path->nodes[root_level] = root_eb;
7637         path->slots[root_level] = 0;
7638         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
7639 walk_down:
7640         level = root_level;
7641         while (level >= 0) {
7642                 if (path->nodes[level] == NULL) {
7643                         int parent_slot;
7644                         u64 child_gen;
7645                         u64 child_bytenr;
7646
7647                         /* We need to get child blockptr/gen from
7648                          * parent before we can read it. */
7649                         eb = path->nodes[level + 1];
7650                         parent_slot = path->slots[level + 1];
7651                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
7652                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
7653
7654                         eb = read_tree_block(root, child_bytenr, child_gen);
7655                         if (!eb || !extent_buffer_uptodate(eb)) {
7656                                 ret = -EIO;
7657                                 goto out;
7658                         }
7659
7660                         path->nodes[level] = eb;
7661                         path->slots[level] = 0;
7662
7663                         btrfs_tree_read_lock(eb);
7664                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7665                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
7666
7667                         ret = btrfs_qgroup_record_ref(trans, root->fs_info,
7668                                                 root->objectid,
7669                                                 child_bytenr,
7670                                                 root->nodesize,
7671                                                 BTRFS_QGROUP_OPER_SUB_SUBTREE,
7672                                                 0);
7673                         if (ret)
7674                                 goto out;
7675
7676                 }
7677
7678                 if (level == 0) {
7679                         ret = account_leaf_items(trans, root, path->nodes[level]);
7680                         if (ret)
7681                                 goto out;
7682
7683                         /* Nonzero return here means we completed our search */
7684                         ret = adjust_slots_upwards(root, path, root_level);
7685                         if (ret)
7686                                 break;
7687
7688                         /* Restart search with new slots */
7689                         goto walk_down;
7690                 }
7691
7692                 level--;
7693         }
7694
7695         ret = 0;
7696 out:
7697         btrfs_free_path(path);
7698
7699         return ret;
7700 }
7701
7702 /*
7703  * helper to process tree block while walking down the tree.
7704  *
7705  * when wc->stage == UPDATE_BACKREF, this function updates
7706  * back refs for pointers in the block.
7707  *
7708  * NOTE: return value 1 means we should stop walking down.
7709  */
7710 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7711                                    struct btrfs_root *root,
7712                                    struct btrfs_path *path,
7713                                    struct walk_control *wc, int lookup_info)
7714 {
7715         int level = wc->level;
7716         struct extent_buffer *eb = path->nodes[level];
7717         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7718         int ret;
7719
7720         if (wc->stage == UPDATE_BACKREF &&
7721             btrfs_header_owner(eb) != root->root_key.objectid)
7722                 return 1;
7723
7724         /*
7725          * when the reference count of a tree block is 1, it won't increase
7726          * again.  Once the full backref flag is set, we never clear it.
7727          */
7728         if (lookup_info &&
7729             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7730              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7731                 BUG_ON(!path->locks[level]);
7732                 ret = btrfs_lookup_extent_info(trans, root,
7733                                                eb->start, level, 1,
7734                                                &wc->refs[level],
7735                                                &wc->flags[level]);
7736                 BUG_ON(ret == -ENOMEM);
7737                 if (ret)
7738                         return ret;
7739                 BUG_ON(wc->refs[level] == 0);
7740         }
7741
7742         if (wc->stage == DROP_REFERENCE) {
7743                 if (wc->refs[level] > 1)
7744                         return 1;
7745
7746                 if (path->locks[level] && !wc->keep_locks) {
7747                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7748                         path->locks[level] = 0;
7749                 }
7750                 return 0;
7751         }
7752
7753         /* wc->stage == UPDATE_BACKREF */
7754         if (!(wc->flags[level] & flag)) {
7755                 BUG_ON(!path->locks[level]);
7756                 ret = btrfs_inc_ref(trans, root, eb, 1);
7757                 BUG_ON(ret); /* -ENOMEM */
7758                 ret = btrfs_dec_ref(trans, root, eb, 0);
7759                 BUG_ON(ret); /* -ENOMEM */
7760                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7761                                                   eb->len, flag,
7762                                                   btrfs_header_level(eb), 0);
7763                 BUG_ON(ret); /* -ENOMEM */
7764                 wc->flags[level] |= flag;
7765         }
7766
7767         /*
7768          * the block is shared by multiple trees, so it's not good to
7769          * keep the tree lock
7770          */
7771         if (path->locks[level] && level > 0) {
7772                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7773                 path->locks[level] = 0;
7774         }
7775         return 0;
7776 }
7777
7778 /*
7779  * helper to process tree block pointer.
7780  *
7781  * when wc->stage == DROP_REFERENCE, this function checks
7782  * reference count of the block pointed to. if the block
7783  * is shared and we need update back refs for the subtree
7784  * rooted at the block, this function changes wc->stage to
7785  * UPDATE_BACKREF. if the block is shared and there is no
7786  * need to update back, this function drops the reference
7787  * to the block.
7788  *
7789  * NOTE: return value 1 means we should stop walking down.
7790  */
7791 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7792                                  struct btrfs_root *root,
7793                                  struct btrfs_path *path,
7794                                  struct walk_control *wc, int *lookup_info)
7795 {
7796         u64 bytenr;
7797         u64 generation;
7798         u64 parent;
7799         u32 blocksize;
7800         struct btrfs_key key;
7801         struct extent_buffer *next;
7802         int level = wc->level;
7803         int reada = 0;
7804         int ret = 0;
7805         bool need_account = false;
7806
7807         generation = btrfs_node_ptr_generation(path->nodes[level],
7808                                                path->slots[level]);
7809         /*
7810          * if the lower level block was created before the snapshot
7811          * was created, we know there is no need to update back refs
7812          * for the subtree
7813          */
7814         if (wc->stage == UPDATE_BACKREF &&
7815             generation <= root->root_key.offset) {
7816                 *lookup_info = 1;
7817                 return 1;
7818         }
7819
7820         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7821         blocksize = root->nodesize;
7822
7823         next = btrfs_find_tree_block(root, bytenr);
7824         if (!next) {
7825                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7826                 if (!next)
7827                         return -ENOMEM;
7828                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7829                                                level - 1);
7830                 reada = 1;
7831         }
7832         btrfs_tree_lock(next);
7833         btrfs_set_lock_blocking(next);
7834
7835         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7836                                        &wc->refs[level - 1],
7837                                        &wc->flags[level - 1]);
7838         if (ret < 0) {
7839                 btrfs_tree_unlock(next);
7840                 return ret;
7841         }
7842
7843         if (unlikely(wc->refs[level - 1] == 0)) {
7844                 btrfs_err(root->fs_info, "Missing references.");
7845                 BUG();
7846         }
7847         *lookup_info = 0;
7848
7849         if (wc->stage == DROP_REFERENCE) {
7850                 if (wc->refs[level - 1] > 1) {
7851                         need_account = true;
7852                         if (level == 1 &&
7853                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7854                                 goto skip;
7855
7856                         if (!wc->update_ref ||
7857                             generation <= root->root_key.offset)
7858                                 goto skip;
7859
7860                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7861                                               path->slots[level]);
7862                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7863                         if (ret < 0)
7864                                 goto skip;
7865
7866                         wc->stage = UPDATE_BACKREF;
7867                         wc->shared_level = level - 1;
7868                 }
7869         } else {
7870                 if (level == 1 &&
7871                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7872                         goto skip;
7873         }
7874
7875         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7876                 btrfs_tree_unlock(next);
7877                 free_extent_buffer(next);
7878                 next = NULL;
7879                 *lookup_info = 1;
7880         }
7881
7882         if (!next) {
7883                 if (reada && level == 1)
7884                         reada_walk_down(trans, root, wc, path);
7885                 next = read_tree_block(root, bytenr, generation);
7886                 if (!next || !extent_buffer_uptodate(next)) {
7887                         free_extent_buffer(next);
7888                         return -EIO;
7889                 }
7890                 btrfs_tree_lock(next);
7891                 btrfs_set_lock_blocking(next);
7892         }
7893
7894         level--;
7895         BUG_ON(level != btrfs_header_level(next));
7896         path->nodes[level] = next;
7897         path->slots[level] = 0;
7898         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7899         wc->level = level;
7900         if (wc->level == 1)
7901                 wc->reada_slot = 0;
7902         return 0;
7903 skip:
7904         wc->refs[level - 1] = 0;
7905         wc->flags[level - 1] = 0;
7906         if (wc->stage == DROP_REFERENCE) {
7907                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7908                         parent = path->nodes[level]->start;
7909                 } else {
7910                         BUG_ON(root->root_key.objectid !=
7911                                btrfs_header_owner(path->nodes[level]));
7912                         parent = 0;
7913                 }
7914
7915                 if (need_account) {
7916                         ret = account_shared_subtree(trans, root, next,
7917                                                      generation, level - 1);
7918                         if (ret) {
7919                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
7920                                         "%d accounting shared subtree. Quota "
7921                                         "is out of sync, rescan required.\n",
7922                                         root->fs_info->sb->s_id, ret);
7923                         }
7924                 }
7925                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7926                                 root->root_key.objectid, level - 1, 0, 0);
7927                 BUG_ON(ret); /* -ENOMEM */
7928         }
7929         btrfs_tree_unlock(next);
7930         free_extent_buffer(next);
7931         *lookup_info = 1;
7932         return 1;
7933 }
7934
7935 /*
7936  * helper to process tree block while walking up the tree.
7937  *
7938  * when wc->stage == DROP_REFERENCE, this function drops
7939  * reference count on the block.
7940  *
7941  * when wc->stage == UPDATE_BACKREF, this function changes
7942  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7943  * to UPDATE_BACKREF previously while processing the block.
7944  *
7945  * NOTE: return value 1 means we should stop walking up.
7946  */
7947 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7948                                  struct btrfs_root *root,
7949                                  struct btrfs_path *path,
7950                                  struct walk_control *wc)
7951 {
7952         int ret;
7953         int level = wc->level;
7954         struct extent_buffer *eb = path->nodes[level];
7955         u64 parent = 0;
7956
7957         if (wc->stage == UPDATE_BACKREF) {
7958                 BUG_ON(wc->shared_level < level);
7959                 if (level < wc->shared_level)
7960                         goto out;
7961
7962                 ret = find_next_key(path, level + 1, &wc->update_progress);
7963                 if (ret > 0)
7964                         wc->update_ref = 0;
7965
7966                 wc->stage = DROP_REFERENCE;
7967                 wc->shared_level = -1;
7968                 path->slots[level] = 0;
7969
7970                 /*
7971                  * check reference count again if the block isn't locked.
7972                  * we should start walking down the tree again if reference
7973                  * count is one.
7974                  */
7975                 if (!path->locks[level]) {
7976                         BUG_ON(level == 0);
7977                         btrfs_tree_lock(eb);
7978                         btrfs_set_lock_blocking(eb);
7979                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7980
7981                         ret = btrfs_lookup_extent_info(trans, root,
7982                                                        eb->start, level, 1,
7983                                                        &wc->refs[level],
7984                                                        &wc->flags[level]);
7985                         if (ret < 0) {
7986                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7987                                 path->locks[level] = 0;
7988                                 return ret;
7989                         }
7990                         BUG_ON(wc->refs[level] == 0);
7991                         if (wc->refs[level] == 1) {
7992                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7993                                 path->locks[level] = 0;
7994                                 return 1;
7995                         }
7996                 }
7997         }
7998
7999         /* wc->stage == DROP_REFERENCE */
8000         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8001
8002         if (wc->refs[level] == 1) {
8003                 if (level == 0) {
8004                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8005                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8006                         else
8007                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8008                         BUG_ON(ret); /* -ENOMEM */
8009                         ret = account_leaf_items(trans, root, eb);
8010                         if (ret) {
8011                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8012                                         "%d accounting leaf items. Quota "
8013                                         "is out of sync, rescan required.\n",
8014                                         root->fs_info->sb->s_id, ret);
8015                         }
8016                 }
8017                 /* make block locked assertion in clean_tree_block happy */
8018                 if (!path->locks[level] &&
8019                     btrfs_header_generation(eb) == trans->transid) {
8020                         btrfs_tree_lock(eb);
8021                         btrfs_set_lock_blocking(eb);
8022                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8023                 }
8024                 clean_tree_block(trans, root, eb);
8025         }
8026
8027         if (eb == root->node) {
8028                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8029                         parent = eb->start;
8030                 else
8031                         BUG_ON(root->root_key.objectid !=
8032                                btrfs_header_owner(eb));
8033         } else {
8034                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8035                         parent = path->nodes[level + 1]->start;
8036                 else
8037                         BUG_ON(root->root_key.objectid !=
8038                                btrfs_header_owner(path->nodes[level + 1]));
8039         }
8040
8041         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8042 out:
8043         wc->refs[level] = 0;
8044         wc->flags[level] = 0;
8045         return 0;
8046 }
8047
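     /*
      * Walk from wc->level toward the leaves, processing each block with
      * walk_down_proc() and descending through do_walk_down().  Stops at a
      * leaf, when the current level runs out of slots, or when
      * walk_down_proc() decides the subtree does not need to be walked.
      */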
8048 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8049                                    struct btrfs_root *root,
8050                                    struct btrfs_path *path,
8051                                    struct walk_control *wc)
8052 {
8053         int level = wc->level;
8054         int lookup_info = 1;
8055         int ret;
8056
8057         while (level >= 0) {
8058                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8059                 if (ret > 0)
8060                         break;
8061
8062                 if (level == 0)
8063                         break;
8064
8065                 if (path->slots[level] >=
8066                     btrfs_header_nritems(path->nodes[level]))
8067                         break;
8068
8069                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8070                 if (ret > 0) {
8071                         path->slots[level]++;
8072                         continue;
8073                 } else if (ret < 0)
8074                         return ret;
8075                 level = wc->level;
8076         }
8077         return 0;
8078 }
8079
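     /*
      * Walk back up from wc->level toward max_level.  Returns 0 as soon as a
      * level still has unvisited slots (after advancing the slot) or
      * walk_up_proc() asks to stop; returns 1 once every level up to
      * max_level has been finished and its extent buffer released.
      */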
8080 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8081                                  struct btrfs_root *root,
8082                                  struct btrfs_path *path,
8083                                  struct walk_control *wc, int max_level)
8084 {
8085         int level = wc->level;
8086         int ret;
8087
8088         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8089         while (level < max_level && path->nodes[level]) {
8090                 wc->level = level;
8091                 if (path->slots[level] + 1 <
8092                     btrfs_header_nritems(path->nodes[level])) {
8093                         path->slots[level]++;
8094                         return 0;
8095                 } else {
8096                         ret = walk_up_proc(trans, root, path, wc);
8097                         if (ret > 0)
8098                                 return 0;
8099
8100                         if (path->locks[level]) {
8101                                 btrfs_tree_unlock_rw(path->nodes[level],
8102                                                      path->locks[level]);
8103                                 path->locks[level] = 0;
8104                         }
8105                         free_extent_buffer(path->nodes[level]);
8106                         path->nodes[level] = NULL;
8107                         level++;
8108                 }
8109         }
8110         return 1;
8111 }
8112
8113 /*
8114  * drop a subvolume tree.
8115  *
8116  * this function traverses the tree, freeing any blocks that are only
8117  * referenced by the tree.
8118  *
8119  * when a shared tree block is found, this function decreases its
8120  * reference count by one. if update_ref is true, this function
8121  * also makes sure backrefs for the shared block and all lower level
8122  * blocks are properly updated.
8123  *
8124  * If called with for_reloc == 0, may exit early with -EAGAIN
8125  */
8126 int btrfs_drop_snapshot(struct btrfs_root *root,
8127                          struct btrfs_block_rsv *block_rsv, int update_ref,
8128                          int for_reloc)
8129 {
8130         struct btrfs_path *path;
8131         struct btrfs_trans_handle *trans;
8132         struct btrfs_root *tree_root = root->fs_info->tree_root;
8133         struct btrfs_root_item *root_item = &root->root_item;
8134         struct walk_control *wc;
8135         struct btrfs_key key;
8136         int err = 0;
8137         int ret;
8138         int level;
8139         bool root_dropped = false;
8140
8141         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8142
8143         path = btrfs_alloc_path();
8144         if (!path) {
8145                 err = -ENOMEM;
8146                 goto out;
8147         }
8148
8149         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8150         if (!wc) {
8151                 btrfs_free_path(path);
8152                 err = -ENOMEM;
8153                 goto out;
8154         }
8155
8156         trans = btrfs_start_transaction(tree_root, 0);
8157         if (IS_ERR(trans)) {
8158                 err = PTR_ERR(trans);
8159                 goto out_free;
8160         }
8161
8162         if (block_rsv)
8163                 trans->block_rsv = block_rsv;
8164
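             /*
              * drop_progress in the root item records how far a previous,
              * interrupted drop got.  If it is unset we start from the root
              * node; otherwise we re-search to the saved key at the saved
              * drop_level and re-take locks and ref counts for the nodes
              * above it.
              */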
8165         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8166                 level = btrfs_header_level(root->node);
8167                 path->nodes[level] = btrfs_lock_root_node(root);
8168                 btrfs_set_lock_blocking(path->nodes[level]);
8169                 path->slots[level] = 0;
8170                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8171                 memset(&wc->update_progress, 0,
8172                        sizeof(wc->update_progress));
8173         } else {
8174                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8175                 memcpy(&wc->update_progress, &key,
8176                        sizeof(wc->update_progress));
8177
8178                 level = root_item->drop_level;
8179                 BUG_ON(level == 0);
8180                 path->lowest_level = level;
8181                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8182                 path->lowest_level = 0;
8183                 if (ret < 0) {
8184                         err = ret;
8185                         goto out_end_trans;
8186                 }
8187                 WARN_ON(ret > 0);
8188
8189                 /*
8190                  * unlock our path; this is safe because only this
8191                  * function is allowed to delete this snapshot
8192                  */
8193                 btrfs_unlock_up_safe(path, 0);
8194
8195                 level = btrfs_header_level(root->node);
8196                 while (1) {
8197                         btrfs_tree_lock(path->nodes[level]);
8198                         btrfs_set_lock_blocking(path->nodes[level]);
8199                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8200
8201                         ret = btrfs_lookup_extent_info(trans, root,
8202                                                 path->nodes[level]->start,
8203                                                 level, 1, &wc->refs[level],
8204                                                 &wc->flags[level]);
8205                         if (ret < 0) {
8206                                 err = ret;
8207                                 goto out_end_trans;
8208                         }
8209                         BUG_ON(wc->refs[level] == 0);
8210
8211                         if (level == root_item->drop_level)
8212                                 break;
8213
8214                         btrfs_tree_unlock(path->nodes[level]);
8215                         path->locks[level] = 0;
8216                         WARN_ON(wc->refs[level] != 1);
8217                         level--;
8218                 }
8219         }
8220
8221         wc->level = level;
8222         wc->shared_level = -1;
8223         wc->stage = DROP_REFERENCE;
8224         wc->update_ref = update_ref;
8225         wc->keep_locks = 0;
8226         wc->for_reloc = for_reloc;
8227         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8228
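             /*
              * Main drop loop: walk down to free what we can, walk back up,
              * and record our position in drop_progress so the drop can
              * resume later.  The transaction is ended and restarted
              * periodically so it isn't pinned open for the whole tree.
              */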
8229         while (1) {
8230
8231                 ret = walk_down_tree(trans, root, path, wc);
8232                 if (ret < 0) {
8233                         err = ret;
8234                         break;
8235                 }
8236
8237                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8238                 if (ret < 0) {
8239                         err = ret;
8240                         break;
8241                 }
8242
8243                 if (ret > 0) {
8244                         BUG_ON(wc->stage != DROP_REFERENCE);
8245                         break;
8246                 }
8247
8248                 if (wc->stage == DROP_REFERENCE) {
8249                         level = wc->level;
8250                         btrfs_node_key(path->nodes[level],
8251                                        &root_item->drop_progress,
8252                                        path->slots[level]);
8253                         root_item->drop_level = level;
8254                 }
8255
8256                 BUG_ON(wc->level == 0);
8257                 if (btrfs_should_end_transaction(trans, tree_root) ||
8258                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8259                         ret = btrfs_update_root(trans, tree_root,
8260                                                 &root->root_key,
8261                                                 root_item);
8262                         if (ret) {
8263                                 btrfs_abort_transaction(trans, tree_root, ret);
8264                                 err = ret;
8265                                 goto out_end_trans;
8266                         }
8267
8268                         /*
8269                          * Qgroup update accounting is run from
8270                          * delayed ref handling. This usually works
8271                          * out because delayed refs are normally the
8272                          * only way qgroup updates are added. However,
8273                          * we may have added updates during our tree
8274                          * walk so run qgroups here to make sure we
8275                          * don't lose any updates.
8276                          */
8277                         ret = btrfs_delayed_qgroup_accounting(trans,
8278                                                               root->fs_info);
8279                         if (ret)
8280                                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8281                                                    "running qgroup updates "
8282                                                    "during snapshot delete. "
8283                                                    "Quota is out of sync, "
8284                                                    "rescan required.\n", ret);
8285
8286                         btrfs_end_transaction_throttle(trans, tree_root);
8287                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8288                                 pr_debug("BTRFS: drop snapshot early exit\n");
8289                                 err = -EAGAIN;
8290                                 goto out_free;
8291                         }
8292
8293                         trans = btrfs_start_transaction(tree_root, 0);
8294                         if (IS_ERR(trans)) {
8295                                 err = PTR_ERR(trans);
8296                                 goto out_free;
8297                         }
8298                         if (block_rsv)
8299                                 trans->block_rsv = block_rsv;
8300                 }
8301         }
8302         btrfs_release_path(path);
8303         if (err)
8304                 goto out_end_trans;
8305
8306         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8307         if (ret) {
8308                 btrfs_abort_transaction(trans, tree_root, ret);
8309                 goto out_end_trans;
8310         }
8311
8312         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8313                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8314                                       NULL, NULL);
8315                 if (ret < 0) {
8316                         btrfs_abort_transaction(trans, tree_root, ret);
8317                         err = ret;
8318                         goto out_end_trans;
8319                 } else if (ret > 0) {
8320                         /* if we fail to delete the orphan item this time
8321                          * around, it'll get picked up the next time.
8322                          *
8323                          * The most common failure here is just -ENOENT.
8324                          */
8325                         btrfs_del_orphan_item(trans, tree_root,
8326                                               root->root_key.objectid);
8327                 }
8328         }
8329
8330         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8331                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
8332         } else {
8333                 free_extent_buffer(root->node);
8334                 free_extent_buffer(root->commit_root);
8335                 btrfs_put_fs_root(root);
8336         }
8337         root_dropped = true;
8338 out_end_trans:
8339         ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
8340         if (ret)
8341                 printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
8342                                    "running qgroup updates "
8343                                    "during snapshot delete. "
8344                                    "Quota is out of sync, "
8345                                    "rescan required.\n", ret);
8346
8347         btrfs_end_transaction_throttle(trans, tree_root);
8348 out_free:
8349         kfree(wc);
8350         btrfs_free_path(path);
8351 out:
8352         /*
8353          * So if we need to stop dropping the snapshot for whatever reason we
8354          * need to make sure to add it back to the dead root list so that we
8355          * keep trying to do the work later.  This also cleans up roots if we
8356          * don't have it in the radix (like when we recover after a power fail
8357          * or unmount) so we don't leak memory.
8358          */
8359         if (!for_reloc && root_dropped == false)
8360                 btrfs_add_dead_root(root);
8361         if (err && err != -EAGAIN)
8362                 btrfs_std_error(root->fs_info, err);
8363         return err;
8364 }
8365
8366 /*
8367  * drop subtree rooted at tree block 'node'.
8368  *
8369  * NOTE: this function will unlock and release tree block 'node'.
8370  * It is only used by the relocation code.
8371  */
8372 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8373                         struct btrfs_root *root,
8374                         struct extent_buffer *node,
8375                         struct extent_buffer *parent)
8376 {
8377         struct btrfs_path *path;
8378         struct walk_control *wc;
8379         int level;
8380         int parent_level;
8381         int ret = 0;
8382         int wret;
8383
8384         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8385
8386         path = btrfs_alloc_path();
8387         if (!path)
8388                 return -ENOMEM;
8389
8390         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8391         if (!wc) {
8392                 btrfs_free_path(path);
8393                 return -ENOMEM;
8394         }
8395
8396         btrfs_assert_tree_locked(parent);
8397         parent_level = btrfs_header_level(parent);
8398         extent_buffer_get(parent);
8399         path->nodes[parent_level] = parent;
8400         path->slots[parent_level] = btrfs_header_nritems(parent);
8401
8402         btrfs_assert_tree_locked(node);
8403         level = btrfs_header_level(node);
8404         path->nodes[level] = node;
8405         path->slots[level] = 0;
8406         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8407
8408         wc->refs[parent_level] = 1;
8409         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8410         wc->level = level;
8411         wc->shared_level = -1;
8412         wc->stage = DROP_REFERENCE;
8413         wc->update_ref = 0;
8414         wc->keep_locks = 1;
8415         wc->for_reloc = 1;
8416         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8417
8418         while (1) {
8419                 wret = walk_down_tree(trans, root, path, wc);
8420                 if (wret < 0) {
8421                         ret = wret;
8422                         break;
8423                 }
8424
8425                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8426                 if (wret < 0)
8427                         ret = wret;
8428                 if (wret != 0)
8429                         break;
8430         }
8431
8432         kfree(wc);
8433         btrfs_free_path(path);
8434         return ret;
8435 }
8436
8437 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8438 {
8439         u64 num_devices;
8440         u64 stripped;
8441
8442         /*
8443          * if restripe for this chunk_type is on, pick the target profile and
8444          * return it; otherwise do the usual balance
8445          */
8446         stripped = get_restripe_target(root->fs_info, flags);
8447         if (stripped)
8448                 return extended_to_chunk(stripped);
8449
8450         num_devices = root->fs_info->fs_devices->rw_devices;
8451
8452         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8453                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8454                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8455
8456         if (num_devices == 1) {
8457                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8458                 stripped = flags & ~stripped;
8459
8460                 /* turn raid0 into single device chunks */
8461                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8462                         return stripped;
8463
8464                 /* turn mirroring into duplication */
8465                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8466                              BTRFS_BLOCK_GROUP_RAID10))
8467                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8468         } else {
8469                 /* they already had raid on here, just return */
8470                 if (flags & stripped)
8471                         return flags;
8472
8473                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8474                 stripped = flags & ~stripped;
8475
8476                 /* switch duplicated blocks with raid1 */
8477                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8478                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8479
8480                 /* this is drive concat, leave it alone */
8481         }
8482
8483         return flags;
8484 }
8485
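     /*
      * Try to mark a block group read-only.  The group's unused bytes are
      * added to the space_info's bytes_readonly counter, but only if the
      * space_info could still satisfy min_allocable_bytes of allocations
      * afterwards; otherwise -ENOSPC is returned.
      */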
8486 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8487 {
8488         struct btrfs_space_info *sinfo = cache->space_info;
8489         u64 num_bytes;
8490         u64 min_allocable_bytes;
8491         int ret = -ENOSPC;
8492
8493
8494         /*
8495          * We need some metadata space and system metadata space for
8496          * allocating chunks in some corner cases, so unless forced, require
8497          * a minimum of allocatable space before marking the group readonly.
8498          */
8499         if ((sinfo->flags &
8500              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8501             !force)
8502                 min_allocable_bytes = 1 * 1024 * 1024;
8503         else
8504                 min_allocable_bytes = 0;
8505
8506         spin_lock(&sinfo->lock);
8507         spin_lock(&cache->lock);
8508
8509         if (cache->ro) {
8510                 ret = 0;
8511                 goto out;
8512         }
8513
8514         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8515                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8516
8517         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8518             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8519             min_allocable_bytes <= sinfo->total_bytes) {
8520                 sinfo->bytes_readonly += num_bytes;
8521                 cache->ro = 1;
8522                 ret = 0;
8523         }
8524 out:
8525         spin_unlock(&cache->lock);
8526         spin_unlock(&sinfo->lock);
8527         return ret;
8528 }
8529
8530 int btrfs_set_block_group_ro(struct btrfs_root *root,
8531                              struct btrfs_block_group_cache *cache)
8532
8533 {
8534         struct btrfs_trans_handle *trans;
8535         u64 alloc_flags;
8536         int ret;
8537
8538         BUG_ON(cache->ro);
8539
8540         trans = btrfs_join_transaction(root);
8541         if (IS_ERR(trans))
8542                 return PTR_ERR(trans);
8543
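             /*
              * First make sure the block group is using the profile a pending
              * restripe wants (allocating a chunk with the new flags if
              * needed), then try to set it read-only.  If that fails for lack
              * of space, force-allocate one more chunk for this space_info
              * and retry once.
              */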
8544         alloc_flags = update_block_group_flags(root, cache->flags);
8545         if (alloc_flags != cache->flags) {
8546                 ret = do_chunk_alloc(trans, root, alloc_flags,
8547                                      CHUNK_ALLOC_FORCE);
8548                 if (ret < 0)
8549                         goto out;
8550         }
8551
8552         ret = set_block_group_ro(cache, 0);
8553         if (!ret)
8554                 goto out;
8555         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8556         ret = do_chunk_alloc(trans, root, alloc_flags,
8557                              CHUNK_ALLOC_FORCE);
8558         if (ret < 0)
8559                 goto out;
8560         ret = set_block_group_ro(cache, 0);
8561 out:
8562         btrfs_end_transaction(trans, root);
8563         return ret;
8564 }
8565
8566 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8567                             struct btrfs_root *root, u64 type)
8568 {
8569         u64 alloc_flags = get_alloc_profile(root, type);
8570         return do_chunk_alloc(trans, root, alloc_flags,
8571                               CHUNK_ALLOC_FORCE);
8572 }
8573
8574 /*
8575  * helper to account the unused space of all the readonly block groups in
8576  * the list. takes mirrors into account.
8577  */
8578 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8579 {
8580         struct btrfs_block_group_cache *block_group;
8581         u64 free_bytes = 0;
8582         int factor;
8583
8584         list_for_each_entry(block_group, groups_list, list) {
8585                 spin_lock(&block_group->lock);
8586
8587                 if (!block_group->ro) {
8588                         spin_unlock(&block_group->lock);
8589                         continue;
8590                 }
8591
8592                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8593                                           BTRFS_BLOCK_GROUP_RAID10 |
8594                                           BTRFS_BLOCK_GROUP_DUP))
8595                         factor = 2;
8596                 else
8597                         factor = 1;
8598
8599                 free_bytes += (block_group->key.offset -
8600                                btrfs_block_group_used(&block_group->item)) *
8601                                factor;
8602
8603                 spin_unlock(&block_group->lock);
8604         }
8605
8606         return free_bytes;
8607 }
8608
8609 /*
8610  * helper to account the unused space of all the readonly block groups in
8611  * the space_info. takes mirrors into account.
8612  */
8613 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8614 {
8615         int i;
8616         u64 free_bytes = 0;
8617
8618         spin_lock(&sinfo->lock);
8619
8620         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8621                 if (!list_empty(&sinfo->block_groups[i]))
8622                         free_bytes += __btrfs_get_ro_block_group_free_space(
8623                                                 &sinfo->block_groups[i]);
8624
8625         spin_unlock(&sinfo->lock);
8626
8627         return free_bytes;
8628 }
8629
8630 void btrfs_set_block_group_rw(struct btrfs_root *root,
8631                               struct btrfs_block_group_cache *cache)
8632 {
8633         struct btrfs_space_info *sinfo = cache->space_info;
8634         u64 num_bytes;
8635
8636         BUG_ON(!cache->ro);
8637
8638         spin_lock(&sinfo->lock);
8639         spin_lock(&cache->lock);
8640         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8641                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8642         sinfo->bytes_readonly -= num_bytes;
8643         cache->ro = 0;
8644         spin_unlock(&cache->lock);
8645         spin_unlock(&sinfo->lock);
8646 }
8647
8648 /*
8649  * checks to see if it's even possible to relocate this block group.
8650  *
8651  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8652  * ok to go ahead and try.
8653  */
8654 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8655 {
8656         struct btrfs_block_group_cache *block_group;
8657         struct btrfs_space_info *space_info;
8658         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8659         struct btrfs_device *device;
8660         struct btrfs_trans_handle *trans;
8661         u64 min_free;
8662         u64 dev_min = 1;
8663         u64 dev_nr = 0;
8664         u64 target;
8665         int index;
8666         int full = 0;
8667         int ret = 0;
8668
8669         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8670
8671         /* odd, couldn't find the block group, leave it alone */
8672         if (!block_group)
8673                 return -1;
8674
8675         min_free = btrfs_block_group_used(&block_group->item);
8676
8677         /* no bytes used, we're good */
8678         if (!min_free)
8679                 goto out;
8680
8681         space_info = block_group->space_info;
8682         spin_lock(&space_info->lock);
8683
8684         full = space_info->full;
8685
8686         /*
8687          * if this is the last block group we have in this space, we can't
8688          * relocate it unless we're able to allocate a new chunk below.
8689          *
8690          * Otherwise, we need to make sure we have room in the space to handle
8691          * all of the extents from this block group.  If we can, we're good
8692          */
8693         if ((space_info->total_bytes != block_group->key.offset) &&
8694             (space_info->bytes_used + space_info->bytes_reserved +
8695              space_info->bytes_pinned + space_info->bytes_readonly +
8696              min_free < space_info->total_bytes)) {
8697                 spin_unlock(&space_info->lock);
8698                 goto out;
8699         }
8700         spin_unlock(&space_info->lock);
8701
8702         /*
8703          * ok we don't have enough space, but maybe we have free space on our
8704          * devices to allocate new chunks for relocation, so loop through our
8705          * alloc devices and guess if we have enough space.  if this block
8706          * group is going to be restriped, run checks against the target
8707          * profile instead of the current one.
8708          */
8709         ret = -1;
8710
8711         /*
8712          * index:
8713          *      0: raid10
8714          *      1: raid1
8715          *      2: dup
8716          *      3: raid0
8717          *      4: single
8718          */
8719         target = get_restripe_target(root->fs_info, block_group->flags);
8720         if (target) {
8721                 index = __get_raid_index(extended_to_chunk(target));
8722         } else {
8723                 /*
8724                  * this is just a balance, so if we were marked as full
8725                  * we know there is no space for a new chunk
8726                  */
8727                 if (full)
8728                         goto out;
8729
8730                 index = get_block_group_index(block_group);
8731         }
8732
8733         if (index == BTRFS_RAID_RAID10) {
8734                 dev_min = 4;
8735                 /* Divide by 2 */
8736                 min_free >>= 1;
8737         } else if (index == BTRFS_RAID_RAID1) {
8738                 dev_min = 2;
8739         } else if (index == BTRFS_RAID_DUP) {
8740                 /* Multiply by 2 */
8741                 min_free <<= 1;
8742         } else if (index == BTRFS_RAID_RAID0) {
8743                 dev_min = fs_devices->rw_devices;
8744                 do_div(min_free, dev_min);
8745         }
8746
8747         /* We need to do this so that we can look at pending chunks */
8748         trans = btrfs_join_transaction(root);
8749         if (IS_ERR(trans)) {
8750                 ret = PTR_ERR(trans);
8751                 goto out;
8752         }
8753
8754         mutex_lock(&root->fs_info->chunk_mutex);
8755         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8756                 u64 dev_offset;
8757
8758                 /*
8759                  * check to make sure we can actually find a chunk with enough
8760                  * space to fit our block group in.
8761                  */
8762                 if (device->total_bytes > device->bytes_used + min_free &&
8763                     !device->is_tgtdev_for_dev_replace) {
8764                         ret = find_free_dev_extent(trans, device, min_free,
8765                                                    &dev_offset, NULL);
8766                         if (!ret)
8767                                 dev_nr++;
8768
8769                         if (dev_nr >= dev_min)
8770                                 break;
8771
8772                         ret = -1;
8773                 }
8774         }
8775         mutex_unlock(&root->fs_info->chunk_mutex);
8776         btrfs_end_transaction(trans, root);
8777 out:
8778         btrfs_put_block_group(block_group);
8779         return ret;
8780 }
8781
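     /*
      * Scan the extent tree starting at *key for the next
      * BTRFS_BLOCK_GROUP_ITEM_KEY item.  Returns 0 with the path pointing at
      * the item, a positive value if no further block group item exists, or
      * a negative errno on error.
      */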
8782 static int find_first_block_group(struct btrfs_root *root,
8783                 struct btrfs_path *path, struct btrfs_key *key)
8784 {
8785         int ret = 0;
8786         struct btrfs_key found_key;
8787         struct extent_buffer *leaf;
8788         int slot;
8789
8790         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8791         if (ret < 0)
8792                 goto out;
8793
8794         while (1) {
8795                 slot = path->slots[0];
8796                 leaf = path->nodes[0];
8797                 if (slot >= btrfs_header_nritems(leaf)) {
8798                         ret = btrfs_next_leaf(root, path);
8799                         if (ret == 0)
8800                                 continue;
8801                         if (ret < 0)
8802                                 goto out;
8803                         break;
8804                 }
8805                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8806
8807                 if (found_key.objectid >= key->objectid &&
8808                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8809                         ret = 0;
8810                         goto out;
8811                 }
8812                 path->slots[0]++;
8813         }
8814 out:
8815         return ret;
8816 }
8817
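     /*
      * Drop the free-space-cache inode reference (iref) held by every block
      * group: walk all block groups, clear block_group->inode and iput() the
      * inode so those references are not leaked.
      */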
8818 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8819 {
8820         struct btrfs_block_group_cache *block_group;
8821         u64 last = 0;
8822
8823         while (1) {
8824                 struct inode *inode;
8825
8826                 block_group = btrfs_lookup_first_block_group(info, last);
8827                 while (block_group) {
8828                         spin_lock(&block_group->lock);
8829                         if (block_group->iref)
8830                                 break;
8831                         spin_unlock(&block_group->lock);
8832                         block_group = next_block_group(info->tree_root,
8833                                                        block_group);
8834                 }
8835                 if (!block_group) {
8836                         if (last == 0)
8837                                 break;
8838                         last = 0;
8839                         continue;
8840                 }
8841
8842                 inode = block_group->inode;
8843                 block_group->iref = 0;
8844                 block_group->inode = NULL;
8845                 spin_unlock(&block_group->lock);
8846                 iput(inode);
8847                 last = block_group->key.objectid + block_group->key.offset;
8848                 btrfs_put_block_group(block_group);
8849         }
8850 }
8851
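     /*
      * Release every cached block group and space_info: drop any pending
      * caching controls, the unused_bgs references, each block group in the
      * cache rbtree (waiting for in-flight caching to finish), and finally
      * the space_info structures themselves, warning about leftover
      * reservations when ENOSPC_DEBUG is enabled.
      */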
8852 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8853 {
8854         struct btrfs_block_group_cache *block_group;
8855         struct btrfs_space_info *space_info;
8856         struct btrfs_caching_control *caching_ctl;
8857         struct rb_node *n;
8858
8859         down_write(&info->commit_root_sem);
8860         while (!list_empty(&info->caching_block_groups)) {
8861                 caching_ctl = list_entry(info->caching_block_groups.next,
8862                                          struct btrfs_caching_control, list);
8863                 list_del(&caching_ctl->list);
8864                 put_caching_control(caching_ctl);
8865         }
8866         up_write(&info->commit_root_sem);
8867
8868         spin_lock(&info->unused_bgs_lock);
8869         while (!list_empty(&info->unused_bgs)) {
8870                 block_group = list_first_entry(&info->unused_bgs,
8871                                                struct btrfs_block_group_cache,
8872                                                bg_list);
8873                 list_del_init(&block_group->bg_list);
8874                 btrfs_put_block_group(block_group);
8875         }
8876         spin_unlock(&info->unused_bgs_lock);
8877
8878         spin_lock(&info->block_group_cache_lock);
8879         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8880                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8881                                        cache_node);
8882                 rb_erase(&block_group->cache_node,
8883                          &info->block_group_cache_tree);
8884                 spin_unlock(&info->block_group_cache_lock);
8885
8886                 down_write(&block_group->space_info->groups_sem);
8887                 list_del(&block_group->list);
8888                 up_write(&block_group->space_info->groups_sem);
8889
8890                 if (block_group->cached == BTRFS_CACHE_STARTED)
8891                         wait_block_group_cache_done(block_group);
8892
8893                 /*
8894                  * We haven't cached this block group, which means we could
8895                  * possibly have excluded extents on this block group.
8896                  */
8897                 if (block_group->cached == BTRFS_CACHE_NO ||
8898                     block_group->cached == BTRFS_CACHE_ERROR)
8899                         free_excluded_extents(info->extent_root, block_group);
8900
8901                 btrfs_remove_free_space_cache(block_group);
8902                 btrfs_put_block_group(block_group);
8903
8904                 spin_lock(&info->block_group_cache_lock);
8905         }
8906         spin_unlock(&info->block_group_cache_lock);
8907
8908         /* now that all the block groups are freed, go through and
8909          * free all the space_info structs.  This is only called during
8910          * the final stages of unmount, and so we know nobody is
8911          * using them.  We call synchronize_rcu() once before we start,
8912          * just to be on the safe side.
8913          */
8914         synchronize_rcu();
8915
8916         release_global_block_rsv(info);
8917
8918         while (!list_empty(&info->space_info)) {
8919                 int i;
8920
8921                 space_info = list_entry(info->space_info.next,
8922                                         struct btrfs_space_info,
8923                                         list);
8924                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8925                         if (WARN_ON(space_info->bytes_pinned > 0 ||
8926                             space_info->bytes_reserved > 0 ||
8927                             space_info->bytes_may_use > 0)) {
8928                                 dump_space_info(space_info, 0, 0);
8929                         }
8930                 }
8931                 list_del(&space_info->list);
8932                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
8933                         struct kobject *kobj;
8934                         kobj = space_info->block_group_kobjs[i];
8935                         space_info->block_group_kobjs[i] = NULL;
8936                         if (kobj) {
8937                                 kobject_del(kobj);
8938                                 kobject_put(kobj);
8939                         }
8940                 }
8941                 kobject_del(&space_info->kobj);
8942                 kobject_put(&space_info->kobj);
8943         }
8944         return 0;
8945 }
8946
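     /*
      * Add a block group to its space_info's per-RAID-index list.  The first
      * group of a given index also gets a sysfs raid kobject created under
      * the space_info; failure to add the kobject is only logged.
      */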
8947 static void __link_block_group(struct btrfs_space_info *space_info,
8948                                struct btrfs_block_group_cache *cache)
8949 {
8950         int index = get_block_group_index(cache);
8951         bool first = false;
8952
8953         down_write(&space_info->groups_sem);
8954         if (list_empty(&space_info->block_groups[index]))
8955                 first = true;
8956         list_add_tail(&cache->list, &space_info->block_groups[index]);
8957         up_write(&space_info->groups_sem);
8958
8959         if (first) {
8960                 struct raid_kobject *rkobj;
8961                 int ret;
8962
8963                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
8964                 if (!rkobj)
8965                         goto out_err;
8966                 rkobj->raid_type = index;
8967                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
8968                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
8969                                   "%s", get_raid_name(index));
8970                 if (ret) {
8971                         kobject_put(&rkobj->kobj);
8972                         goto out_err;
8973                 }
8974                 space_info->block_group_kobjs[index] = &rkobj->kobj;
8975         }
8976
8977         return;
8978 out_err:
8979         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
8980 }
8981
8982 static struct btrfs_block_group_cache *
8983 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
8984 {
8985         struct btrfs_block_group_cache *cache;
8986
8987         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8988         if (!cache)
8989                 return NULL;
8990
8991         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8992                                         GFP_NOFS);
8993         if (!cache->free_space_ctl) {
8994                 kfree(cache);
8995                 return NULL;
8996         }
8997
8998         cache->key.objectid = start;
8999         cache->key.offset = size;
9000         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9001
9002         cache->sectorsize = root->sectorsize;
9003         cache->fs_info = root->fs_info;
9004         cache->full_stripe_len = btrfs_full_stripe_len(root,
9005                                                &root->fs_info->mapping_tree,
9006                                                start);
9007         atomic_set(&cache->count, 1);
9008         spin_lock_init(&cache->lock);
9009         init_rwsem(&cache->data_rwsem);
9010         INIT_LIST_HEAD(&cache->list);
9011         INIT_LIST_HEAD(&cache->cluster_list);
9012         INIT_LIST_HEAD(&cache->bg_list);
9013         btrfs_init_free_space_ctl(cache);
9014
9015         return cache;
9016 }
9017
9018 int btrfs_read_block_groups(struct btrfs_root *root)
9019 {
9020         struct btrfs_path *path;
9021         int ret;
9022         struct btrfs_block_group_cache *cache;
9023         struct btrfs_fs_info *info = root->fs_info;
9024         struct btrfs_space_info *space_info;
9025         struct btrfs_key key;
9026         struct btrfs_key found_key;
9027         struct extent_buffer *leaf;
9028         int need_clear = 0;
9029         u64 cache_gen;
9030
9031         root = info->extent_root;
9032         key.objectid = 0;
9033         key.offset = 0;
9034         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9035         path = btrfs_alloc_path();
9036         if (!path)
9037                 return -ENOMEM;
9038         path->reada = 1;
9039
9040         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9041         if (btrfs_test_opt(root, SPACE_CACHE) &&
9042             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9043                 need_clear = 1;
9044         if (btrfs_test_opt(root, CLEAR_CACHE))
9045                 need_clear = 1;
9046
9047         while (1) {
9048                 ret = find_first_block_group(root, path, &key);
9049                 if (ret > 0)
9050                         break;
9051                 if (ret != 0)
9052                         goto error;
9053
9054                 leaf = path->nodes[0];
9055                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9056
9057                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9058                                                        found_key.offset);
9059                 if (!cache) {
9060                         ret = -ENOMEM;
9061                         goto error;
9062                 }
9063
9064                 if (need_clear) {
9065                         /*
9066                          * When we mount with old space cache, we need to
9067                          * set BTRFS_DC_CLEAR and set dirty flag.
9068                          *
9069                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9070                          *    truncate the old free space cache inode and
9071                          *    setup a new one.
9072                          * b) Setting 'dirty flag' makes sure that we flush
9073                          *    the new space cache info onto disk.
9074                          */
9075                         cache->disk_cache_state = BTRFS_DC_CLEAR;
9076                         if (btrfs_test_opt(root, SPACE_CACHE))
9077                                 cache->dirty = 1;
9078                 }
9079
9080                 read_extent_buffer(leaf, &cache->item,
9081                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9082                                    sizeof(cache->item));
9083                 cache->flags = btrfs_block_group_flags(&cache->item);
9084
9085                 key.objectid = found_key.objectid + found_key.offset;
9086                 btrfs_release_path(path);
9087
9088                 /*
9089                  * We need to exclude the super stripes now so that the space
9090                  * info has super bytes accounted for, otherwise we'll think
9091                  * we have more space than we actually do.
9092                  */
9093                 ret = exclude_super_stripes(root, cache);
9094                 if (ret) {
9095                         /*
9096                          * We may have excluded something, so call this just in
9097                          * case.
9098                          */
9099                         free_excluded_extents(root, cache);
9100                         btrfs_put_block_group(cache);
9101                         goto error;
9102                 }
9103
9104                 /*
9105                  * check for two cases: either we are full, and therefore
9106                  * don't need to bother with the caching work since we won't
9107                  * find any space, or we are empty, and we can just add all
9108                  * the space in and be done with it.  This saves us a lot of
9109                  * time, particularly in the full case.
9110                  */
9111                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9112                         cache->last_byte_to_unpin = (u64)-1;
9113                         cache->cached = BTRFS_CACHE_FINISHED;
9114                         free_excluded_extents(root, cache);
9115                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9116                         cache->last_byte_to_unpin = (u64)-1;
9117                         cache->cached = BTRFS_CACHE_FINISHED;
9118                         add_new_free_space(cache, root->fs_info,
9119                                            found_key.objectid,
9120                                            found_key.objectid +
9121                                            found_key.offset);
9122                         free_excluded_extents(root, cache);
9123                 }
9124
9125                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9126                 if (ret) {
9127                         btrfs_remove_free_space_cache(cache);
9128                         btrfs_put_block_group(cache);
9129                         goto error;
9130                 }
9131
9132                 ret = update_space_info(info, cache->flags, found_key.offset,
9133                                         btrfs_block_group_used(&cache->item),
9134                                         &space_info);
9135                 if (ret) {
9136                         btrfs_remove_free_space_cache(cache);
9137                         spin_lock(&info->block_group_cache_lock);
9138                         rb_erase(&cache->cache_node,
9139                                  &info->block_group_cache_tree);
9140                         spin_unlock(&info->block_group_cache_lock);
9141                         btrfs_put_block_group(cache);
9142                         goto error;
9143                 }
9144
9145                 cache->space_info = space_info;
9146                 spin_lock(&cache->space_info->lock);
9147                 cache->space_info->bytes_readonly += cache->bytes_super;
9148                 spin_unlock(&cache->space_info->lock);
9149
9150                 __link_block_group(space_info, cache);
9151
9152                 set_avail_alloc_bits(root->fs_info, cache->flags);
9153                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9154                         set_block_group_ro(cache, 1);
9155                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9156                         spin_lock(&info->unused_bgs_lock);
9157                         /* Should always be true but just in case. */
9158                         if (list_empty(&cache->bg_list)) {
9159                                 btrfs_get_block_group(cache);
9160                                 list_add_tail(&cache->bg_list,
9161                                               &info->unused_bgs);
9162                         }
9163                         spin_unlock(&info->unused_bgs_lock);
9164                 }
9165         }
9166
9167         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9168                 if (!(get_alloc_profile(root, space_info->flags) &
9169                       (BTRFS_BLOCK_GROUP_RAID10 |
9170                        BTRFS_BLOCK_GROUP_RAID1 |
9171                        BTRFS_BLOCK_GROUP_RAID5 |
9172                        BTRFS_BLOCK_GROUP_RAID6 |
9173                        BTRFS_BLOCK_GROUP_DUP)))
9174                         continue;
9175                 /*
9176                  * avoid allocating from un-mirrored block groups if there are
9177                  * mirrored block groups.
9178                  */
9179                 list_for_each_entry(cache,
9180                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9181                                 list)
9182                         set_block_group_ro(cache, 1);
9183                 list_for_each_entry(cache,
9184                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9185                                 list)
9186                         set_block_group_ro(cache, 1);
9187         }
9188
9189         init_global_block_rsv(info);
9190         ret = 0;
9191 error:
9192         btrfs_free_path(path);
9193         return ret;
9194 }
9195
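     /*
      * Insert block group items for the block groups created earlier in this
      * transaction (queued on trans->new_bgs by btrfs_make_block_group()) and
      * finish their chunk allocation.  After an error, the remaining groups
      * are still unlinked from the list but otherwise skipped, and the
      * transaction is aborted.
      */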
9196 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9197                                        struct btrfs_root *root)
9198 {
9199         struct btrfs_block_group_cache *block_group, *tmp;
9200         struct btrfs_root *extent_root = root->fs_info->extent_root;
9201         struct btrfs_block_group_item item;
9202         struct btrfs_key key;
9203         int ret = 0;
9204
9205         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9206                 list_del_init(&block_group->bg_list);
9207                 if (ret)
9208                         continue;
9209
9210                 spin_lock(&block_group->lock);
9211                 memcpy(&item, &block_group->item, sizeof(item));
9212                 memcpy(&key, &block_group->key, sizeof(key));
9213                 spin_unlock(&block_group->lock);
9214
9215                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9216                                         sizeof(item));
9217                 if (ret)
9218                         btrfs_abort_transaction(trans, extent_root, ret);
9219                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9220                                                key.objectid, key.offset);
9221                 if (ret)
9222                         btrfs_abort_transaction(trans, extent_root, ret);
9223         }
9224 }
9225
9226 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9227                            struct btrfs_root *root, u64 bytes_used,
9228                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9229                            u64 size)
9230 {
9231         int ret;
9232         struct btrfs_root *extent_root;
9233         struct btrfs_block_group_cache *cache;
9234
9235         extent_root = root->fs_info->extent_root;
9236
9237         btrfs_set_log_full_commit(root->fs_info, trans);
9238
9239         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9240         if (!cache)
9241                 return -ENOMEM;
9242
9243         btrfs_set_block_group_used(&cache->item, bytes_used);
9244         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9245         btrfs_set_block_group_flags(&cache->item, type);
9246
9247         cache->flags = type;
9248         cache->last_byte_to_unpin = (u64)-1;
9249         cache->cached = BTRFS_CACHE_FINISHED;
9250         ret = exclude_super_stripes(root, cache);
9251         if (ret) {
9252                 /*
9253                  * We may have excluded something, so call this just in
9254                  * case.
9255                  */
9256                 free_excluded_extents(root, cache);
9257                 btrfs_put_block_group(cache);
9258                 return ret;
9259         }
9260
9261         add_new_free_space(cache, root->fs_info, chunk_offset,
9262                            chunk_offset + size);
9263
9264         free_excluded_extents(root, cache);
9265
9266         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9267         if (ret) {
9268                 btrfs_remove_free_space_cache(cache);
9269                 btrfs_put_block_group(cache);
9270                 return ret;
9271         }
9272
9273         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9274                                 &cache->space_info);
9275         if (ret) {
9276                 btrfs_remove_free_space_cache(cache);
9277                 spin_lock(&root->fs_info->block_group_cache_lock);
9278                 rb_erase(&cache->cache_node,
9279                          &root->fs_info->block_group_cache_tree);
9280                 spin_unlock(&root->fs_info->block_group_cache_lock);
9281                 btrfs_put_block_group(cache);
9282                 return ret;
9283         }
9284         update_global_block_rsv(root->fs_info);
9285
9286         spin_lock(&cache->space_info->lock);
9287         cache->space_info->bytes_readonly += cache->bytes_super;
9288         spin_unlock(&cache->space_info->lock);
9289
9290         __link_block_group(cache->space_info, cache);
9291
9292         list_add_tail(&cache->bg_list, &trans->new_bgs);
9293
9294         set_avail_alloc_bits(extent_root->fs_info, type);
9295
9296         return 0;
9297 }
9298
9299 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9300 {
9301         u64 extra_flags = chunk_to_extended(flags) &
9302                                 BTRFS_EXTENDED_PROFILE_MASK;
9303
9304         write_seqlock(&fs_info->profiles_lock);
9305         if (flags & BTRFS_BLOCK_GROUP_DATA)
9306                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9307         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9308                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9309         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9310                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9311         write_sequnlock(&fs_info->profiles_lock);
9312 }
9313
9314 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9315                              struct btrfs_root *root, u64 group_start)
9316 {
9317         struct btrfs_path *path;
9318         struct btrfs_block_group_cache *block_group;
9319         struct btrfs_free_cluster *cluster;
9320         struct btrfs_root *tree_root = root->fs_info->tree_root;
9321         struct btrfs_key key;
9322         struct inode *inode;
9323         struct kobject *kobj = NULL;
9324         int ret;
9325         int index;
9326         int factor;
9327
9328         root = root->fs_info->extent_root;
9329
9330         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9331         BUG_ON(!block_group);
9332         BUG_ON(!block_group->ro);
9333
9334         /*
9335          * Free the reserved super bytes from this block group before
9336          * removing it.
9337          */
9338         free_excluded_extents(root, block_group);
9339
9340         memcpy(&key, &block_group->key, sizeof(key));
9341         index = get_block_group_index(block_group);
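        /*
         * DUP, RAID1 and RAID10 keep two copies of every extent, so the raw
         * disk usage is twice the logical size; the factor is used to adjust
         * disk_total below.
         */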
9342         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9343                                   BTRFS_BLOCK_GROUP_RAID1 |
9344                                   BTRFS_BLOCK_GROUP_RAID10))
9345                 factor = 2;
9346         else
9347                 factor = 1;
9348
9349         /* make sure this block group isn't part of an allocation cluster */
9350         cluster = &root->fs_info->data_alloc_cluster;
9351         spin_lock(&cluster->refill_lock);
9352         btrfs_return_cluster_to_free_space(block_group, cluster);
9353         spin_unlock(&cluster->refill_lock);
9354
9355         /*
9356          * make sure this block group isn't part of a metadata
9357          * allocation cluster
9358          */
9359         cluster = &root->fs_info->meta_alloc_cluster;
9360         spin_lock(&cluster->refill_lock);
9361         btrfs_return_cluster_to_free_space(block_group, cluster);
9362         spin_unlock(&cluster->refill_lock);
9363
9364         path = btrfs_alloc_path();
9365         if (!path) {
9366                 ret = -ENOMEM;
9367                 goto out;
9368         }
9369
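        /*
         * Drop the free space cache inode for this block group: add it as an
         * orphan and clear its link count so it is deleted on the final iput.
         */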
9370         inode = lookup_free_space_inode(tree_root, block_group, path);
9371         if (!IS_ERR(inode)) {
9372                 ret = btrfs_orphan_add(trans, inode);
9373                 if (ret) {
9374                         btrfs_add_delayed_iput(inode);
9375                         goto out;
9376                 }
9377                 clear_nlink(inode);
9378                 /* One for the block group's ref */
9379                 spin_lock(&block_group->lock);
9380                 if (block_group->iref) {
9381                         block_group->iref = 0;
9382                         block_group->inode = NULL;
9383                         spin_unlock(&block_group->lock);
9384                         iput(inode);
9385                 } else {
9386                         spin_unlock(&block_group->lock);
9387                 }
9388                 /* One for our lookup ref */
9389                 btrfs_add_delayed_iput(inode);
9390         }
9391
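        /*
         * Remove the free space cache item for this block group from the tree
         * root, if one exists.
         */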
9392         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9393         key.offset = block_group->key.objectid;
9394         key.type = 0;
9395
9396         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9397         if (ret < 0)
9398                 goto out;
9399         if (ret > 0)
9400                 btrfs_release_path(path);
9401         if (ret == 0) {
9402                 ret = btrfs_del_item(trans, tree_root, path);
9403                 if (ret)
9404                         goto out;
9405                 btrfs_release_path(path);
9406         }
9407
9408         spin_lock(&root->fs_info->block_group_cache_lock);
9409         rb_erase(&block_group->cache_node,
9410                  &root->fs_info->block_group_cache_tree);
9411
9412         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9413                 root->fs_info->first_logical_byte = (u64)-1;
9414         spin_unlock(&root->fs_info->block_group_cache_lock);
9415
9416         down_write(&block_group->space_info->groups_sem);
9417         /*
9418          * we must use list_del_init so people can check to see if they
9419          * are still on the list after taking the semaphore
9420          */
9421         list_del_init(&block_group->list);
9422         if (list_empty(&block_group->space_info->block_groups[index])) {
9423                 kobj = block_group->space_info->block_group_kobjs[index];
9424                 block_group->space_info->block_group_kobjs[index] = NULL;
9425                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9426         }
9427         up_write(&block_group->space_info->groups_sem);
9428         if (kobj) {
9429                 kobject_del(kobj);
9430                 kobject_put(kobj);
9431         }
9432
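        /*
         * If the caching thread is still populating this block group, wait
         * for it to finish before tearing down the free space cache.
         */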
9433         if (block_group->cached == BTRFS_CACHE_STARTED)
9434                 wait_block_group_cache_done(block_group);
9435
9436         btrfs_remove_free_space_cache(block_group);
9437
9438         spin_lock(&block_group->space_info->lock);
9439         block_group->space_info->total_bytes -= block_group->key.offset;
9440         block_group->space_info->bytes_readonly -= block_group->key.offset;
9441         block_group->space_info->disk_total -= block_group->key.offset * factor;
9442         spin_unlock(&block_group->space_info->lock);
9443
9444         memcpy(&key, &block_group->key, sizeof(key));
9445
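        /*
         * Drop two references: the one taken by btrfs_lookup_block_group()
         * above and the one that was held by the block group cache rbtree
         * entry removed earlier.
         */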
9446         btrfs_put_block_group(block_group);
9447         btrfs_put_block_group(block_group);
9448
9449         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9450         if (ret > 0)
9451                 ret = -EIO;
9452         if (ret < 0)
9453                 goto out;
9454
9455         ret = btrfs_del_item(trans, root, path);
9456 out:
9457         btrfs_free_path(path);
9458         return ret;
9459 }
9460
9461 /*
9462  * Process the unused_bgs list and remove any block groups that no longer
9463  * have any allocated space in them.
9464  */
9465 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9466 {
9467         struct btrfs_block_group_cache *block_group;
9468         struct btrfs_space_info *space_info;
9469         struct btrfs_root *root = fs_info->extent_root;
9470         struct btrfs_trans_handle *trans;
9471         int ret = 0;
9472
9473         if (!fs_info->open)
9474                 return;
9475
9476         spin_lock(&fs_info->unused_bgs_lock);
9477         while (!list_empty(&fs_info->unused_bgs)) {
9478                 u64 start, end;
9479
9480                 block_group = list_first_entry(&fs_info->unused_bgs,
9481                                                struct btrfs_block_group_cache,
9482                                                bg_list);
9483                 space_info = block_group->space_info;
9484                 list_del_init(&block_group->bg_list);
9485                 if (ret || btrfs_mixed_space_info(space_info)) {
9486                         btrfs_put_block_group(block_group);
9487                         continue;
9488                 }
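                /*
                 * Drop the list lock while we work on this block group; the
                 * reference taken when it was queued on unused_bgs keeps it
                 * alive and is dropped at "next" below.
                 */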
9489                 spin_unlock(&fs_info->unused_bgs_lock);
9490
9491                 /* Don't want to race with allocators so take the groups_sem */
9492                 down_write(&space_info->groups_sem);
9493                 spin_lock(&block_group->lock);
9494                 if (block_group->reserved ||
9495                     btrfs_block_group_used(&block_group->item) ||
9496                     block_group->ro) {
9497                         /*
9498                          * We want to bail if we made new allocations or have
9499                          * outstanding allocations in this block group.  We do
9500                          * the ro check in case balance is currently acting on
9501                          * this block group.
9502                          */
9503                         spin_unlock(&block_group->lock);
9504                         up_write(&space_info->groups_sem);
9505                         goto next;
9506                 }
9507                 spin_unlock(&block_group->lock);
9508
9509                 /* We don't want to force the issue, only flip if it's ok. */
9510                 ret = set_block_group_ro(block_group, 0);
9511                 up_write(&space_info->groups_sem);
9512                 if (ret < 0) {
9513                         ret = 0;
9514                         goto next;
9515                 }
9516
9517                 /*
9518                  * Want to do this before we do anything else so we can recover
9519                  * properly if we fail to join the transaction.
9520                  */
9521                 trans = btrfs_join_transaction(root);
9522                 if (IS_ERR(trans)) {
9523                         btrfs_set_block_group_rw(root, block_group);
9524                         ret = PTR_ERR(trans);
9525                         goto next;
9526                 }
9527
9528                 /*
9529                  * We could have pending pinned extents for this block group;
9530                  * just delete them, we don't care about them anymore.
9531                  */
9532                 start = block_group->key.objectid;
9533                 end = start + block_group->key.offset - 1;
9534                 clear_extent_bits(&fs_info->freed_extents[0], start, end,
9535                                   EXTENT_DIRTY, GFP_NOFS);
9536                 clear_extent_bits(&fs_info->freed_extents[1], start, end,
9537                                   EXTENT_DIRTY, GFP_NOFS);
9538
9539                 /* Reset pinned so btrfs_put_block_group doesn't complain */
9540                 block_group->pinned = 0;
9541
9542                 /*
9543                  * btrfs_remove_chunk() will abort the transaction if things go
9544                  * horribly wrong.
9545                  */
9546                 ret = btrfs_remove_chunk(trans, root,
9547                                          block_group->key.objectid);
9548                 btrfs_end_transaction(trans, root);
9549 next:
9550                 btrfs_put_block_group(block_group);
9551                 spin_lock(&fs_info->unused_bgs_lock);
9552         }
9553         spin_unlock(&fs_info->unused_bgs_lock);
9554 }
9555
9556 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
9557 {
9558         struct btrfs_space_info *space_info;
9559         struct btrfs_super_block *disk_super;
9560         u64 features;
9561         u64 flags;
9562         int mixed = 0;
9563         int ret;
9564
9565         disk_super = fs_info->super_copy;
9566         if (!btrfs_super_root(disk_super))
9567                 return 1;
9568
9569         features = btrfs_super_incompat_flags(disk_super);
9570         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
9571                 mixed = 1;
9572
9573         flags = BTRFS_BLOCK_GROUP_SYSTEM;
9574         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9575         if (ret)
9576                 goto out;
9577
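        /*
         * With mixed block groups, data and metadata share a single
         * space_info; otherwise set up separate metadata and data space
         * infos.
         */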
9578         if (mixed) {
9579                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
9580                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9581         } else {
9582                 flags = BTRFS_BLOCK_GROUP_METADATA;
9583                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9584                 if (ret)
9585                         goto out;
9586
9587                 flags = BTRFS_BLOCK_GROUP_DATA;
9588                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
9589         }
9590 out:
9591         return ret;
9592 }
9593
9594 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9595 {
9596         return unpin_extent_range(root, start, end);
9597 }
9598
9599 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
9600                                u64 num_bytes, u64 *actual_bytes)
9601 {
9602         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
9603 }
9604
9605 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9606 {
9607         struct btrfs_fs_info *fs_info = root->fs_info;
9608         struct btrfs_block_group_cache *cache = NULL;
9609         u64 group_trimmed;
9610         u64 start;
9611         u64 end;
9612         u64 trimmed = 0;
9613         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
9614         int ret = 0;
9615
9616         /*
9617          * Try to trim all FS space; our first block group may start at a
9618          * non-zero offset.
9618          */
9619         if (range->len == total_bytes)
9620                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
9621         else
9622                 cache = btrfs_lookup_block_group(fs_info, range->start);
9623
9624         while (cache) {
9625                 if (cache->key.objectid >= (range->start + range->len)) {
9626                         btrfs_put_block_group(cache);
9627                         break;
9628                 }
9629
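                /*
                 * Trim only the part of this block group that overlaps the
                 * requested range.
                 */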
9630                 start = max(range->start, cache->key.objectid);
9631                 end = min(range->start + range->len,
9632                                 cache->key.objectid + cache->key.offset);
9633
9634                 if (end - start >= range->minlen) {
9635                         if (!block_group_cache_done(cache)) {
9636                                 ret = cache_block_group(cache, 0);
9637                                 if (ret) {
9638                                         btrfs_put_block_group(cache);
9639                                         break;
9640                                 }
9641                                 ret = wait_block_group_cache_done(cache);
9642                                 if (ret) {
9643                                         btrfs_put_block_group(cache);
9644                                         break;
9645                                 }
9646                         }
9647                         ret = btrfs_trim_block_group(cache,
9648                                                      &group_trimmed,
9649                                                      start,
9650                                                      end,
9651                                                      range->minlen);
9652
9653                         trimmed += group_trimmed;
9654                         if (ret) {
9655                                 btrfs_put_block_group(cache);
9656                                 break;
9657                         }
9658                 }
9659
9660                 cache = next_block_group(fs_info->tree_root, cache);
9661         }
9662
9663         range->len = trimmed;
9664         return ret;
9665 }
9666
9667 /*
9668  * btrfs_{start,end}_nocow_write() are similar to mnt_{want,drop}_write():
9669  * they are used to prevent some tasks from writing data into the page cache
9670  * via nocow before the subvolume is snapshotted, while still flushing the
9671  * data to disk after the snapshot is created.
9672  */
9673 void btrfs_end_nocow_write(struct btrfs_root *root)
9674 {
9675         percpu_counter_dec(&root->subv_writers->counter);
9676         /*
9677          * Make sure counter is updated before we wake up
9678          * waiters.
9679          */
9680         smp_mb();
9681         if (waitqueue_active(&root->subv_writers->wait))
9682                 wake_up(&root->subv_writers->wait);
9683 }
9684
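/*
 * Returns 0 if a snapshot is about to be created, in which case the caller
 * must fall back to COW; returns 1 on success, and then the caller must pair
 * this with a call to btrfs_end_nocow_write().
 */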
9685 int btrfs_start_nocow_write(struct btrfs_root *root)
9686 {
9687         if (atomic_read(&root->will_be_snapshoted))
9688                 return 0;
9689
9690         percpu_counter_inc(&root->subv_writers->counter);
9691         /*
9692          * Make sure counter is updated before we check for snapshot creation.
9693          */
9694         smp_mb();
9695         if (atomic_read(&root->will_be_snapshoted)) {
9696                 btrfs_end_nocow_write(root);
9697                 return 0;
9698         }
9699         return 1;
9700 }
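/*
 * Usage sketch (hypothetical caller, not part of this file): a nocow writer
 * brackets its work with these helpers and falls back to COW when a snapshot
 * is pending.  do_nocow_write()/do_cow_write() are placeholders.
 *
 *      if (!btrfs_start_nocow_write(root))
 *              return do_cow_write(inode, pos, len);
 *      ret = do_nocow_write(inode, pos, len);
 *      btrfs_end_nocow_write(root);
 *      return ret;
 */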