Merge tag 'sh-for-4.17' of git://git.libc.org/linux-sh
[sfrench/cifs-2.6.git] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/sched/signal.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/sort.h>
24 #include <linux/rcupdate.h>
25 #include <linux/kthread.h>
26 #include <linux/slab.h>
27 #include <linux/ratelimit.h>
28 #include <linux/percpu_counter.h>
29 #include <linux/lockdep.h>
30 #include <linux/crc32c.h>
31 #include "tree-log.h"
32 #include "disk-io.h"
33 #include "print-tree.h"
34 #include "volumes.h"
35 #include "raid56.h"
36 #include "locking.h"
37 #include "free-space-cache.h"
38 #include "free-space-tree.h"
39 #include "math.h"
40 #include "sysfs.h"
41 #include "qgroup.h"
42 #include "ref-verify.h"
43
44 #undef SCRAMBLE_DELAYED_REFS
45
46 /*
47  * control flags for do_chunk_alloc's force field
48  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
49  * if we really need one.
50  *
51  * CHUNK_ALLOC_LIMITED means to only try and allocate one
52  * if we have very few chunks already allocated.  This is
53  * used as part of the clustering code to help make sure
54  * we have a good pool of storage to cluster in, without
55  * filling the FS with empty chunks
56  *
57  * CHUNK_ALLOC_FORCE means it must try to allocate one
58  *
59  */
60 enum {
61         CHUNK_ALLOC_NO_FORCE = 0,
62         CHUNK_ALLOC_LIMITED = 1,
63         CHUNK_ALLOC_FORCE = 2,
64 };
65
66 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
67                                struct btrfs_fs_info *fs_info,
68                                 struct btrfs_delayed_ref_node *node, u64 parent,
69                                 u64 root_objectid, u64 owner_objectid,
70                                 u64 owner_offset, int refs_to_drop,
71                                 struct btrfs_delayed_extent_op *extra_op);
72 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
73                                     struct extent_buffer *leaf,
74                                     struct btrfs_extent_item *ei);
75 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
76                                       struct btrfs_fs_info *fs_info,
77                                       u64 parent, u64 root_objectid,
78                                       u64 flags, u64 owner, u64 offset,
79                                       struct btrfs_key *ins, int ref_mod);
80 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
81                                      struct btrfs_fs_info *fs_info,
82                                      u64 parent, u64 root_objectid,
83                                      u64 flags, struct btrfs_disk_key *key,
84                                      int level, struct btrfs_key *ins);
85 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
86                           struct btrfs_fs_info *fs_info, u64 flags,
87                           int force);
88 static int find_next_key(struct btrfs_path *path, int level,
89                          struct btrfs_key *key);
90 static void dump_space_info(struct btrfs_fs_info *fs_info,
91                             struct btrfs_space_info *info, u64 bytes,
92                             int dump_block_groups);
93 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
94                                u64 num_bytes);
95 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
96                                      struct btrfs_space_info *space_info,
97                                      u64 num_bytes);
98 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
99                                      struct btrfs_space_info *space_info,
100                                      u64 num_bytes);
101
102 static noinline int
103 block_group_cache_done(struct btrfs_block_group_cache *cache)
104 {
105         smp_mb();
106         return cache->cached == BTRFS_CACHE_FINISHED ||
107                 cache->cached == BTRFS_CACHE_ERROR;
108 }
109
110 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
111 {
112         return (cache->flags & bits) == bits;
113 }
114
115 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
116 {
117         atomic_inc(&cache->count);
118 }
119
120 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
121 {
122         if (atomic_dec_and_test(&cache->count)) {
123                 WARN_ON(cache->pinned > 0);
124                 WARN_ON(cache->reserved > 0);
125
126                 /*
127                  * If not empty, someone is still holding mutex of
128                  * full_stripe_lock, which can only be released by caller.
129                  * And it will definitely cause use-after-free when caller
130                  * tries to release full stripe lock.
131                  *
132                  * No better way to resolve, but only to warn.
133                  */
134                 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
135                 kfree(cache->free_space_ctl);
136                 kfree(cache);
137         }
138 }
139
140 /*
141  * this adds the block group to the fs_info rb tree for the block group
142  * cache
143  */
144 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
145                                 struct btrfs_block_group_cache *block_group)
146 {
147         struct rb_node **p;
148         struct rb_node *parent = NULL;
149         struct btrfs_block_group_cache *cache;
150
151         spin_lock(&info->block_group_cache_lock);
152         p = &info->block_group_cache_tree.rb_node;
153
154         while (*p) {
155                 parent = *p;
156                 cache = rb_entry(parent, struct btrfs_block_group_cache,
157                                  cache_node);
158                 if (block_group->key.objectid < cache->key.objectid) {
159                         p = &(*p)->rb_left;
160                 } else if (block_group->key.objectid > cache->key.objectid) {
161                         p = &(*p)->rb_right;
162                 } else {
163                         spin_unlock(&info->block_group_cache_lock);
164                         return -EEXIST;
165                 }
166         }
167
168         rb_link_node(&block_group->cache_node, parent, p);
169         rb_insert_color(&block_group->cache_node,
170                         &info->block_group_cache_tree);
171
172         if (info->first_logical_byte > block_group->key.objectid)
173                 info->first_logical_byte = block_group->key.objectid;
174
175         spin_unlock(&info->block_group_cache_lock);
176
177         return 0;
178 }
179
180 /*
181  * This will return the block group at or after bytenr if contains is 0, else
182  * it will return the block group that contains the bytenr
183  */
184 static struct btrfs_block_group_cache *
185 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
186                               int contains)
187 {
188         struct btrfs_block_group_cache *cache, *ret = NULL;
189         struct rb_node *n;
190         u64 end, start;
191
192         spin_lock(&info->block_group_cache_lock);
193         n = info->block_group_cache_tree.rb_node;
194
195         while (n) {
196                 cache = rb_entry(n, struct btrfs_block_group_cache,
197                                  cache_node);
198                 end = cache->key.objectid + cache->key.offset - 1;
199                 start = cache->key.objectid;
200
201                 if (bytenr < start) {
202                         if (!contains && (!ret || start < ret->key.objectid))
203                                 ret = cache;
204                         n = n->rb_left;
205                 } else if (bytenr > start) {
206                         if (contains && bytenr <= end) {
207                                 ret = cache;
208                                 break;
209                         }
210                         n = n->rb_right;
211                 } else {
212                         ret = cache;
213                         break;
214                 }
215         }
216         if (ret) {
217                 btrfs_get_block_group(ret);
218                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
219                         info->first_logical_byte = ret->key.objectid;
220         }
221         spin_unlock(&info->block_group_cache_lock);
222
223         return ret;
224 }
225
226 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
227                                u64 start, u64 num_bytes)
228 {
229         u64 end = start + num_bytes - 1;
230         set_extent_bits(&fs_info->freed_extents[0],
231                         start, end, EXTENT_UPTODATE);
232         set_extent_bits(&fs_info->freed_extents[1],
233                         start, end, EXTENT_UPTODATE);
234         return 0;
235 }
236
237 static void free_excluded_extents(struct btrfs_fs_info *fs_info,
238                                   struct btrfs_block_group_cache *cache)
239 {
240         u64 start, end;
241
242         start = cache->key.objectid;
243         end = start + cache->key.offset - 1;
244
245         clear_extent_bits(&fs_info->freed_extents[0],
246                           start, end, EXTENT_UPTODATE);
247         clear_extent_bits(&fs_info->freed_extents[1],
248                           start, end, EXTENT_UPTODATE);
249 }
250
251 static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
252                                  struct btrfs_block_group_cache *cache)
253 {
254         u64 bytenr;
255         u64 *logical;
256         int stripe_len;
257         int i, nr, ret;
258
259         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
260                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
261                 cache->bytes_super += stripe_len;
262                 ret = add_excluded_extent(fs_info, cache->key.objectid,
263                                           stripe_len);
264                 if (ret)
265                         return ret;
266         }
267
268         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
269                 bytenr = btrfs_sb_offset(i);
270                 ret = btrfs_rmap_block(fs_info, cache->key.objectid,
271                                        bytenr, 0, &logical, &nr, &stripe_len);
272                 if (ret)
273                         return ret;
274
275                 while (nr--) {
276                         u64 start, len;
277
278                         if (logical[nr] > cache->key.objectid +
279                             cache->key.offset)
280                                 continue;
281
282                         if (logical[nr] + stripe_len <= cache->key.objectid)
283                                 continue;
284
285                         start = logical[nr];
286                         if (start < cache->key.objectid) {
287                                 start = cache->key.objectid;
288                                 len = (logical[nr] + stripe_len) - start;
289                         } else {
290                                 len = min_t(u64, stripe_len,
291                                             cache->key.objectid +
292                                             cache->key.offset - start);
293                         }
294
295                         cache->bytes_super += len;
296                         ret = add_excluded_extent(fs_info, start, len);
297                         if (ret) {
298                                 kfree(logical);
299                                 return ret;
300                         }
301                 }
302
303                 kfree(logical);
304         }
305         return 0;
306 }
307
308 static struct btrfs_caching_control *
309 get_caching_control(struct btrfs_block_group_cache *cache)
310 {
311         struct btrfs_caching_control *ctl;
312
313         spin_lock(&cache->lock);
314         if (!cache->caching_ctl) {
315                 spin_unlock(&cache->lock);
316                 return NULL;
317         }
318
319         ctl = cache->caching_ctl;
320         refcount_inc(&ctl->count);
321         spin_unlock(&cache->lock);
322         return ctl;
323 }
324
325 static void put_caching_control(struct btrfs_caching_control *ctl)
326 {
327         if (refcount_dec_and_test(&ctl->count))
328                 kfree(ctl);
329 }
330
331 #ifdef CONFIG_BTRFS_DEBUG
332 static void fragment_free_space(struct btrfs_block_group_cache *block_group)
333 {
334         struct btrfs_fs_info *fs_info = block_group->fs_info;
335         u64 start = block_group->key.objectid;
336         u64 len = block_group->key.offset;
337         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
338                 fs_info->nodesize : fs_info->sectorsize;
339         u64 step = chunk << 1;
340
341         while (len > chunk) {
342                 btrfs_remove_free_space(block_group, start, chunk);
343                 start += step;
344                 if (len < step)
345                         len = 0;
346                 else
347                         len -= step;
348         }
349 }
350 #endif
351
352 /*
353  * this is only called by cache_block_group, since we could have freed extents
354  * we need to check the pinned_extents for any extents that can't be used yet
355  * since their free space will be released as soon as the transaction commits.
356  */
357 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
358                        struct btrfs_fs_info *info, u64 start, u64 end)
359 {
360         u64 extent_start, extent_end, size, total_added = 0;
361         int ret;
362
363         while (start < end) {
364                 ret = find_first_extent_bit(info->pinned_extents, start,
365                                             &extent_start, &extent_end,
366                                             EXTENT_DIRTY | EXTENT_UPTODATE,
367                                             NULL);
368                 if (ret)
369                         break;
370
371                 if (extent_start <= start) {
372                         start = extent_end + 1;
373                 } else if (extent_start > start && extent_start < end) {
374                         size = extent_start - start;
375                         total_added += size;
376                         ret = btrfs_add_free_space(block_group, start,
377                                                    size);
378                         BUG_ON(ret); /* -ENOMEM or logic error */
379                         start = extent_end + 1;
380                 } else {
381                         break;
382                 }
383         }
384
385         if (start < end) {
386                 size = end - start;
387                 total_added += size;
388                 ret = btrfs_add_free_space(block_group, start, size);
389                 BUG_ON(ret); /* -ENOMEM or logic error */
390         }
391
392         return total_added;
393 }
394
395 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
396 {
397         struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
398         struct btrfs_fs_info *fs_info = block_group->fs_info;
399         struct btrfs_root *extent_root = fs_info->extent_root;
400         struct btrfs_path *path;
401         struct extent_buffer *leaf;
402         struct btrfs_key key;
403         u64 total_found = 0;
404         u64 last = 0;
405         u32 nritems;
406         int ret;
407         bool wakeup = true;
408
409         path = btrfs_alloc_path();
410         if (!path)
411                 return -ENOMEM;
412
413         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
414
415 #ifdef CONFIG_BTRFS_DEBUG
416         /*
417          * If we're fragmenting we don't want to make anybody think we can
418          * allocate from this block group until we've had a chance to fragment
419          * the free space.
420          */
421         if (btrfs_should_fragment_free_space(block_group))
422                 wakeup = false;
423 #endif
424         /*
425          * We don't want to deadlock with somebody trying to allocate a new
426          * extent for the extent root while also trying to search the extent
427          * root to add free space.  So we skip locking and search the commit
428          * root, since its read-only
429          */
430         path->skip_locking = 1;
431         path->search_commit_root = 1;
432         path->reada = READA_FORWARD;
433
434         key.objectid = last;
435         key.offset = 0;
436         key.type = BTRFS_EXTENT_ITEM_KEY;
437
438 next:
439         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
440         if (ret < 0)
441                 goto out;
442
443         leaf = path->nodes[0];
444         nritems = btrfs_header_nritems(leaf);
445
446         while (1) {
447                 if (btrfs_fs_closing(fs_info) > 1) {
448                         last = (u64)-1;
449                         break;
450                 }
451
452                 if (path->slots[0] < nritems) {
453                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
454                 } else {
455                         ret = find_next_key(path, 0, &key);
456                         if (ret)
457                                 break;
458
459                         if (need_resched() ||
460                             rwsem_is_contended(&fs_info->commit_root_sem)) {
461                                 if (wakeup)
462                                         caching_ctl->progress = last;
463                                 btrfs_release_path(path);
464                                 up_read(&fs_info->commit_root_sem);
465                                 mutex_unlock(&caching_ctl->mutex);
466                                 cond_resched();
467                                 mutex_lock(&caching_ctl->mutex);
468                                 down_read(&fs_info->commit_root_sem);
469                                 goto next;
470                         }
471
472                         ret = btrfs_next_leaf(extent_root, path);
473                         if (ret < 0)
474                                 goto out;
475                         if (ret)
476                                 break;
477                         leaf = path->nodes[0];
478                         nritems = btrfs_header_nritems(leaf);
479                         continue;
480                 }
481
482                 if (key.objectid < last) {
483                         key.objectid = last;
484                         key.offset = 0;
485                         key.type = BTRFS_EXTENT_ITEM_KEY;
486
487                         if (wakeup)
488                                 caching_ctl->progress = last;
489                         btrfs_release_path(path);
490                         goto next;
491                 }
492
493                 if (key.objectid < block_group->key.objectid) {
494                         path->slots[0]++;
495                         continue;
496                 }
497
498                 if (key.objectid >= block_group->key.objectid +
499                     block_group->key.offset)
500                         break;
501
502                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
503                     key.type == BTRFS_METADATA_ITEM_KEY) {
504                         total_found += add_new_free_space(block_group,
505                                                           fs_info, last,
506                                                           key.objectid);
507                         if (key.type == BTRFS_METADATA_ITEM_KEY)
508                                 last = key.objectid +
509                                         fs_info->nodesize;
510                         else
511                                 last = key.objectid + key.offset;
512
513                         if (total_found > CACHING_CTL_WAKE_UP) {
514                                 total_found = 0;
515                                 if (wakeup)
516                                         wake_up(&caching_ctl->wait);
517                         }
518                 }
519                 path->slots[0]++;
520         }
521         ret = 0;
522
523         total_found += add_new_free_space(block_group, fs_info, last,
524                                           block_group->key.objectid +
525                                           block_group->key.offset);
526         caching_ctl->progress = (u64)-1;
527
528 out:
529         btrfs_free_path(path);
530         return ret;
531 }
532
533 static noinline void caching_thread(struct btrfs_work *work)
534 {
535         struct btrfs_block_group_cache *block_group;
536         struct btrfs_fs_info *fs_info;
537         struct btrfs_caching_control *caching_ctl;
538         int ret;
539
540         caching_ctl = container_of(work, struct btrfs_caching_control, work);
541         block_group = caching_ctl->block_group;
542         fs_info = block_group->fs_info;
543
544         mutex_lock(&caching_ctl->mutex);
545         down_read(&fs_info->commit_root_sem);
546
547         if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
548                 ret = load_free_space_tree(caching_ctl);
549         else
550                 ret = load_extent_tree_free(caching_ctl);
551
552         spin_lock(&block_group->lock);
553         block_group->caching_ctl = NULL;
554         block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
555         spin_unlock(&block_group->lock);
556
557 #ifdef CONFIG_BTRFS_DEBUG
558         if (btrfs_should_fragment_free_space(block_group)) {
559                 u64 bytes_used;
560
561                 spin_lock(&block_group->space_info->lock);
562                 spin_lock(&block_group->lock);
563                 bytes_used = block_group->key.offset -
564                         btrfs_block_group_used(&block_group->item);
565                 block_group->space_info->bytes_used += bytes_used >> 1;
566                 spin_unlock(&block_group->lock);
567                 spin_unlock(&block_group->space_info->lock);
568                 fragment_free_space(block_group);
569         }
570 #endif
571
572         caching_ctl->progress = (u64)-1;
573
574         up_read(&fs_info->commit_root_sem);
575         free_excluded_extents(fs_info, block_group);
576         mutex_unlock(&caching_ctl->mutex);
577
578         wake_up(&caching_ctl->wait);
579
580         put_caching_control(caching_ctl);
581         btrfs_put_block_group(block_group);
582 }
583
584 static int cache_block_group(struct btrfs_block_group_cache *cache,
585                              int load_cache_only)
586 {
587         DEFINE_WAIT(wait);
588         struct btrfs_fs_info *fs_info = cache->fs_info;
589         struct btrfs_caching_control *caching_ctl;
590         int ret = 0;
591
592         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
593         if (!caching_ctl)
594                 return -ENOMEM;
595
596         INIT_LIST_HEAD(&caching_ctl->list);
597         mutex_init(&caching_ctl->mutex);
598         init_waitqueue_head(&caching_ctl->wait);
599         caching_ctl->block_group = cache;
600         caching_ctl->progress = cache->key.objectid;
601         refcount_set(&caching_ctl->count, 1);
602         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
603                         caching_thread, NULL, NULL);
604
605         spin_lock(&cache->lock);
606         /*
607          * This should be a rare occasion, but this could happen I think in the
608          * case where one thread starts to load the space cache info, and then
609          * some other thread starts a transaction commit which tries to do an
610          * allocation while the other thread is still loading the space cache
611          * info.  The previous loop should have kept us from choosing this block
612          * group, but if we've moved to the state where we will wait on caching
613          * block groups we need to first check if we're doing a fast load here,
614          * so we can wait for it to finish, otherwise we could end up allocating
615          * from a block group who's cache gets evicted for one reason or
616          * another.
617          */
618         while (cache->cached == BTRFS_CACHE_FAST) {
619                 struct btrfs_caching_control *ctl;
620
621                 ctl = cache->caching_ctl;
622                 refcount_inc(&ctl->count);
623                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
624                 spin_unlock(&cache->lock);
625
626                 schedule();
627
628                 finish_wait(&ctl->wait, &wait);
629                 put_caching_control(ctl);
630                 spin_lock(&cache->lock);
631         }
632
633         if (cache->cached != BTRFS_CACHE_NO) {
634                 spin_unlock(&cache->lock);
635                 kfree(caching_ctl);
636                 return 0;
637         }
638         WARN_ON(cache->caching_ctl);
639         cache->caching_ctl = caching_ctl;
640         cache->cached = BTRFS_CACHE_FAST;
641         spin_unlock(&cache->lock);
642
643         if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
644                 mutex_lock(&caching_ctl->mutex);
645                 ret = load_free_space_cache(fs_info, cache);
646
647                 spin_lock(&cache->lock);
648                 if (ret == 1) {
649                         cache->caching_ctl = NULL;
650                         cache->cached = BTRFS_CACHE_FINISHED;
651                         cache->last_byte_to_unpin = (u64)-1;
652                         caching_ctl->progress = (u64)-1;
653                 } else {
654                         if (load_cache_only) {
655                                 cache->caching_ctl = NULL;
656                                 cache->cached = BTRFS_CACHE_NO;
657                         } else {
658                                 cache->cached = BTRFS_CACHE_STARTED;
659                                 cache->has_caching_ctl = 1;
660                         }
661                 }
662                 spin_unlock(&cache->lock);
663 #ifdef CONFIG_BTRFS_DEBUG
664                 if (ret == 1 &&
665                     btrfs_should_fragment_free_space(cache)) {
666                         u64 bytes_used;
667
668                         spin_lock(&cache->space_info->lock);
669                         spin_lock(&cache->lock);
670                         bytes_used = cache->key.offset -
671                                 btrfs_block_group_used(&cache->item);
672                         cache->space_info->bytes_used += bytes_used >> 1;
673                         spin_unlock(&cache->lock);
674                         spin_unlock(&cache->space_info->lock);
675                         fragment_free_space(cache);
676                 }
677 #endif
678                 mutex_unlock(&caching_ctl->mutex);
679
680                 wake_up(&caching_ctl->wait);
681                 if (ret == 1) {
682                         put_caching_control(caching_ctl);
683                         free_excluded_extents(fs_info, cache);
684                         return 0;
685                 }
686         } else {
687                 /*
688                  * We're either using the free space tree or no caching at all.
689                  * Set cached to the appropriate value and wakeup any waiters.
690                  */
691                 spin_lock(&cache->lock);
692                 if (load_cache_only) {
693                         cache->caching_ctl = NULL;
694                         cache->cached = BTRFS_CACHE_NO;
695                 } else {
696                         cache->cached = BTRFS_CACHE_STARTED;
697                         cache->has_caching_ctl = 1;
698                 }
699                 spin_unlock(&cache->lock);
700                 wake_up(&caching_ctl->wait);
701         }
702
703         if (load_cache_only) {
704                 put_caching_control(caching_ctl);
705                 return 0;
706         }
707
708         down_write(&fs_info->commit_root_sem);
709         refcount_inc(&caching_ctl->count);
710         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
711         up_write(&fs_info->commit_root_sem);
712
713         btrfs_get_block_group(cache);
714
715         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
716
717         return ret;
718 }
719
720 /*
721  * return the block group that starts at or after bytenr
722  */
723 static struct btrfs_block_group_cache *
724 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
725 {
726         return block_group_cache_tree_search(info, bytenr, 0);
727 }
728
729 /*
730  * return the block group that contains the given bytenr
731  */
732 struct btrfs_block_group_cache *btrfs_lookup_block_group(
733                                                  struct btrfs_fs_info *info,
734                                                  u64 bytenr)
735 {
736         return block_group_cache_tree_search(info, bytenr, 1);
737 }
738
739 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
740                                                   u64 flags)
741 {
742         struct list_head *head = &info->space_info;
743         struct btrfs_space_info *found;
744
745         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
746
747         rcu_read_lock();
748         list_for_each_entry_rcu(found, head, list) {
749                 if (found->flags & flags) {
750                         rcu_read_unlock();
751                         return found;
752                 }
753         }
754         rcu_read_unlock();
755         return NULL;
756 }
757
758 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
759                              u64 owner, u64 root_objectid)
760 {
761         struct btrfs_space_info *space_info;
762         u64 flags;
763
764         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
765                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
766                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
767                 else
768                         flags = BTRFS_BLOCK_GROUP_METADATA;
769         } else {
770                 flags = BTRFS_BLOCK_GROUP_DATA;
771         }
772
773         space_info = __find_space_info(fs_info, flags);
774         ASSERT(space_info);
775         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
776 }
777
778 /*
779  * after adding space to the filesystem, we need to clear the full flags
780  * on all the space infos.
781  */
782 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
783 {
784         struct list_head *head = &info->space_info;
785         struct btrfs_space_info *found;
786
787         rcu_read_lock();
788         list_for_each_entry_rcu(found, head, list)
789                 found->full = 0;
790         rcu_read_unlock();
791 }
792
793 /* simple helper to search for an existing data extent at a given offset */
794 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
795 {
796         int ret;
797         struct btrfs_key key;
798         struct btrfs_path *path;
799
800         path = btrfs_alloc_path();
801         if (!path)
802                 return -ENOMEM;
803
804         key.objectid = start;
805         key.offset = len;
806         key.type = BTRFS_EXTENT_ITEM_KEY;
807         ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
808         btrfs_free_path(path);
809         return ret;
810 }
811
812 /*
813  * helper function to lookup reference count and flags of a tree block.
814  *
815  * the head node for delayed ref is used to store the sum of all the
816  * reference count modifications queued up in the rbtree. the head
817  * node may also store the extent flags to set. This way you can check
818  * to see what the reference count and extent flags would be if all of
819  * the delayed refs are not processed.
820  */
821 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
822                              struct btrfs_fs_info *fs_info, u64 bytenr,
823                              u64 offset, int metadata, u64 *refs, u64 *flags)
824 {
825         struct btrfs_delayed_ref_head *head;
826         struct btrfs_delayed_ref_root *delayed_refs;
827         struct btrfs_path *path;
828         struct btrfs_extent_item *ei;
829         struct extent_buffer *leaf;
830         struct btrfs_key key;
831         u32 item_size;
832         u64 num_refs;
833         u64 extent_flags;
834         int ret;
835
836         /*
837          * If we don't have skinny metadata, don't bother doing anything
838          * different
839          */
840         if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
841                 offset = fs_info->nodesize;
842                 metadata = 0;
843         }
844
845         path = btrfs_alloc_path();
846         if (!path)
847                 return -ENOMEM;
848
849         if (!trans) {
850                 path->skip_locking = 1;
851                 path->search_commit_root = 1;
852         }
853
854 search_again:
855         key.objectid = bytenr;
856         key.offset = offset;
857         if (metadata)
858                 key.type = BTRFS_METADATA_ITEM_KEY;
859         else
860                 key.type = BTRFS_EXTENT_ITEM_KEY;
861
862         ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
863         if (ret < 0)
864                 goto out_free;
865
866         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
867                 if (path->slots[0]) {
868                         path->slots[0]--;
869                         btrfs_item_key_to_cpu(path->nodes[0], &key,
870                                               path->slots[0]);
871                         if (key.objectid == bytenr &&
872                             key.type == BTRFS_EXTENT_ITEM_KEY &&
873                             key.offset == fs_info->nodesize)
874                                 ret = 0;
875                 }
876         }
877
878         if (ret == 0) {
879                 leaf = path->nodes[0];
880                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
881                 if (item_size >= sizeof(*ei)) {
882                         ei = btrfs_item_ptr(leaf, path->slots[0],
883                                             struct btrfs_extent_item);
884                         num_refs = btrfs_extent_refs(leaf, ei);
885                         extent_flags = btrfs_extent_flags(leaf, ei);
886                 } else {
887 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
888                         struct btrfs_extent_item_v0 *ei0;
889                         BUG_ON(item_size != sizeof(*ei0));
890                         ei0 = btrfs_item_ptr(leaf, path->slots[0],
891                                              struct btrfs_extent_item_v0);
892                         num_refs = btrfs_extent_refs_v0(leaf, ei0);
893                         /* FIXME: this isn't correct for data */
894                         extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
895 #else
896                         BUG();
897 #endif
898                 }
899                 BUG_ON(num_refs == 0);
900         } else {
901                 num_refs = 0;
902                 extent_flags = 0;
903                 ret = 0;
904         }
905
906         if (!trans)
907                 goto out;
908
909         delayed_refs = &trans->transaction->delayed_refs;
910         spin_lock(&delayed_refs->lock);
911         head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
912         if (head) {
913                 if (!mutex_trylock(&head->mutex)) {
914                         refcount_inc(&head->refs);
915                         spin_unlock(&delayed_refs->lock);
916
917                         btrfs_release_path(path);
918
919                         /*
920                          * Mutex was contended, block until it's released and try
921                          * again
922                          */
923                         mutex_lock(&head->mutex);
924                         mutex_unlock(&head->mutex);
925                         btrfs_put_delayed_ref_head(head);
926                         goto search_again;
927                 }
928                 spin_lock(&head->lock);
929                 if (head->extent_op && head->extent_op->update_flags)
930                         extent_flags |= head->extent_op->flags_to_set;
931                 else
932                         BUG_ON(num_refs == 0);
933
934                 num_refs += head->ref_mod;
935                 spin_unlock(&head->lock);
936                 mutex_unlock(&head->mutex);
937         }
938         spin_unlock(&delayed_refs->lock);
939 out:
940         WARN_ON(num_refs == 0);
941         if (refs)
942                 *refs = num_refs;
943         if (flags)
944                 *flags = extent_flags;
945 out_free:
946         btrfs_free_path(path);
947         return ret;
948 }
949
950 /*
951  * Back reference rules.  Back refs have three main goals:
952  *
953  * 1) differentiate between all holders of references to an extent so that
954  *    when a reference is dropped we can make sure it was a valid reference
955  *    before freeing the extent.
956  *
957  * 2) Provide enough information to quickly find the holders of an extent
958  *    if we notice a given block is corrupted or bad.
959  *
960  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
961  *    maintenance.  This is actually the same as #2, but with a slightly
962  *    different use case.
963  *
964  * There are two kinds of back refs. The implicit back refs is optimized
965  * for pointers in non-shared tree blocks. For a given pointer in a block,
966  * back refs of this kind provide information about the block's owner tree
967  * and the pointer's key. These information allow us to find the block by
968  * b-tree searching. The full back refs is for pointers in tree blocks not
969  * referenced by their owner trees. The location of tree block is recorded
970  * in the back refs. Actually the full back refs is generic, and can be
971  * used in all cases the implicit back refs is used. The major shortcoming
972  * of the full back refs is its overhead. Every time a tree block gets
973  * COWed, we have to update back refs entry for all pointers in it.
974  *
975  * For a newly allocated tree block, we use implicit back refs for
976  * pointers in it. This means most tree related operations only involve
977  * implicit back refs. For a tree block created in old transaction, the
978  * only way to drop a reference to it is COW it. So we can detect the
979  * event that tree block loses its owner tree's reference and do the
980  * back refs conversion.
981  *
982  * When a tree block is COWed through a tree, there are four cases:
983  *
984  * The reference count of the block is one and the tree is the block's
985  * owner tree. Nothing to do in this case.
986  *
987  * The reference count of the block is one and the tree is not the
988  * block's owner tree. In this case, full back refs is used for pointers
989  * in the block. Remove these full back refs, add implicit back refs for
990  * every pointers in the new block.
991  *
992  * The reference count of the block is greater than one and the tree is
993  * the block's owner tree. In this case, implicit back refs is used for
994  * pointers in the block. Add full back refs for every pointers in the
995  * block, increase lower level extents' reference counts. The original
996  * implicit back refs are entailed to the new block.
997  *
998  * The reference count of the block is greater than one and the tree is
999  * not the block's owner tree. Add implicit back refs for every pointer in
1000  * the new block, increase lower level extents' reference count.
1001  *
1002  * Back Reference Key composing:
1003  *
1004  * The key objectid corresponds to the first byte in the extent,
1005  * The key type is used to differentiate between types of back refs.
1006  * There are different meanings of the key offset for different types
1007  * of back refs.
1008  *
1009  * File extents can be referenced by:
1010  *
1011  * - multiple snapshots, subvolumes, or different generations in one subvol
1012  * - different files inside a single subvolume
1013  * - different offsets inside a file (bookend extents in file.c)
1014  *
1015  * The extent ref structure for the implicit back refs has fields for:
1016  *
1017  * - Objectid of the subvolume root
1018  * - objectid of the file holding the reference
1019  * - original offset in the file
1020  * - how many bookend extents
1021  *
1022  * The key offset for the implicit back refs is hash of the first
1023  * three fields.
1024  *
1025  * The extent ref structure for the full back refs has field for:
1026  *
1027  * - number of pointers in the tree leaf
1028  *
1029  * The key offset for the implicit back refs is the first byte of
1030  * the tree leaf
1031  *
1032  * When a file extent is allocated, The implicit back refs is used.
1033  * the fields are filled in:
1034  *
1035  *     (root_key.objectid, inode objectid, offset in file, 1)
1036  *
1037  * When a file extent is removed file truncation, we find the
1038  * corresponding implicit back refs and check the following fields:
1039  *
1040  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1041  *
1042  * Btree extents can be referenced by:
1043  *
1044  * - Different subvolumes
1045  *
1046  * Both the implicit back refs and the full back refs for tree blocks
1047  * only consist of key. The key offset for the implicit back refs is
1048  * objectid of block's owner tree. The key offset for the full back refs
1049  * is the first byte of parent block.
1050  *
1051  * When implicit back refs is used, information about the lowest key and
1052  * level of the tree block are required. These information are stored in
1053  * tree block info structure.
1054  */
1055
1056 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1057 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1058                                   struct btrfs_fs_info *fs_info,
1059                                   struct btrfs_path *path,
1060                                   u64 owner, u32 extra_size)
1061 {
1062         struct btrfs_root *root = fs_info->extent_root;
1063         struct btrfs_extent_item *item;
1064         struct btrfs_extent_item_v0 *ei0;
1065         struct btrfs_extent_ref_v0 *ref0;
1066         struct btrfs_tree_block_info *bi;
1067         struct extent_buffer *leaf;
1068         struct btrfs_key key;
1069         struct btrfs_key found_key;
1070         u32 new_size = sizeof(*item);
1071         u64 refs;
1072         int ret;
1073
1074         leaf = path->nodes[0];
1075         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1076
1077         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1078         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1079                              struct btrfs_extent_item_v0);
1080         refs = btrfs_extent_refs_v0(leaf, ei0);
1081
1082         if (owner == (u64)-1) {
1083                 while (1) {
1084                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1085                                 ret = btrfs_next_leaf(root, path);
1086                                 if (ret < 0)
1087                                         return ret;
1088                                 BUG_ON(ret > 0); /* Corruption */
1089                                 leaf = path->nodes[0];
1090                         }
1091                         btrfs_item_key_to_cpu(leaf, &found_key,
1092                                               path->slots[0]);
1093                         BUG_ON(key.objectid != found_key.objectid);
1094                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1095                                 path->slots[0]++;
1096                                 continue;
1097                         }
1098                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1099                                               struct btrfs_extent_ref_v0);
1100                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1101                         break;
1102                 }
1103         }
1104         btrfs_release_path(path);
1105
1106         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1107                 new_size += sizeof(*bi);
1108
1109         new_size -= sizeof(*ei0);
1110         ret = btrfs_search_slot(trans, root, &key, path,
1111                                 new_size + extra_size, 1);
1112         if (ret < 0)
1113                 return ret;
1114         BUG_ON(ret); /* Corruption */
1115
1116         btrfs_extend_item(fs_info, path, new_size);
1117
1118         leaf = path->nodes[0];
1119         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1120         btrfs_set_extent_refs(leaf, item, refs);
1121         /* FIXME: get real generation */
1122         btrfs_set_extent_generation(leaf, item, 0);
1123         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1124                 btrfs_set_extent_flags(leaf, item,
1125                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1126                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1127                 bi = (struct btrfs_tree_block_info *)(item + 1);
1128                 /* FIXME: get first key of the block */
1129                 memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
1130                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1131         } else {
1132                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1133         }
1134         btrfs_mark_buffer_dirty(leaf);
1135         return 0;
1136 }
1137 #endif
1138
1139 /*
1140  * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
1141  * is_data == BTRFS_REF_TYPE_DATA, data type is requried,
1142  * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
1143  */
1144 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
1145                                      struct btrfs_extent_inline_ref *iref,
1146                                      enum btrfs_inline_ref_type is_data)
1147 {
1148         int type = btrfs_extent_inline_ref_type(eb, iref);
1149         u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
1150
1151         if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1152             type == BTRFS_SHARED_BLOCK_REF_KEY ||
1153             type == BTRFS_SHARED_DATA_REF_KEY ||
1154             type == BTRFS_EXTENT_DATA_REF_KEY) {
1155                 if (is_data == BTRFS_REF_TYPE_BLOCK) {
1156                         if (type == BTRFS_TREE_BLOCK_REF_KEY)
1157                                 return type;
1158                         if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1159                                 ASSERT(eb->fs_info);
1160                                 /*
1161                                  * Every shared one has parent tree
1162                                  * block, which must be aligned to
1163                                  * nodesize.
1164                                  */
1165                                 if (offset &&
1166                                     IS_ALIGNED(offset, eb->fs_info->nodesize))
1167                                         return type;
1168                         }
1169                 } else if (is_data == BTRFS_REF_TYPE_DATA) {
1170                         if (type == BTRFS_EXTENT_DATA_REF_KEY)
1171                                 return type;
1172                         if (type == BTRFS_SHARED_DATA_REF_KEY) {
1173                                 ASSERT(eb->fs_info);
1174                                 /*
1175                                  * Every shared one has parent tree
1176                                  * block, which must be aligned to
1177                                  * nodesize.
1178                                  */
1179                                 if (offset &&
1180                                     IS_ALIGNED(offset, eb->fs_info->nodesize))
1181                                         return type;
1182                         }
1183                 } else {
1184                         ASSERT(is_data == BTRFS_REF_TYPE_ANY);
1185                         return type;
1186                 }
1187         }
1188
1189         btrfs_print_leaf((struct extent_buffer *)eb);
1190         btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
1191                   eb->start, type);
1192         WARN_ON(1);
1193
1194         return BTRFS_REF_TYPE_INVALID;
1195 }
1196
1197 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1198 {
1199         u32 high_crc = ~(u32)0;
1200         u32 low_crc = ~(u32)0;
1201         __le64 lenum;
1202
1203         lenum = cpu_to_le64(root_objectid);
1204         high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1205         lenum = cpu_to_le64(owner);
1206         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1207         lenum = cpu_to_le64(offset);
1208         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1209
1210         return ((u64)high_crc << 31) ^ (u64)low_crc;
1211 }
1212
1213 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1214                                      struct btrfs_extent_data_ref *ref)
1215 {
1216         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1217                                     btrfs_extent_data_ref_objectid(leaf, ref),
1218                                     btrfs_extent_data_ref_offset(leaf, ref));
1219 }
1220
1221 static int match_extent_data_ref(struct extent_buffer *leaf,
1222                                  struct btrfs_extent_data_ref *ref,
1223                                  u64 root_objectid, u64 owner, u64 offset)
1224 {
1225         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1226             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1227             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1228                 return 0;
1229         return 1;
1230 }
1231
1232 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1233                                            struct btrfs_fs_info *fs_info,
1234                                            struct btrfs_path *path,
1235                                            u64 bytenr, u64 parent,
1236                                            u64 root_objectid,
1237                                            u64 owner, u64 offset)
1238 {
1239         struct btrfs_root *root = fs_info->extent_root;
1240         struct btrfs_key key;
1241         struct btrfs_extent_data_ref *ref;
1242         struct extent_buffer *leaf;
1243         u32 nritems;
1244         int ret;
1245         int recow;
1246         int err = -ENOENT;
1247
1248         key.objectid = bytenr;
1249         if (parent) {
1250                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1251                 key.offset = parent;
1252         } else {
1253                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1254                 key.offset = hash_extent_data_ref(root_objectid,
1255                                                   owner, offset);
1256         }
1257 again:
1258         recow = 0;
1259         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1260         if (ret < 0) {
1261                 err = ret;
1262                 goto fail;
1263         }
1264
1265         if (parent) {
1266                 if (!ret)
1267                         return 0;
1268 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1269                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1270                 btrfs_release_path(path);
1271                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1272                 if (ret < 0) {
1273                         err = ret;
1274                         goto fail;
1275                 }
1276                 if (!ret)
1277                         return 0;
1278 #endif
1279                 goto fail;
1280         }
1281
1282         leaf = path->nodes[0];
1283         nritems = btrfs_header_nritems(leaf);
1284         while (1) {
1285                 if (path->slots[0] >= nritems) {
1286                         ret = btrfs_next_leaf(root, path);
1287                         if (ret < 0)
1288                                 err = ret;
1289                         if (ret)
1290                                 goto fail;
1291
1292                         leaf = path->nodes[0];
1293                         nritems = btrfs_header_nritems(leaf);
1294                         recow = 1;
1295                 }
1296
1297                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1298                 if (key.objectid != bytenr ||
1299                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1300                         goto fail;
1301
1302                 ref = btrfs_item_ptr(leaf, path->slots[0],
1303                                      struct btrfs_extent_data_ref);
1304
1305                 if (match_extent_data_ref(leaf, ref, root_objectid,
1306                                           owner, offset)) {
1307                         if (recow) {
1308                                 btrfs_release_path(path);
1309                                 goto again;
1310                         }
1311                         err = 0;
1312                         break;
1313                 }
1314                 path->slots[0]++;
1315         }
1316 fail:
1317         return err;
1318 }
1319
1320 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1321                                            struct btrfs_fs_info *fs_info,
1322                                            struct btrfs_path *path,
1323                                            u64 bytenr, u64 parent,
1324                                            u64 root_objectid, u64 owner,
1325                                            u64 offset, int refs_to_add)
1326 {
1327         struct btrfs_root *root = fs_info->extent_root;
1328         struct btrfs_key key;
1329         struct extent_buffer *leaf;
1330         u32 size;
1331         u32 num_refs;
1332         int ret;
1333
1334         key.objectid = bytenr;
1335         if (parent) {
1336                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1337                 key.offset = parent;
1338                 size = sizeof(struct btrfs_shared_data_ref);
1339         } else {
1340                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1341                 key.offset = hash_extent_data_ref(root_objectid,
1342                                                   owner, offset);
1343                 size = sizeof(struct btrfs_extent_data_ref);
1344         }
1345
1346         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1347         if (ret && ret != -EEXIST)
1348                 goto fail;
1349
1350         leaf = path->nodes[0];
1351         if (parent) {
1352                 struct btrfs_shared_data_ref *ref;
1353                 ref = btrfs_item_ptr(leaf, path->slots[0],
1354                                      struct btrfs_shared_data_ref);
1355                 if (ret == 0) {
1356                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1357                 } else {
1358                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1359                         num_refs += refs_to_add;
1360                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1361                 }
1362         } else {
1363                 struct btrfs_extent_data_ref *ref;
1364                 while (ret == -EEXIST) {
1365                         ref = btrfs_item_ptr(leaf, path->slots[0],
1366                                              struct btrfs_extent_data_ref);
1367                         if (match_extent_data_ref(leaf, ref, root_objectid,
1368                                                   owner, offset))
1369                                 break;
1370                         btrfs_release_path(path);
1371                         key.offset++;
1372                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1373                                                       size);
1374                         if (ret && ret != -EEXIST)
1375                                 goto fail;
1376
1377                         leaf = path->nodes[0];
1378                 }
1379                 ref = btrfs_item_ptr(leaf, path->slots[0],
1380                                      struct btrfs_extent_data_ref);
1381                 if (ret == 0) {
1382                         btrfs_set_extent_data_ref_root(leaf, ref,
1383                                                        root_objectid);
1384                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1385                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1386                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1387                 } else {
1388                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1389                         num_refs += refs_to_add;
1390                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1391                 }
1392         }
1393         btrfs_mark_buffer_dirty(leaf);
1394         ret = 0;
1395 fail:
1396         btrfs_release_path(path);
1397         return ret;
1398 }
1399
1400 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1401                                            struct btrfs_fs_info *fs_info,
1402                                            struct btrfs_path *path,
1403                                            int refs_to_drop, int *last_ref)
1404 {
1405         struct btrfs_key key;
1406         struct btrfs_extent_data_ref *ref1 = NULL;
1407         struct btrfs_shared_data_ref *ref2 = NULL;
1408         struct extent_buffer *leaf;
1409         u32 num_refs = 0;
1410         int ret = 0;
1411
1412         leaf = path->nodes[0];
1413         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1414
1415         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1416                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1417                                       struct btrfs_extent_data_ref);
1418                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1419         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1420                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1421                                       struct btrfs_shared_data_ref);
1422                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1423 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1424         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1425                 struct btrfs_extent_ref_v0 *ref0;
1426                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1427                                       struct btrfs_extent_ref_v0);
1428                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1429 #endif
1430         } else {
1431                 BUG();
1432         }
1433
1434         BUG_ON(num_refs < refs_to_drop);
1435         num_refs -= refs_to_drop;
1436
1437         if (num_refs == 0) {
1438                 ret = btrfs_del_item(trans, fs_info->extent_root, path);
1439                 *last_ref = 1;
1440         } else {
1441                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1442                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1443                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1444                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1445 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1446                 else {
1447                         struct btrfs_extent_ref_v0 *ref0;
1448                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1449                                         struct btrfs_extent_ref_v0);
1450                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1451                 }
1452 #endif
1453                 btrfs_mark_buffer_dirty(leaf);
1454         }
1455         return ret;
1456 }
1457
1458 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1459                                           struct btrfs_extent_inline_ref *iref)
1460 {
1461         struct btrfs_key key;
1462         struct extent_buffer *leaf;
1463         struct btrfs_extent_data_ref *ref1;
1464         struct btrfs_shared_data_ref *ref2;
1465         u32 num_refs = 0;
1466         int type;
1467
1468         leaf = path->nodes[0];
1469         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1470         if (iref) {
1471                 /*
1472                  * If type is invalid, we should have bailed out earlier than
1473                  * this call.
1474                  */
1475                 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
1476                 ASSERT(type != BTRFS_REF_TYPE_INVALID);
1477                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1478                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1479                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1480                 } else {
1481                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1482                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1483                 }
1484         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1485                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1486                                       struct btrfs_extent_data_ref);
1487                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1488         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1489                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1490                                       struct btrfs_shared_data_ref);
1491                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1492 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1493         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1494                 struct btrfs_extent_ref_v0 *ref0;
1495                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1496                                       struct btrfs_extent_ref_v0);
1497                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1498 #endif
1499         } else {
1500                 WARN_ON(1);
1501         }
1502         return num_refs;
1503 }
1504
1505 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1506                                           struct btrfs_fs_info *fs_info,
1507                                           struct btrfs_path *path,
1508                                           u64 bytenr, u64 parent,
1509                                           u64 root_objectid)
1510 {
1511         struct btrfs_root *root = fs_info->extent_root;
1512         struct btrfs_key key;
1513         int ret;
1514
1515         key.objectid = bytenr;
1516         if (parent) {
1517                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1518                 key.offset = parent;
1519         } else {
1520                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1521                 key.offset = root_objectid;
1522         }
1523
1524         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1525         if (ret > 0)
1526                 ret = -ENOENT;
1527 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1528         if (ret == -ENOENT && parent) {
1529                 btrfs_release_path(path);
1530                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1531                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1532                 if (ret > 0)
1533                         ret = -ENOENT;
1534         }
1535 #endif
1536         return ret;
1537 }
1538
1539 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1540                                           struct btrfs_fs_info *fs_info,
1541                                           struct btrfs_path *path,
1542                                           u64 bytenr, u64 parent,
1543                                           u64 root_objectid)
1544 {
1545         struct btrfs_key key;
1546         int ret;
1547
1548         key.objectid = bytenr;
1549         if (parent) {
1550                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1551                 key.offset = parent;
1552         } else {
1553                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1554                 key.offset = root_objectid;
1555         }
1556
1557         ret = btrfs_insert_empty_item(trans, fs_info->extent_root,
1558                                       path, &key, 0);
1559         btrfs_release_path(path);
1560         return ret;
1561 }
1562
1563 static inline int extent_ref_type(u64 parent, u64 owner)
1564 {
1565         int type;
1566         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1567                 if (parent > 0)
1568                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1569                 else
1570                         type = BTRFS_TREE_BLOCK_REF_KEY;
1571         } else {
1572                 if (parent > 0)
1573                         type = BTRFS_SHARED_DATA_REF_KEY;
1574                 else
1575                         type = BTRFS_EXTENT_DATA_REF_KEY;
1576         }
1577         return type;
1578 }
1579
1580 static int find_next_key(struct btrfs_path *path, int level,
1581                          struct btrfs_key *key)
1582
1583 {
1584         for (; level < BTRFS_MAX_LEVEL; level++) {
1585                 if (!path->nodes[level])
1586                         break;
1587                 if (path->slots[level] + 1 >=
1588                     btrfs_header_nritems(path->nodes[level]))
1589                         continue;
1590                 if (level == 0)
1591                         btrfs_item_key_to_cpu(path->nodes[level], key,
1592                                               path->slots[level] + 1);
1593                 else
1594                         btrfs_node_key_to_cpu(path->nodes[level], key,
1595                                               path->slots[level] + 1);
1596                 return 0;
1597         }
1598         return 1;
1599 }
1600
1601 /*
1602  * look for inline back ref. if back ref is found, *ref_ret is set
1603  * to the address of inline back ref, and 0 is returned.
1604  *
1605  * if back ref isn't found, *ref_ret is set to the address where it
1606  * should be inserted, and -ENOENT is returned.
1607  *
1608  * if insert is true and there are too many inline back refs, the path
1609  * points to the extent item, and -EAGAIN is returned.
1610  *
1611  * NOTE: inline back refs are ordered in the same way that back ref
1612  *       items in the tree are ordered.
1613  */
1614 static noinline_for_stack
1615 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1616                                  struct btrfs_fs_info *fs_info,
1617                                  struct btrfs_path *path,
1618                                  struct btrfs_extent_inline_ref **ref_ret,
1619                                  u64 bytenr, u64 num_bytes,
1620                                  u64 parent, u64 root_objectid,
1621                                  u64 owner, u64 offset, int insert)
1622 {
1623         struct btrfs_root *root = fs_info->extent_root;
1624         struct btrfs_key key;
1625         struct extent_buffer *leaf;
1626         struct btrfs_extent_item *ei;
1627         struct btrfs_extent_inline_ref *iref;
1628         u64 flags;
1629         u64 item_size;
1630         unsigned long ptr;
1631         unsigned long end;
1632         int extra_size;
1633         int type;
1634         int want;
1635         int ret;
1636         int err = 0;
1637         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
1638         int needed;
1639
1640         key.objectid = bytenr;
1641         key.type = BTRFS_EXTENT_ITEM_KEY;
1642         key.offset = num_bytes;
1643
1644         want = extent_ref_type(parent, owner);
1645         if (insert) {
1646                 extra_size = btrfs_extent_inline_ref_size(want);
1647                 path->keep_locks = 1;
1648         } else
1649                 extra_size = -1;
1650
1651         /*
1652          * Owner is our parent level, so we can just add one to get the level
1653          * for the block we are interested in.
1654          */
1655         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1656                 key.type = BTRFS_METADATA_ITEM_KEY;
1657                 key.offset = owner;
1658         }
1659
1660 again:
1661         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1662         if (ret < 0) {
1663                 err = ret;
1664                 goto out;
1665         }
1666
1667         /*
1668          * We may be a newly converted file system which still has the old fat
1669          * extent entries for metadata, so try and see if we have one of those.
1670          */
1671         if (ret > 0 && skinny_metadata) {
1672                 skinny_metadata = false;
1673                 if (path->slots[0]) {
1674                         path->slots[0]--;
1675                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1676                                               path->slots[0]);
1677                         if (key.objectid == bytenr &&
1678                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1679                             key.offset == num_bytes)
1680                                 ret = 0;
1681                 }
1682                 if (ret) {
1683                         key.objectid = bytenr;
1684                         key.type = BTRFS_EXTENT_ITEM_KEY;
1685                         key.offset = num_bytes;
1686                         btrfs_release_path(path);
1687                         goto again;
1688                 }
1689         }
1690
1691         if (ret && !insert) {
1692                 err = -ENOENT;
1693                 goto out;
1694         } else if (WARN_ON(ret)) {
1695                 err = -EIO;
1696                 goto out;
1697         }
1698
1699         leaf = path->nodes[0];
1700         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1701 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1702         if (item_size < sizeof(*ei)) {
1703                 if (!insert) {
1704                         err = -ENOENT;
1705                         goto out;
1706                 }
1707                 ret = convert_extent_item_v0(trans, fs_info, path, owner,
1708                                              extra_size);
1709                 if (ret < 0) {
1710                         err = ret;
1711                         goto out;
1712                 }
1713                 leaf = path->nodes[0];
1714                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1715         }
1716 #endif
1717         BUG_ON(item_size < sizeof(*ei));
1718
1719         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1720         flags = btrfs_extent_flags(leaf, ei);
1721
1722         ptr = (unsigned long)(ei + 1);
1723         end = (unsigned long)ei + item_size;
1724
1725         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1726                 ptr += sizeof(struct btrfs_tree_block_info);
1727                 BUG_ON(ptr > end);
1728         }
1729
1730         if (owner >= BTRFS_FIRST_FREE_OBJECTID)
1731                 needed = BTRFS_REF_TYPE_DATA;
1732         else
1733                 needed = BTRFS_REF_TYPE_BLOCK;
1734
1735         err = -ENOENT;
1736         while (1) {
1737                 if (ptr >= end) {
1738                         WARN_ON(ptr > end);
1739                         break;
1740                 }
1741                 iref = (struct btrfs_extent_inline_ref *)ptr;
1742                 type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
1743                 if (type == BTRFS_REF_TYPE_INVALID) {
1744                         err = -EINVAL;
1745                         goto out;
1746                 }
1747
1748                 if (want < type)
1749                         break;
1750                 if (want > type) {
1751                         ptr += btrfs_extent_inline_ref_size(type);
1752                         continue;
1753                 }
1754
1755                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1756                         struct btrfs_extent_data_ref *dref;
1757                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1758                         if (match_extent_data_ref(leaf, dref, root_objectid,
1759                                                   owner, offset)) {
1760                                 err = 0;
1761                                 break;
1762                         }
1763                         if (hash_extent_data_ref_item(leaf, dref) <
1764                             hash_extent_data_ref(root_objectid, owner, offset))
1765                                 break;
1766                 } else {
1767                         u64 ref_offset;
1768                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1769                         if (parent > 0) {
1770                                 if (parent == ref_offset) {
1771                                         err = 0;
1772                                         break;
1773                                 }
1774                                 if (ref_offset < parent)
1775                                         break;
1776                         } else {
1777                                 if (root_objectid == ref_offset) {
1778                                         err = 0;
1779                                         break;
1780                                 }
1781                                 if (ref_offset < root_objectid)
1782                                         break;
1783                         }
1784                 }
1785                 ptr += btrfs_extent_inline_ref_size(type);
1786         }
1787         if (err == -ENOENT && insert) {
1788                 if (item_size + extra_size >=
1789                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1790                         err = -EAGAIN;
1791                         goto out;
1792                 }
1793                 /*
1794                  * To add new inline back ref, we have to make sure
1795                  * there is no corresponding back ref item.
1796                  * For simplicity, we just do not add new inline back
1797                  * ref if there is any kind of item for this block
1798                  */
1799                 if (find_next_key(path, 0, &key) == 0 &&
1800                     key.objectid == bytenr &&
1801                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1802                         err = -EAGAIN;
1803                         goto out;
1804                 }
1805         }
1806         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1807 out:
1808         if (insert) {
1809                 path->keep_locks = 0;
1810                 btrfs_unlock_up_safe(path, 1);
1811         }
1812         return err;
1813 }
1814
1815 /*
1816  * helper to add new inline back ref
1817  */
1818 static noinline_for_stack
1819 void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
1820                                  struct btrfs_path *path,
1821                                  struct btrfs_extent_inline_ref *iref,
1822                                  u64 parent, u64 root_objectid,
1823                                  u64 owner, u64 offset, int refs_to_add,
1824                                  struct btrfs_delayed_extent_op *extent_op)
1825 {
1826         struct extent_buffer *leaf;
1827         struct btrfs_extent_item *ei;
1828         unsigned long ptr;
1829         unsigned long end;
1830         unsigned long item_offset;
1831         u64 refs;
1832         int size;
1833         int type;
1834
1835         leaf = path->nodes[0];
1836         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1837         item_offset = (unsigned long)iref - (unsigned long)ei;
1838
1839         type = extent_ref_type(parent, owner);
1840         size = btrfs_extent_inline_ref_size(type);
1841
1842         btrfs_extend_item(fs_info, path, size);
1843
1844         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1845         refs = btrfs_extent_refs(leaf, ei);
1846         refs += refs_to_add;
1847         btrfs_set_extent_refs(leaf, ei, refs);
1848         if (extent_op)
1849                 __run_delayed_extent_op(extent_op, leaf, ei);
1850
1851         ptr = (unsigned long)ei + item_offset;
1852         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1853         if (ptr < end - size)
1854                 memmove_extent_buffer(leaf, ptr + size, ptr,
1855                                       end - size - ptr);
1856
1857         iref = (struct btrfs_extent_inline_ref *)ptr;
1858         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1859         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1860                 struct btrfs_extent_data_ref *dref;
1861                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1862                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1863                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1864                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1865                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1866         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1867                 struct btrfs_shared_data_ref *sref;
1868                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1869                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1870                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1871         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1872                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1873         } else {
1874                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1875         }
1876         btrfs_mark_buffer_dirty(leaf);
1877 }
1878
1879 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1880                                  struct btrfs_fs_info *fs_info,
1881                                  struct btrfs_path *path,
1882                                  struct btrfs_extent_inline_ref **ref_ret,
1883                                  u64 bytenr, u64 num_bytes, u64 parent,
1884                                  u64 root_objectid, u64 owner, u64 offset)
1885 {
1886         int ret;
1887
1888         ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret,
1889                                            bytenr, num_bytes, parent,
1890                                            root_objectid, owner, offset, 0);
1891         if (ret != -ENOENT)
1892                 return ret;
1893
1894         btrfs_release_path(path);
1895         *ref_ret = NULL;
1896
1897         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1898                 ret = lookup_tree_block_ref(trans, fs_info, path, bytenr,
1899                                             parent, root_objectid);
1900         } else {
1901                 ret = lookup_extent_data_ref(trans, fs_info, path, bytenr,
1902                                              parent, root_objectid, owner,
1903                                              offset);
1904         }
1905         return ret;
1906 }
1907
1908 /*
1909  * helper to update/remove inline back ref
1910  */
1911 static noinline_for_stack
1912 void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
1913                                   struct btrfs_path *path,
1914                                   struct btrfs_extent_inline_ref *iref,
1915                                   int refs_to_mod,
1916                                   struct btrfs_delayed_extent_op *extent_op,
1917                                   int *last_ref)
1918 {
1919         struct extent_buffer *leaf;
1920         struct btrfs_extent_item *ei;
1921         struct btrfs_extent_data_ref *dref = NULL;
1922         struct btrfs_shared_data_ref *sref = NULL;
1923         unsigned long ptr;
1924         unsigned long end;
1925         u32 item_size;
1926         int size;
1927         int type;
1928         u64 refs;
1929
1930         leaf = path->nodes[0];
1931         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1932         refs = btrfs_extent_refs(leaf, ei);
1933         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1934         refs += refs_to_mod;
1935         btrfs_set_extent_refs(leaf, ei, refs);
1936         if (extent_op)
1937                 __run_delayed_extent_op(extent_op, leaf, ei);
1938
1939         /*
1940          * If type is invalid, we should have bailed out after
1941          * lookup_inline_extent_backref().
1942          */
1943         type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
1944         ASSERT(type != BTRFS_REF_TYPE_INVALID);
1945
1946         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1947                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1948                 refs = btrfs_extent_data_ref_count(leaf, dref);
1949         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1950                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1951                 refs = btrfs_shared_data_ref_count(leaf, sref);
1952         } else {
1953                 refs = 1;
1954                 BUG_ON(refs_to_mod != -1);
1955         }
1956
1957         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1958         refs += refs_to_mod;
1959
1960         if (refs > 0) {
1961                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1962                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1963                 else
1964                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1965         } else {
1966                 *last_ref = 1;
1967                 size =  btrfs_extent_inline_ref_size(type);
1968                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1969                 ptr = (unsigned long)iref;
1970                 end = (unsigned long)ei + item_size;
1971                 if (ptr + size < end)
1972                         memmove_extent_buffer(leaf, ptr, ptr + size,
1973                                               end - ptr - size);
1974                 item_size -= size;
1975                 btrfs_truncate_item(fs_info, path, item_size, 1);
1976         }
1977         btrfs_mark_buffer_dirty(leaf);
1978 }
1979
1980 static noinline_for_stack
1981 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1982                                  struct btrfs_fs_info *fs_info,
1983                                  struct btrfs_path *path,
1984                                  u64 bytenr, u64 num_bytes, u64 parent,
1985                                  u64 root_objectid, u64 owner,
1986                                  u64 offset, int refs_to_add,
1987                                  struct btrfs_delayed_extent_op *extent_op)
1988 {
1989         struct btrfs_extent_inline_ref *iref;
1990         int ret;
1991
1992         ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
1993                                            bytenr, num_bytes, parent,
1994                                            root_objectid, owner, offset, 1);
1995         if (ret == 0) {
1996                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1997                 update_inline_extent_backref(fs_info, path, iref,
1998                                              refs_to_add, extent_op, NULL);
1999         } else if (ret == -ENOENT) {
2000                 setup_inline_extent_backref(fs_info, path, iref, parent,
2001                                             root_objectid, owner, offset,
2002                                             refs_to_add, extent_op);
2003                 ret = 0;
2004         }
2005         return ret;
2006 }
2007
2008 static int insert_extent_backref(struct btrfs_trans_handle *trans,
2009                                  struct btrfs_fs_info *fs_info,
2010                                  struct btrfs_path *path,
2011                                  u64 bytenr, u64 parent, u64 root_objectid,
2012                                  u64 owner, u64 offset, int refs_to_add)
2013 {
2014         int ret;
2015         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2016                 BUG_ON(refs_to_add != 1);
2017                 ret = insert_tree_block_ref(trans, fs_info, path, bytenr,
2018                                             parent, root_objectid);
2019         } else {
2020                 ret = insert_extent_data_ref(trans, fs_info, path, bytenr,
2021                                              parent, root_objectid,
2022                                              owner, offset, refs_to_add);
2023         }
2024         return ret;
2025 }
2026
2027 static int remove_extent_backref(struct btrfs_trans_handle *trans,
2028                                  struct btrfs_fs_info *fs_info,
2029                                  struct btrfs_path *path,
2030                                  struct btrfs_extent_inline_ref *iref,
2031                                  int refs_to_drop, int is_data, int *last_ref)
2032 {
2033         int ret = 0;
2034
2035         BUG_ON(!is_data && refs_to_drop != 1);
2036         if (iref) {
2037                 update_inline_extent_backref(fs_info, path, iref,
2038                                              -refs_to_drop, NULL, last_ref);
2039         } else if (is_data) {
2040                 ret = remove_extent_data_ref(trans, fs_info, path, refs_to_drop,
2041                                              last_ref);
2042         } else {
2043                 *last_ref = 1;
2044                 ret = btrfs_del_item(trans, fs_info->extent_root, path);
2045         }
2046         return ret;
2047 }
2048
2049 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
2050 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
2051                                u64 *discarded_bytes)
2052 {
2053         int j, ret = 0;
2054         u64 bytes_left, end;
2055         u64 aligned_start = ALIGN(start, 1 << 9);
2056
2057         if (WARN_ON(start != aligned_start)) {
2058                 len -= aligned_start - start;
2059                 len = round_down(len, 1 << 9);
2060                 start = aligned_start;
2061         }
2062
2063         *discarded_bytes = 0;
2064
2065         if (!len)
2066                 return 0;
2067
2068         end = start + len;
2069         bytes_left = len;
2070
2071         /* Skip any superblocks on this device. */
2072         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
2073                 u64 sb_start = btrfs_sb_offset(j);
2074                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
2075                 u64 size = sb_start - start;
2076
2077                 if (!in_range(sb_start, start, bytes_left) &&
2078                     !in_range(sb_end, start, bytes_left) &&
2079                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
2080                         continue;
2081
2082                 /*
2083                  * Superblock spans beginning of range.  Adjust start and
2084                  * try again.
2085                  */
2086                 if (sb_start <= start) {
2087                         start += sb_end - start;
2088                         if (start > end) {
2089                                 bytes_left = 0;
2090                                 break;
2091                         }
2092                         bytes_left = end - start;
2093                         continue;
2094                 }
2095
2096                 if (size) {
2097                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
2098                                                    GFP_NOFS, 0);
2099                         if (!ret)
2100                                 *discarded_bytes += size;
2101                         else if (ret != -EOPNOTSUPP)
2102                                 return ret;
2103                 }
2104
2105                 start = sb_end;
2106                 if (start > end) {
2107                         bytes_left = 0;
2108                         break;
2109                 }
2110                 bytes_left = end - start;
2111         }
2112
2113         if (bytes_left) {
2114                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2115                                            GFP_NOFS, 0);
2116                 if (!ret)
2117                         *discarded_bytes += bytes_left;
2118         }
2119         return ret;
2120 }
2121
2122 int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
2123                          u64 num_bytes, u64 *actual_bytes)
2124 {
2125         int ret;
2126         u64 discarded_bytes = 0;
2127         struct btrfs_bio *bbio = NULL;
2128
2129
2130         /*
2131          * Avoid races with device replace and make sure our bbio has devices
2132          * associated to its stripes that don't go away while we are discarding.
2133          */
2134         btrfs_bio_counter_inc_blocked(fs_info);
2135         /* Tell the block device(s) that the sectors can be discarded */
2136         ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
2137                               &bbio, 0);
2138         /* Error condition is -ENOMEM */
2139         if (!ret) {
2140                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2141                 int i;
2142
2143
2144                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2145                         u64 bytes;
2146                         struct request_queue *req_q;
2147
2148                         if (!stripe->dev->bdev) {
2149                                 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
2150                                 continue;
2151                         }
2152                         req_q = bdev_get_queue(stripe->dev->bdev);
2153                         if (!blk_queue_discard(req_q))
2154                                 continue;
2155
2156                         ret = btrfs_issue_discard(stripe->dev->bdev,
2157                                                   stripe->physical,
2158                                                   stripe->length,
2159                                                   &bytes);
2160                         if (!ret)
2161                                 discarded_bytes += bytes;
2162                         else if (ret != -EOPNOTSUPP)
2163                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2164
2165                         /*
2166                          * Just in case we get back EOPNOTSUPP for some reason,
2167                          * just ignore the return value so we don't screw up
2168                          * people calling discard_extent.
2169                          */
2170                         ret = 0;
2171                 }
2172                 btrfs_put_bbio(bbio);
2173         }
2174         btrfs_bio_counter_dec(fs_info);
2175
2176         if (actual_bytes)
2177                 *actual_bytes = discarded_bytes;
2178
2179
2180         if (ret == -EOPNOTSUPP)
2181                 ret = 0;
2182         return ret;
2183 }
2184
2185 /* Can return -ENOMEM */
2186 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2187                          struct btrfs_root *root,
2188                          u64 bytenr, u64 num_bytes, u64 parent,
2189                          u64 root_objectid, u64 owner, u64 offset)
2190 {
2191         struct btrfs_fs_info *fs_info = root->fs_info;
2192         int old_ref_mod, new_ref_mod;
2193         int ret;
2194
2195         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2196                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2197
2198         btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
2199                            owner, offset, BTRFS_ADD_DELAYED_REF);
2200
2201         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2202                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2203                                                  num_bytes, parent,
2204                                                  root_objectid, (int)owner,
2205                                                  BTRFS_ADD_DELAYED_REF, NULL,
2206                                                  &old_ref_mod, &new_ref_mod);
2207         } else {
2208                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2209                                                  num_bytes, parent,
2210                                                  root_objectid, owner, offset,
2211                                                  0, BTRFS_ADD_DELAYED_REF,
2212                                                  &old_ref_mod, &new_ref_mod);
2213         }
2214
2215         if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
2216                 add_pinned_bytes(fs_info, -num_bytes, owner, root_objectid);
2217
2218         return ret;
2219 }
2220
2221 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2222                                   struct btrfs_fs_info *fs_info,
2223                                   struct btrfs_delayed_ref_node *node,
2224                                   u64 parent, u64 root_objectid,
2225                                   u64 owner, u64 offset, int refs_to_add,
2226                                   struct btrfs_delayed_extent_op *extent_op)
2227 {
2228         struct btrfs_path *path;
2229         struct extent_buffer *leaf;
2230         struct btrfs_extent_item *item;
2231         struct btrfs_key key;
2232         u64 bytenr = node->bytenr;
2233         u64 num_bytes = node->num_bytes;
2234         u64 refs;
2235         int ret;
2236
2237         path = btrfs_alloc_path();
2238         if (!path)
2239                 return -ENOMEM;
2240
2241         path->reada = READA_FORWARD;
2242         path->leave_spinning = 1;
2243         /* this will setup the path even if it fails to insert the back ref */
2244         ret = insert_inline_extent_backref(trans, fs_info, path, bytenr,
2245                                            num_bytes, parent, root_objectid,
2246                                            owner, offset,
2247                                            refs_to_add, extent_op);
2248         if ((ret < 0 && ret != -EAGAIN) || !ret)
2249                 goto out;
2250
2251         /*
2252          * Ok we had -EAGAIN which means we didn't have space to insert and
2253          * inline extent ref, so just update the reference count and add a
2254          * normal backref.
2255          */
2256         leaf = path->nodes[0];
2257         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2258         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2259         refs = btrfs_extent_refs(leaf, item);
2260         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2261         if (extent_op)
2262                 __run_delayed_extent_op(extent_op, leaf, item);
2263
2264         btrfs_mark_buffer_dirty(leaf);
2265         btrfs_release_path(path);
2266
2267         path->reada = READA_FORWARD;
2268         path->leave_spinning = 1;
2269         /* now insert the actual backref */
2270         ret = insert_extent_backref(trans, fs_info, path, bytenr, parent,
2271                                     root_objectid, owner, offset, refs_to_add);
2272         if (ret)
2273                 btrfs_abort_transaction(trans, ret);
2274 out:
2275         btrfs_free_path(path);
2276         return ret;
2277 }
2278
2279 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2280                                 struct btrfs_fs_info *fs_info,
2281                                 struct btrfs_delayed_ref_node *node,
2282                                 struct btrfs_delayed_extent_op *extent_op,
2283                                 int insert_reserved)
2284 {
2285         int ret = 0;
2286         struct btrfs_delayed_data_ref *ref;
2287         struct btrfs_key ins;
2288         u64 parent = 0;
2289         u64 ref_root = 0;
2290         u64 flags = 0;
2291
2292         ins.objectid = node->bytenr;
2293         ins.offset = node->num_bytes;
2294         ins.type = BTRFS_EXTENT_ITEM_KEY;
2295
2296         ref = btrfs_delayed_node_to_data_ref(node);
2297         trace_run_delayed_data_ref(fs_info, node, ref, node->action);
2298
2299         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2300                 parent = ref->parent;
2301         ref_root = ref->root;
2302
2303         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2304                 if (extent_op)
2305                         flags |= extent_op->flags_to_set;
2306                 ret = alloc_reserved_file_extent(trans, fs_info,
2307                                                  parent, ref_root, flags,
2308                                                  ref->objectid, ref->offset,
2309                                                  &ins, node->ref_mod);
2310         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2311                 ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
2312                                              ref_root, ref->objectid,
2313                                              ref->offset, node->ref_mod,
2314                                              extent_op);
2315         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2316                 ret = __btrfs_free_extent(trans, fs_info, node, parent,
2317                                           ref_root, ref->objectid,
2318                                           ref->offset, node->ref_mod,
2319                                           extent_op);
2320         } else {
2321                 BUG();
2322         }
2323         return ret;
2324 }
2325
2326 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2327                                     struct extent_buffer *leaf,
2328                                     struct btrfs_extent_item *ei)
2329 {
2330         u64 flags = btrfs_extent_flags(leaf, ei);
2331         if (extent_op->update_flags) {
2332                 flags |= extent_op->flags_to_set;
2333                 btrfs_set_extent_flags(leaf, ei, flags);
2334         }
2335
2336         if (extent_op->update_key) {
2337                 struct btrfs_tree_block_info *bi;
2338                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2339                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2340                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2341         }
2342 }
2343
2344 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2345                                  struct btrfs_fs_info *fs_info,
2346                                  struct btrfs_delayed_ref_head *head,
2347                                  struct btrfs_delayed_extent_op *extent_op)
2348 {
2349         struct btrfs_key key;
2350         struct btrfs_path *path;
2351         struct btrfs_extent_item *ei;
2352         struct extent_buffer *leaf;
2353         u32 item_size;
2354         int ret;
2355         int err = 0;
2356         int metadata = !extent_op->is_data;
2357
2358         if (trans->aborted)
2359                 return 0;
2360
2361         if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2362                 metadata = 0;
2363
2364         path = btrfs_alloc_path();
2365         if (!path)
2366                 return -ENOMEM;
2367
2368         key.objectid = head->bytenr;
2369
2370         if (metadata) {
2371                 key.type = BTRFS_METADATA_ITEM_KEY;
2372                 key.offset = extent_op->level;
2373         } else {
2374                 key.type = BTRFS_EXTENT_ITEM_KEY;
2375                 key.offset = head->num_bytes;
2376         }
2377
2378 again:
2379         path->reada = READA_FORWARD;
2380         path->leave_spinning = 1;
2381         ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
2382         if (ret < 0) {
2383                 err = ret;
2384                 goto out;
2385         }
2386         if (ret > 0) {
2387                 if (metadata) {
2388                         if (path->slots[0] > 0) {
2389                                 path->slots[0]--;
2390                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2391                                                       path->slots[0]);
2392                                 if (key.objectid == head->bytenr &&
2393                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2394                                     key.offset == head->num_bytes)
2395                                         ret = 0;
2396                         }
2397                         if (ret > 0) {
2398                                 btrfs_release_path(path);
2399                                 metadata = 0;
2400
2401                                 key.objectid = head->bytenr;
2402                                 key.offset = head->num_bytes;
2403                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2404                                 goto again;
2405                         }
2406                 } else {
2407                         err = -EIO;
2408                         goto out;
2409                 }
2410         }
2411
2412         leaf = path->nodes[0];
2413         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2414 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2415         if (item_size < sizeof(*ei)) {
2416                 ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0);
2417                 if (ret < 0) {
2418                         err = ret;
2419                         goto out;
2420                 }
2421                 leaf = path->nodes[0];
2422                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2423         }
2424 #endif
2425         BUG_ON(item_size < sizeof(*ei));
2426         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2427         __run_delayed_extent_op(extent_op, leaf, ei);
2428
2429         btrfs_mark_buffer_dirty(leaf);
2430 out:
2431         btrfs_free_path(path);
2432         return err;
2433 }
2434
2435 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2436                                 struct btrfs_fs_info *fs_info,
2437                                 struct btrfs_delayed_ref_node *node,
2438                                 struct btrfs_delayed_extent_op *extent_op,
2439                                 int insert_reserved)
2440 {
2441         int ret = 0;
2442         struct btrfs_delayed_tree_ref *ref;
2443         struct btrfs_key ins;
2444         u64 parent = 0;
2445         u64 ref_root = 0;
2446         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
2447
2448         ref = btrfs_delayed_node_to_tree_ref(node);
2449         trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
2450
2451         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2452                 parent = ref->parent;
2453         ref_root = ref->root;
2454
2455         ins.objectid = node->bytenr;
2456         if (skinny_metadata) {
2457                 ins.offset = ref->level;
2458                 ins.type = BTRFS_METADATA_ITEM_KEY;
2459         } else {
2460                 ins.offset = node->num_bytes;
2461                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2462         }
2463
2464         if (node->ref_mod != 1) {
2465                 btrfs_err(fs_info,
2466         "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2467                           node->bytenr, node->ref_mod, node->action, ref_root,
2468                           parent);
2469                 return -EIO;
2470         }
2471         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2472                 BUG_ON(!extent_op || !extent_op->update_flags);
2473                 ret = alloc_reserved_tree_block(trans, fs_info,
2474                                                 parent, ref_root,
2475                                                 extent_op->flags_to_set,
2476                                                 &extent_op->key,
2477                                                 ref->level, &ins);
2478         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2479                 ret = __btrfs_inc_extent_ref(trans, fs_info, node,
2480                                              parent, ref_root,
2481                                              ref->level, 0, 1,
2482                                              extent_op);
2483         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2484                 ret = __btrfs_free_extent(trans, fs_info, node,
2485                                           parent, ref_root,
2486                                           ref->level, 0, 1, extent_op);
2487         } else {
2488                 BUG();
2489         }
2490         return ret;
2491 }
2492
2493 /* helper function to actually process a single delayed ref entry */
2494 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2495                                struct btrfs_fs_info *fs_info,
2496                                struct btrfs_delayed_ref_node *node,
2497                                struct btrfs_delayed_extent_op *extent_op,
2498                                int insert_reserved)
2499 {
2500         int ret = 0;
2501
2502         if (trans->aborted) {
2503                 if (insert_reserved)
2504                         btrfs_pin_extent(fs_info, node->bytenr,
2505                                          node->num_bytes, 1);
2506                 return 0;
2507         }
2508
2509         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2510             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2511                 ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
2512                                            insert_reserved);
2513         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2514                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2515                 ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
2516                                            insert_reserved);
2517         else
2518                 BUG();
2519         return ret;
2520 }
2521
2522 static inline struct btrfs_delayed_ref_node *
2523 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2524 {
2525         struct btrfs_delayed_ref_node *ref;
2526
2527         if (RB_EMPTY_ROOT(&head->ref_tree))
2528                 return NULL;
2529
2530         /*
2531          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2532          * This is to prevent a ref count from going down to zero, which deletes
2533          * the extent item from the extent tree, when there still are references
2534          * to add, which would fail because they would not find the extent item.
2535          */
2536         if (!list_empty(&head->ref_add_list))
2537                 return list_first_entry(&head->ref_add_list,
2538                                 struct btrfs_delayed_ref_node, add_list);
2539
2540         ref = rb_entry(rb_first(&head->ref_tree),
2541                        struct btrfs_delayed_ref_node, ref_node);
2542         ASSERT(list_empty(&ref->add_list));
2543         return ref;
2544 }
2545
2546 static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
2547                                       struct btrfs_delayed_ref_head *head)
2548 {
2549         spin_lock(&delayed_refs->lock);
2550         head->processing = 0;
2551         delayed_refs->num_heads_ready++;
2552         spin_unlock(&delayed_refs->lock);
2553         btrfs_delayed_ref_unlock(head);
2554 }
2555
2556 static int cleanup_extent_op(struct btrfs_trans_handle *trans,
2557                              struct btrfs_fs_info *fs_info,
2558                              struct btrfs_delayed_ref_head *head)
2559 {
2560         struct btrfs_delayed_extent_op *extent_op = head->extent_op;
2561         int ret;
2562
2563         if (!extent_op)
2564                 return 0;
2565         head->extent_op = NULL;
2566         if (head->must_insert_reserved) {
2567                 btrfs_free_delayed_extent_op(extent_op);
2568                 return 0;
2569         }
2570         spin_unlock(&head->lock);
2571         ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
2572         btrfs_free_delayed_extent_op(extent_op);
2573         return ret ? ret : 1;
2574 }
2575
2576 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
2577                             struct btrfs_fs_info *fs_info,
2578                             struct btrfs_delayed_ref_head *head)
2579 {
2580         struct btrfs_delayed_ref_root *delayed_refs;
2581         int ret;
2582
2583         delayed_refs = &trans->transaction->delayed_refs;
2584
2585         ret = cleanup_extent_op(trans, fs_info, head);
2586         if (ret < 0) {
2587                 unselect_delayed_ref_head(delayed_refs, head);
2588                 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2589                 return ret;
2590         } else if (ret) {
2591                 return ret;
2592         }
2593
2594         /*
2595          * Need to drop our head ref lock and re-acquire the delayed ref lock
2596          * and then re-check to make sure nobody got added.
2597          */
2598         spin_unlock(&head->lock);
2599         spin_lock(&delayed_refs->lock);
2600         spin_lock(&head->lock);
2601         if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
2602                 spin_unlock(&head->lock);
2603                 spin_unlock(&delayed_refs->lock);
2604                 return 1;
2605         }
2606         delayed_refs->num_heads--;
2607         rb_erase(&head->href_node, &delayed_refs->href_root);
2608         RB_CLEAR_NODE(&head->href_node);
2609         spin_unlock(&delayed_refs->lock);
2610         spin_unlock(&head->lock);
2611         atomic_dec(&delayed_refs->num_entries);
2612
2613         trace_run_delayed_ref_head(fs_info, head, 0);
2614
2615         if (head->total_ref_mod < 0) {
2616                 struct btrfs_block_group_cache *cache;
2617
2618                 cache = btrfs_lookup_block_group(fs_info, head->bytenr);
2619                 ASSERT(cache);
2620                 percpu_counter_add(&cache->space_info->total_bytes_pinned,
2621                                    -head->num_bytes);
2622                 btrfs_put_block_group(cache);
2623
2624                 if (head->is_data) {
2625                         spin_lock(&delayed_refs->lock);
2626                         delayed_refs->pending_csums -= head->num_bytes;
2627                         spin_unlock(&delayed_refs->lock);
2628                 }
2629         }
2630
2631         if (head->must_insert_reserved) {
2632                 btrfs_pin_extent(fs_info, head->bytenr,
2633                                  head->num_bytes, 1);
2634                 if (head->is_data) {
2635                         ret = btrfs_del_csums(trans, fs_info, head->bytenr,
2636                                               head->num_bytes);
2637                 }
2638         }
2639
2640         /* Also free its reserved qgroup space */
2641         btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
2642                                       head->qgroup_reserved);
2643         btrfs_delayed_ref_unlock(head);
2644         btrfs_put_delayed_ref_head(head);
2645         return 0;
2646 }
2647
2648 /*
2649  * Returns 0 on success or if called with an already aborted transaction.
2650  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2651  */
2652 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2653                                              unsigned long nr)
2654 {
2655         struct btrfs_fs_info *fs_info = trans->fs_info;
2656         struct btrfs_delayed_ref_root *delayed_refs;
2657         struct btrfs_delayed_ref_node *ref;
2658         struct btrfs_delayed_ref_head *locked_ref = NULL;
2659         struct btrfs_delayed_extent_op *extent_op;
2660         ktime_t start = ktime_get();
2661         int ret;
2662         unsigned long count = 0;
2663         unsigned long actual_count = 0;
2664         int must_insert_reserved = 0;
2665
2666         delayed_refs = &trans->transaction->delayed_refs;
2667         while (1) {
2668                 if (!locked_ref) {
2669                         if (count >= nr)
2670                                 break;
2671
2672                         spin_lock(&delayed_refs->lock);
2673                         locked_ref = btrfs_select_ref_head(trans);
2674                         if (!locked_ref) {
2675                                 spin_unlock(&delayed_refs->lock);
2676                                 break;
2677                         }
2678
2679                         /* grab the lock that says we are going to process
2680                          * all the refs for this head */
2681                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2682                         spin_unlock(&delayed_refs->lock);
2683                         /*
2684                          * we may have dropped the spin lock to get the head
2685                          * mutex lock, and that might have given someone else
2686                          * time to free the head.  If that's true, it has been
2687                          * removed from our list and we can move on.
2688                          */
2689                         if (ret == -EAGAIN) {
2690                                 locked_ref = NULL;
2691                                 count++;
2692                                 continue;
2693                         }
2694                 }
2695
2696                 /*
2697                  * We need to try and merge add/drops of the same ref since we
2698                  * can run into issues with relocate dropping the implicit ref
2699                  * and then it being added back again before the drop can
2700                  * finish.  If we merged anything we need to re-loop so we can
2701                  * get a good ref.
2702                  * Or we can get node references of the same type that weren't
2703                  * merged when created due to bumps in the tree mod seq, and
2704                  * we need to merge them to prevent adding an inline extent
2705                  * backref before dropping it (triggering a BUG_ON at
2706                  * insert_inline_extent_backref()).
2707                  */
2708                 spin_lock(&locked_ref->lock);
2709                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2710                                          locked_ref);
2711
2712                 /*
2713                  * locked_ref is the head node, so we have to go one
2714                  * node back for any delayed ref updates
2715                  */
2716                 ref = select_delayed_ref(locked_ref);
2717
2718                 if (ref && ref->seq &&
2719                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2720                         spin_unlock(&locked_ref->lock);
2721                         unselect_delayed_ref_head(delayed_refs, locked_ref);
2722                         locked_ref = NULL;
2723                         cond_resched();
2724                         count++;
2725                         continue;
2726                 }
2727
2728                 /*
2729                  * We're done processing refs in this ref_head, clean everything
2730                  * up and move on to the next ref_head.
2731                  */
2732                 if (!ref) {
2733                         ret = cleanup_ref_head(trans, fs_info, locked_ref);
2734                         if (ret > 0 ) {
2735                                 /* We dropped our lock, we need to loop. */
2736                                 ret = 0;
2737                                 continue;
2738                         } else if (ret) {
2739                                 return ret;
2740                         }
2741                         locked_ref = NULL;
2742                         count++;
2743                         continue;
2744                 }
2745
2746                 actual_count++;
2747                 ref->in_tree = 0;
2748                 rb_erase(&ref->ref_node, &locked_ref->ref_tree);
2749                 RB_CLEAR_NODE(&ref->ref_node);
2750                 if (!list_empty(&ref->add_list))
2751                         list_del(&ref->add_list);
2752                 /*
2753                  * When we play the delayed ref, also correct the ref_mod on
2754                  * head
2755                  */
2756                 switch (ref->action) {
2757                 case BTRFS_ADD_DELAYED_REF:
2758                 case BTRFS_ADD_DELAYED_EXTENT:
2759                         locked_ref->ref_mod -= ref->ref_mod;
2760                         break;
2761                 case BTRFS_DROP_DELAYED_REF:
2762                         locked_ref->ref_mod += ref->ref_mod;
2763                         break;
2764                 default:
2765                         WARN_ON(1);
2766                 }
2767                 atomic_dec(&delayed_refs->num_entries);
2768
2769                 /*
2770                  * Record the must-insert_reserved flag before we drop the spin
2771                  * lock.
2772                  */
2773                 must_insert_reserved = locked_ref->must_insert_reserved;
2774                 locked_ref->must_insert_reserved = 0;
2775
2776                 extent_op = locked_ref->extent_op;
2777                 locked_ref->extent_op = NULL;
2778                 spin_unlock(&locked_ref->lock);
2779
2780                 ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
2781                                           must_insert_reserved);
2782
2783                 btrfs_free_delayed_extent_op(extent_op);
2784                 if (ret) {
2785                         unselect_delayed_ref_head(delayed_refs, locked_ref);
2786                         btrfs_put_delayed_ref(ref);
2787                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
2788                                     ret);
2789                         return ret;
2790                 }
2791
2792                 btrfs_put_delayed_ref(ref);
2793                 count++;
2794                 cond_resched();
2795         }
2796
2797         /*
2798          * We don't want to include ref heads since we can have empty ref heads
2799          * and those will drastically skew our runtime down since we just do
2800          * accounting, no actual extent tree updates.
2801          */
2802         if (actual_count > 0) {
2803                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2804                 u64 avg;
2805
2806                 /*
2807                  * We weigh the current average higher than our current runtime
2808                  * to avoid large swings in the average.
2809                  */
2810                 spin_lock(&delayed_refs->lock);
2811                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2812                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2813                 spin_unlock(&delayed_refs->lock);
2814         }
2815         return 0;
2816 }
2817
2818 #ifdef SCRAMBLE_DELAYED_REFS
2819 /*
2820  * Normally delayed refs get processed in ascending bytenr order. This
2821  * correlates in most cases to the order added. To expose dependencies on this
2822  * order, we start to process the tree in the middle instead of the beginning
2823  */
2824 static u64 find_middle(struct rb_root *root)
2825 {
2826         struct rb_node *n = root->rb_node;
2827         struct btrfs_delayed_ref_node *entry;
2828         int alt = 1;
2829         u64 middle;
2830         u64 first = 0, last = 0;
2831
2832         n = rb_first(root);
2833         if (n) {
2834                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2835                 first = entry->bytenr;
2836         }
2837         n = rb_last(root);
2838         if (n) {
2839                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2840                 last = entry->bytenr;
2841         }
2842         n = root->rb_node;
2843
2844         while (n) {
2845                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2846                 WARN_ON(!entry->in_tree);
2847
2848                 middle = entry->bytenr;
2849
2850                 if (alt)
2851                         n = n->rb_left;
2852                 else
2853                         n = n->rb_right;
2854
2855                 alt = 1 - alt;
2856         }
2857         return middle;
2858 }
2859 #endif
2860
2861 static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
2862 {
2863         u64 num_bytes;
2864
2865         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2866                              sizeof(struct btrfs_extent_inline_ref));
2867         if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2868                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2869
2870         /*
2871          * We don't ever fill up leaves all the way so multiply by 2 just to be
2872          * closer to what we're really going to want to use.
2873          */
2874         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
2875 }
2876
2877 /*
2878  * Takes the number of bytes to be csumm'ed and figures out how many leaves it
2879  * would require to store the csums for that many bytes.
2880  */
2881 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
2882 {
2883         u64 csum_size;
2884         u64 num_csums_per_leaf;
2885         u64 num_csums;
2886
2887         csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
2888         num_csums_per_leaf = div64_u64(csum_size,
2889                         (u64)btrfs_super_csum_size(fs_info->super_copy));
2890         num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
2891         num_csums += num_csums_per_leaf - 1;
2892         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2893         return num_csums;
2894 }
2895
2896 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2897                                        struct btrfs_fs_info *fs_info)
2898 {
2899         struct btrfs_block_rsv *global_rsv;
2900         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2901         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2902         unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
2903         u64 num_bytes, num_dirty_bgs_bytes;
2904         int ret = 0;
2905
2906         num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
2907         num_heads = heads_to_leaves(fs_info, num_heads);
2908         if (num_heads > 1)
2909                 num_bytes += (num_heads - 1) * fs_info->nodesize;
2910         num_bytes <<= 1;
2911         num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
2912                                                         fs_info->nodesize;
2913         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
2914                                                              num_dirty_bgs);
2915         global_rsv = &fs_info->global_block_rsv;
2916
2917         /*
2918          * If we can't allocate any more chunks lets make sure we have _lots_ of
2919          * wiggle room since running delayed refs can create more delayed refs.
2920          */
2921         if (global_rsv->space_info->full) {
2922                 num_dirty_bgs_bytes <<= 1;
2923                 num_bytes <<= 1;
2924         }
2925
2926         spin_lock(&global_rsv->lock);
2927         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2928                 ret = 1;
2929         spin_unlock(&global_rsv->lock);
2930         return ret;
2931 }
2932
2933 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2934                                        struct btrfs_fs_info *fs_info)
2935 {
2936         u64 num_entries =
2937                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2938         u64 avg_runtime;
2939         u64 val;
2940
2941         smp_mb();
2942         avg_runtime = fs_info->avg_delayed_ref_runtime;
2943         val = num_entries * avg_runtime;
2944         if (val >= NSEC_PER_SEC)
2945                 return 1;
2946         if (val >= NSEC_PER_SEC / 2)
2947                 return 2;
2948
2949         return btrfs_check_space_for_delayed_refs(trans, fs_info);
2950 }
2951
2952 struct async_delayed_refs {
2953         struct btrfs_root *root;
2954         u64 transid;
2955         int count;
2956         int error;
2957         int sync;
2958         struct completion wait;
2959         struct btrfs_work work;
2960 };
2961
2962 static inline struct async_delayed_refs *
2963 to_async_delayed_refs(struct btrfs_work *work)
2964 {
2965         return container_of(work, struct async_delayed_refs, work);
2966 }
2967
2968 static void delayed_ref_async_start(struct btrfs_work *work)
2969 {
2970         struct async_delayed_refs *async = to_async_delayed_refs(work);
2971         struct btrfs_trans_handle *trans;
2972         struct btrfs_fs_info *fs_info = async->root->fs_info;
2973         int ret;
2974
2975         /* if the commit is already started, we don't need to wait here */
2976         if (btrfs_transaction_blocked(fs_info))
2977                 goto done;
2978
2979         trans = btrfs_join_transaction(async->root);
2980         if (IS_ERR(trans)) {
2981                 async->error = PTR_ERR(trans);
2982                 goto done;
2983         }
2984
2985         /*
2986          * trans->sync means that when we call end_transaction, we won't
2987          * wait on delayed refs
2988          */
2989         trans->sync = true;
2990
2991         /* Don't bother flushing if we got into a different transaction */
2992         if (trans->transid > async->transid)
2993                 goto end;
2994
2995         ret = btrfs_run_delayed_refs(trans, async->count);
2996         if (ret)
2997                 async->error = ret;
2998 end:
2999         ret = btrfs_end_transaction(trans);
3000         if (ret && !async->error)
3001                 async->error = ret;
3002 done:
3003         if (async->sync)
3004                 complete(&async->wait);
3005         else
3006                 kfree(async);
3007 }
3008
3009 int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
3010                                  unsigned long count, u64 transid, int wait)
3011 {
3012         struct async_delayed_refs *async;
3013         int ret;
3014
3015         async = kmalloc(sizeof(*async), GFP_NOFS);
3016         if (!async)
3017                 return -ENOMEM;
3018
3019         async->root = fs_info->tree_root;
3020         async->count = count;
3021         async->error = 0;
3022         async->transid = transid;
3023         if (wait)
3024                 async->sync = 1;
3025         else
3026                 async->sync = 0;
3027         init_completion(&async->wait);
3028
3029         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
3030                         delayed_ref_async_start, NULL, NULL);
3031
3032         btrfs_queue_work(fs_info->extent_workers, &async->work);
3033
3034         if (wait) {
3035                 wait_for_completion(&async->wait);
3036                 ret = async->error;
3037                 kfree(async);
3038                 return ret;
3039         }
3040         return 0;
3041 }
3042
3043 /*
3044  * this starts processing the delayed reference count updates and
3045  * extent insertions we have queued up so far.  count can be
3046  * 0, which means to process everything in the tree at the start
3047  * of the run (but not newly added entries), or it can be some target
3048  * number you'd like to process.
3049  *
3050  * Returns 0 on success or if called with an aborted transaction
3051  * Returns <0 on error and aborts the transaction
3052  */
3053 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
3054                            unsigned long count)
3055 {
3056         struct btrfs_fs_info *fs_info = trans->fs_info;
3057         struct rb_node *node;
3058         struct btrfs_delayed_ref_root *delayed_refs;
3059         struct btrfs_delayed_ref_head *head;
3060         int ret;
3061         int run_all = count == (unsigned long)-1;
3062         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
3063
3064         /* We'll clean this up in btrfs_cleanup_transaction */
3065         if (trans->aborted)
3066                 return 0;
3067
3068         if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
3069                 return 0;
3070
3071         delayed_refs = &trans->transaction->delayed_refs;
3072         if (count == 0)
3073                 count = atomic_read(&delayed_refs->num_entries) * 2;
3074
3075 again:
3076 #ifdef SCRAMBLE_DELAYED_REFS
3077         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
3078 #endif
3079         trans->can_flush_pending_bgs = false;
3080         ret = __btrfs_run_delayed_refs(trans, count);
3081         if (ret < 0) {
3082                 btrfs_abort_transaction(trans, ret);
3083                 return ret;
3084         }
3085
3086         if (run_all) {
3087                 if (!list_empty(&trans->new_bgs))
3088                         btrfs_create_pending_block_groups(trans);
3089
3090                 spin_lock(&delayed_refs->lock);
3091                 node = rb_first(&delayed_refs->href_root);
3092                 if (!node) {
3093                         spin_unlock(&delayed_refs->lock);
3094                         goto out;
3095                 }
3096                 head = rb_entry(node, struct btrfs_delayed_ref_head,
3097                                 href_node);
3098                 refcount_inc(&head->refs);
3099                 spin_unlock(&delayed_refs->lock);
3100
3101                 /* Mutex was contended, block until it's released and retry. */
3102                 mutex_lock(&head->mutex);
3103                 mutex_unlock(&head->mutex);
3104
3105                 btrfs_put_delayed_ref_head(head);
3106                 cond_resched();
3107                 goto again;
3108         }
3109 out:
3110         trans->can_flush_pending_bgs = can_flush_pending_bgs;
3111         return 0;
3112 }
3113
3114 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3115                                 struct btrfs_fs_info *fs_info,
3116                                 u64 bytenr, u64 num_bytes, u64 flags,
3117                                 int level, int is_data)
3118 {
3119         struct btrfs_delayed_extent_op *extent_op;
3120         int ret;
3121
3122         extent_op = btrfs_alloc_delayed_extent_op();
3123         if (!extent_op)
3124                 return -ENOMEM;
3125
3126         extent_op->flags_to_set = flags;
3127         extent_op->update_flags = true;
3128         extent_op->update_key = false;
3129         extent_op->is_data = is_data ? true : false;
3130         extent_op->level = level;
3131
3132         ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
3133                                           num_bytes, extent_op);
3134         if (ret)
3135                 btrfs_free_delayed_extent_op(extent_op);
3136         return ret;
3137 }
3138
3139 static noinline int check_delayed_ref(struct btrfs_root *root,
3140                                       struct btrfs_path *path,
3141                                       u64 objectid, u64 offset, u64 bytenr)
3142 {
3143         struct btrfs_delayed_ref_head *head;
3144         struct btrfs_delayed_ref_node *ref;
3145         struct btrfs_delayed_data_ref *data_ref;
3146         struct btrfs_delayed_ref_root *delayed_refs;
3147         struct btrfs_transaction *cur_trans;
3148         struct rb_node *node;
3149         int ret = 0;
3150
3151         cur_trans = root->fs_info->running_transaction;
3152         if (!cur_trans)
3153                 return 0;
3154
3155         delayed_refs = &cur_trans->delayed_refs;
3156         spin_lock(&delayed_refs->lock);
3157         head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
3158         if (!head) {
3159                 spin_unlock(&delayed_refs->lock);
3160                 return 0;
3161         }
3162
3163         if (!mutex_trylock(&head->mutex)) {
3164                 refcount_inc(&head->refs);
3165                 spin_unlock(&delayed_refs->lock);
3166
3167                 btrfs_release_path(path);
3168
3169                 /*
3170                  * Mutex was contended, block until it's released and let
3171                  * caller try again
3172                  */
3173                 mutex_lock(&head->mutex);
3174                 mutex_unlock(&head->mutex);
3175                 btrfs_put_delayed_ref_head(head);
3176                 return -EAGAIN;
3177         }
3178         spin_unlock(&delayed_refs->lock);
3179
3180         spin_lock(&head->lock);
3181         /*
3182          * XXX: We should replace this with a proper search function in the
3183          * future.
3184          */
3185         for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
3186                 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
3187                 /* If it's a shared ref we know a cross reference exists */
3188                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3189                         ret = 1;
3190                         break;
3191                 }
3192
3193                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3194
3195                 /*
3196                  * If our ref doesn't match the one we're currently looking at
3197                  * then we have a cross reference.
3198                  */
3199                 if (data_ref->root != root->root_key.objectid ||
3200                     data_ref->objectid != objectid ||
3201                     data_ref->offset != offset) {
3202                         ret = 1;
3203                         break;
3204                 }
3205         }
3206         spin_unlock(&head->lock);
3207         mutex_unlock(&head->mutex);
3208         return ret;
3209 }
3210
3211 static noinline int check_committed_ref(struct btrfs_root *root,
3212                                         struct btrfs_path *path,
3213                                         u64 objectid, u64 offset, u64 bytenr)
3214 {
3215         struct btrfs_fs_info *fs_info = root->fs_info;
3216         struct btrfs_root *extent_root = fs_info->extent_root;
3217         struct extent_buffer *leaf;
3218         struct btrfs_extent_data_ref *ref;
3219         struct btrfs_extent_inline_ref *iref;
3220         struct btrfs_extent_item *ei;
3221         struct btrfs_key key;
3222         u32 item_size;
3223         int type;
3224         int ret;
3225
3226         key.objectid = bytenr;
3227         key.offset = (u64)-1;
3228         key.type = BTRFS_EXTENT_ITEM_KEY;
3229
3230         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3231         if (ret < 0)
3232                 goto out;
3233         BUG_ON(ret == 0); /* Corruption */
3234
3235         ret = -ENOENT;
3236         if (path->slots[0] == 0)
3237                 goto out;
3238
3239         path->slots[0]--;
3240         leaf = path->nodes[0];
3241         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3242
3243         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3244                 goto out;
3245
3246         ret = 1;
3247         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3248 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3249         if (item_size < sizeof(*ei)) {
3250                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3251                 goto out;
3252         }
3253 #endif
3254         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3255
3256         if (item_size != sizeof(*ei) +
3257             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3258                 goto out;
3259
3260         if (btrfs_extent_generation(leaf, ei) <=
3261             btrfs_root_last_snapshot(&root->root_item))
3262                 goto out;
3263
3264         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3265
3266         type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
3267         if (type != BTRFS_EXTENT_DATA_REF_KEY)
3268                 goto out;
3269
3270         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3271         if (btrfs_extent_refs(leaf, ei) !=
3272             btrfs_extent_data_ref_count(leaf, ref) ||
3273             btrfs_extent_data_ref_root(leaf, ref) !=
3274             root->root_key.objectid ||
3275             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3276             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3277                 goto out;
3278
3279         ret = 0;
3280 out:
3281         return ret;
3282 }
3283
3284 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
3285                           u64 bytenr)
3286 {
3287         struct btrfs_path *path;
3288         int ret;
3289         int ret2;
3290
3291         path = btrfs_alloc_path();
3292         if (!path)
3293                 return -ENOENT;
3294
3295         do {
3296                 ret = check_committed_ref(root, path, objectid,
3297                                           offset, bytenr);
3298                 if (ret && ret != -ENOENT)
3299                         goto out;
3300
3301                 ret2 = check_delayed_ref(root, path, objectid,
3302                                          offset, bytenr);
3303         } while (ret2 == -EAGAIN);
3304
3305         if (ret2 && ret2 != -ENOENT) {
3306                 ret = ret2;
3307                 goto out;
3308         }
3309
3310         if (ret != -ENOENT || ret2 != -ENOENT)
3311                 ret = 0;
3312 out:
3313         btrfs_free_path(path);
3314         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3315                 WARN_ON(ret > 0);
3316         return ret;
3317 }
3318
3319 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3320                            struct btrfs_root *root,
3321                            struct extent_buffer *buf,
3322                            int full_backref, int inc)
3323 {
3324         struct btrfs_fs_info *fs_info = root->fs_info;
3325         u64 bytenr;
3326         u64 num_bytes;
3327         u64 parent;
3328         u64 ref_root;
3329         u32 nritems;
3330         struct btrfs_key key;
3331         struct btrfs_file_extent_item *fi;
3332         int i;
3333         int level;
3334         int ret = 0;
3335         int (*process_func)(struct btrfs_trans_handle *,
3336                             struct btrfs_root *,
3337                             u64, u64, u64, u64, u64, u64);
3338
3339
3340         if (btrfs_is_testing(fs_info))
3341                 return 0;
3342
3343         ref_root = btrfs_header_owner(buf);
3344         nritems = btrfs_header_nritems(buf);
3345         level = btrfs_header_level(buf);
3346
3347         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3348                 return 0;
3349
3350         if (inc)
3351                 process_func = btrfs_inc_extent_ref;
3352         else
3353                 process_func = btrfs_free_extent;
3354
3355         if (full_backref)
3356                 parent = buf->start;
3357         else
3358                 parent = 0;
3359
3360         for (i = 0; i < nritems; i++) {
3361                 if (level == 0) {
3362                         btrfs_item_key_to_cpu(buf, &key, i);
3363                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3364                                 continue;
3365                         fi = btrfs_item_ptr(buf, i,
3366                                             struct btrfs_file_extent_item);
3367                         if (btrfs_file_extent_type(buf, fi) ==
3368                             BTRFS_FILE_EXTENT_INLINE)
3369                                 continue;
3370                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3371                         if (bytenr == 0)
3372                                 continue;
3373
3374                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3375                         key.offset -= btrfs_file_extent_offset(buf, fi);
3376                         ret = process_func(trans, root, bytenr, num_bytes,
3377                                            parent, ref_root, key.objectid,
3378                                            key.offset);
3379                         if (ret)
3380                                 goto fail;
3381                 } else {
3382                         bytenr = btrfs_node_blockptr(buf, i);
3383                         num_bytes = fs_info->nodesize;
3384                         ret = process_func(trans, root, bytenr, num_bytes,
3385                                            parent, ref_root, level - 1, 0);
3386                         if (ret)
3387                                 goto fail;
3388                 }
3389         }
3390         return 0;
3391 fail:
3392         return ret;
3393 }
3394
3395 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3396                   struct extent_buffer *buf, int full_backref)
3397 {
3398         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3399 }
3400
3401 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3402                   struct extent_buffer *buf, int full_backref)
3403 {
3404         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3405 }
3406
3407 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3408                                  struct btrfs_fs_info *fs_info,
3409                                  struct btrfs_path *path,
3410                                  struct btrfs_block_group_cache *cache)
3411 {
3412         int ret;
3413         struct btrfs_root *extent_root = fs_info->extent_root;
3414         unsigned long bi;
3415         struct extent_buffer *leaf;
3416
3417         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3418         if (ret) {
3419                 if (ret > 0)
3420                         ret = -ENOENT;
3421                 goto fail;
3422         }
3423
3424         leaf = path->nodes[0];
3425         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3426         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3427         btrfs_mark_buffer_dirty(leaf);
3428 fail:
3429         btrfs_release_path(path);
3430         return ret;
3431
3432 }
3433
3434 static struct btrfs_block_group_cache *
3435 next_block_group(struct btrfs_fs_info *fs_info,
3436                  struct btrfs_block_group_cache *cache)
3437 {
3438         struct rb_node *node;
3439
3440         spin_lock(&fs_info->block_group_cache_lock);
3441
3442         /* If our block group was removed, we need a full search. */
3443         if (RB_EMPTY_NODE(&cache->cache_node)) {
3444                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3445
3446                 spin_unlock(&fs_info->block_group_cache_lock);
3447                 btrfs_put_block_group(cache);
3448                 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
3449         }
3450         node = rb_next(&cache->cache_node);
3451         btrfs_put_block_group(cache);
3452         if (node) {
3453                 cache = rb_entry(node, struct btrfs_block_group_cache,
3454                                  cache_node);
3455                 btrfs_get_block_group(cache);
3456         } else
3457                 cache = NULL;
3458         spin_unlock(&fs_info->block_group_cache_lock);
3459         return cache;
3460 }
3461
3462 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3463                             struct btrfs_trans_handle *trans,
3464                             struct btrfs_path *path)
3465 {
3466         struct btrfs_fs_info *fs_info = block_group->fs_info;
3467         struct btrfs_root *root = fs_info->tree_root;
3468         struct inode *inode = NULL;
3469         struct extent_changeset *data_reserved = NULL;
3470         u64 alloc_hint = 0;
3471         int dcs = BTRFS_DC_ERROR;
3472         u64 num_pages = 0;
3473         int retries = 0;
3474         int ret = 0;
3475
3476         /*
3477          * If this block group is smaller than 100 megs don't bother caching the
3478          * block group.
3479          */
3480         if (block_group->key.offset < (100 * SZ_1M)) {
3481                 spin_lock(&block_group->lock);
3482                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3483                 spin_unlock(&block_group->lock);
3484                 return 0;
3485         }
3486
3487         if (trans->aborted)
3488                 return 0;
3489 again:
3490         inode = lookup_free_space_inode(fs_info, block_group, path);
3491         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3492                 ret = PTR_ERR(inode);
3493                 btrfs_release_path(path);
3494                 goto out;
3495         }
3496
3497         if (IS_ERR(inode)) {
3498                 BUG_ON(retries);
3499                 retries++;
3500
3501                 if (block_group->ro)
3502                         goto out_free;
3503
3504                 ret = create_free_space_inode(fs_info, trans, block_group,
3505                                               path);
3506                 if (ret)
3507                         goto out_free;
3508                 goto again;
3509         }
3510
3511         /*
3512          * We want to set the generation to 0, that way if anything goes wrong
3513          * from here on out we know not to trust this cache when we load up next
3514          * time.
3515          */
3516         BTRFS_I(inode)->generation = 0;
3517         ret = btrfs_update_inode(trans, root, inode);
3518         if (ret) {
3519                 /*
3520                  * So theoretically we could recover from this, simply set the
3521                  * super cache generation to 0 so we know to invalidate the
3522                  * cache, but then we'd have to keep track of the block groups
3523                  * that fail this way so we know we _have_ to reset this cache
3524                  * before the next commit or risk reading stale cache.  So to
3525                  * limit our exposure to horrible edge cases lets just abort the
3526                  * transaction, this only happens in really bad situations
3527                  * anyway.
3528                  */
3529                 btrfs_abort_transaction(trans, ret);
3530                 goto out_put;
3531         }
3532         WARN_ON(ret);
3533
3534         /* We've already setup this transaction, go ahead and exit */
3535         if (block_group->cache_generation == trans->transid &&
3536             i_size_read(inode)) {
3537                 dcs = BTRFS_DC_SETUP;
3538                 goto out_put;
3539         }
3540
3541         if (i_size_read(inode) > 0) {
3542                 ret = btrfs_check_trunc_cache_free_space(fs_info,
3543                                         &fs_info->global_block_rsv);
3544                 if (ret)
3545                         goto out_put;
3546
3547                 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
3548                 if (ret)
3549                         goto out_put;
3550         }
3551
3552         spin_lock(&block_group->lock);
3553         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3554             !btrfs_test_opt(fs_info, SPACE_CACHE)) {
3555                 /*
3556                  * don't bother trying to write stuff out _if_
3557                  * a) we're not cached,
3558                  * b) we're with nospace_cache mount option,
3559                  * c) we're with v2 space_cache (FREE_SPACE_TREE).
3560                  */
3561                 dcs = BTRFS_DC_WRITTEN;
3562                 spin_unlock(&block_group->lock);
3563                 goto out_put;
3564         }
3565         spin_unlock(&block_group->lock);
3566
3567         /*
3568          * We hit an ENOSPC when setting up the cache in this transaction, just
3569          * skip doing the setup, we've already cleared the cache so we're safe.
3570          */
3571         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3572                 ret = -ENOSPC;
3573                 goto out_put;
3574         }
3575
3576         /*
3577          * Try to preallocate enough space based on how big the block group is.
3578          * Keep in mind this has to include any pinned space which could end up
3579          * taking up quite a bit since it's not folded into the other space
3580          * cache.
3581          */
3582         num_pages = div_u64(block_group->key.offset, SZ_256M);
3583         if (!num_pages)
3584                 num_pages = 1;
3585
3586         num_pages *= 16;
3587         num_pages *= PAGE_SIZE;
3588
3589         ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
3590         if (ret)
3591                 goto out_put;
3592
3593         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3594                                               num_pages, num_pages,
3595                                               &alloc_hint);
3596         /*
3597          * Our cache requires contiguous chunks so that we don't modify a bunch
3598          * of metadata or split extents when writing the cache out, which means
3599          * we can enospc if we are heavily fragmented in addition to just normal
3600          * out of space conditions.  So if we hit this just skip setting up any
3601          * other block groups for this transaction, maybe we'll unpin enough
3602          * space the next time around.
3603          */
3604         if (!ret)
3605                 dcs = BTRFS_DC_SETUP;
3606         else if (ret == -ENOSPC)
3607                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3608
3609 out_put:
3610         iput(inode);
3611 out_free:
3612         btrfs_release_path(path);
3613 out:
3614         spin_lock(&block_group->lock);
3615         if (!ret && dcs == BTRFS_DC_SETUP)
3616                 block_group->cache_generation = trans->transid;
3617         block_group->disk_cache_state = dcs;
3618         spin_unlock(&block_group->lock);
3619
3620         extent_changeset_free(data_reserved);
3621         return ret;
3622 }
3623
3624 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3625                             struct btrfs_fs_info *fs_info)
3626 {
3627         struct btrfs_block_group_cache *cache, *tmp;
3628         struct btrfs_transaction *cur_trans = trans->transaction;
3629         struct btrfs_path *path;
3630
3631         if (list_empty(&cur_trans->dirty_bgs) ||
3632             !btrfs_test_opt(fs_info, SPACE_CACHE))
3633                 return 0;
3634
3635         path = btrfs_alloc_path();
3636         if (!path)
3637                 return -ENOMEM;
3638
3639         /* Could add new block groups, use _safe just in case */
3640         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3641                                  dirty_list) {
3642                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3643                         cache_save_setup(cache, trans, path);
3644         }
3645
3646         btrfs_free_path(path);
3647         return 0;
3648 }
3649
3650 /*
3651  * transaction commit does final block group cache writeback during a
3652  * critical section where nothing is allowed to change the FS.  This is
3653  * required in order for the cache to actually match the block group,
3654  * but can introduce a lot of latency into the commit.
3655  *
3656  * So, btrfs_start_dirty_block_groups is here to kick off block group
3657  * cache IO.  There's a chance we'll have to redo some of it if the
3658  * block group changes again during the commit, but it greatly reduces
3659  * the commit latency by getting rid of the easy block groups while
3660  * we're still allowing others to join the commit.
3661  */
3662 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
3663 {
3664         struct btrfs_fs_info *fs_info = trans->fs_info;
3665         struct btrfs_block_group_cache *cache;
3666         struct btrfs_transaction *cur_trans = trans->transaction;
3667         int ret = 0;
3668         int should_put;
3669         struct btrfs_path *path = NULL;
3670         LIST_HEAD(dirty);
3671         struct list_head *io = &cur_trans->io_bgs;
3672         int num_started = 0;
3673         int loops = 0;
3674
3675         spin_lock(&cur_trans->dirty_bgs_lock);
3676         if (list_empty(&cur_trans->dirty_bgs)) {
3677                 spin_unlock(&cur_trans->dirty_bgs_lock);
3678                 return 0;
3679         }
3680         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3681         spin_unlock(&cur_trans->dirty_bgs_lock);
3682
3683 again:
3684         /*
3685          * make sure all the block groups on our dirty list actually
3686          * exist
3687          */
3688         btrfs_create_pending_block_groups(trans);
3689
3690         if (!path) {
3691                 path = btrfs_alloc_path();
3692                 if (!path)
3693                         return -ENOMEM;
3694         }
3695
3696         /*
3697          * cache_write_mutex is here only to save us from balance or automatic
3698          * removal of empty block groups deleting this block group while we are
3699          * writing out the cache
3700          */
3701         mutex_lock(&trans->transaction->cache_write_mutex);
3702         while (!list_empty(&dirty)) {
3703                 cache = list_first_entry(&dirty,
3704                                          struct btrfs_block_group_cache,
3705                                          dirty_list);
3706                 /*
3707                  * this can happen if something re-dirties a block
3708                  * group that is already under IO.  Just wait for it to
3709                  * finish and then do it all again
3710                  */
3711                 if (!list_empty(&cache->io_list)) {
3712                         list_del_init(&cache->io_list);
3713                         btrfs_wait_cache_io(trans, cache, path);
3714                         btrfs_put_block_group(cache);
3715                 }
3716
3717
3718                 /*
3719                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3720                  * if it should update the cache_state.  Don't delete
3721                  * until after we wait.
3722                  *
3723                  * Since we're not running in the commit critical section
3724                  * we need the dirty_bgs_lock to protect from update_block_group
3725                  */
3726                 spin_lock(&cur_trans->dirty_bgs_lock);
3727                 list_del_init(&cache->dirty_list);
3728                 spin_unlock(&cur_trans->dirty_bgs_lock);
3729
3730                 should_put = 1;
3731
3732                 cache_save_setup(cache, trans, path);
3733
3734                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3735                         cache->io_ctl.inode = NULL;
3736                         ret = btrfs_write_out_cache(fs_info, trans,
3737                                                     cache, path);
3738                         if (ret == 0 && cache->io_ctl.inode) {
3739                                 num_started++;
3740                                 should_put = 0;
3741
3742                                 /*
3743                                  * The cache_write_mutex is protecting the
3744                                  * io_list, also refer to the definition of
3745                                  * btrfs_transaction::io_bgs for more details
3746                                  */
3747                                 list_add_tail(&cache->io_list, io);
3748                         } else {
3749                                 /*
3750                                  * if we failed to write the cache, the
3751                                  * generation will be bad and life goes on
3752                                  */
3753                                 ret = 0;
3754                         }
3755                 }
3756                 if (!ret) {
3757                         ret = write_one_cache_group(trans, fs_info,
3758                                                     path, cache);
3759                         /*
3760                          * Our block group might still be attached to the list
3761                          * of new block groups in the transaction handle of some
3762                          * other task (struct btrfs_trans_handle->new_bgs). This
3763                          * means its block group item isn't yet in the extent
3764                          * tree. If this happens ignore the error, as we will
3765                          * try again later in the critical section of the
3766                          * transaction commit.
3767                          */
3768                         if (ret == -ENOENT) {
3769                                 ret = 0;
3770                                 spin_lock(&cur_trans->dirty_bgs_lock);
3771                                 if (list_empty(&cache->dirty_list)) {
3772                                         list_add_tail(&cache->dirty_list,
3773                                                       &cur_trans->dirty_bgs);
3774                                         btrfs_get_block_group(cache);
3775                                 }
3776                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3777                         } else if (ret) {
3778                                 btrfs_abort_transaction(trans, ret);
3779                         }
3780                 }
3781
3782                 /* if its not on the io list, we need to put the block group */
3783                 if (should_put)
3784                         btrfs_put_block_group(cache);
3785
3786                 if (ret)
3787                         break;
3788
3789                 /*
3790                  * Avoid blocking other tasks for too long. It might even save
3791                  * us from writing caches for block groups that are going to be
3792                  * removed.
3793                  */
3794                 mutex_unlock(&trans->transaction->cache_write_mutex);
3795                 mutex_lock(&trans->transaction->cache_write_mutex);
3796         }
3797         mutex_unlock(&trans->transaction->cache_write_mutex);
3798
3799         /*
3800          * go through delayed refs for all the stuff we've just kicked off
3801          * and then loop back (just once)
3802          */
3803         ret = btrfs_run_delayed_refs(trans, 0);
3804         if (!ret && loops == 0) {
3805                 loops++;
3806                 spin_lock(&cur_trans->dirty_bgs_lock);
3807                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3808                 /*
3809                  * dirty_bgs_lock protects us from concurrent block group
3810                  * deletes too (not just cache_write_mutex).
3811                  */
3812                 if (!list_empty(&dirty)) {
3813                         spin_unlock(&cur_trans->dirty_bgs_lock);
3814                         goto again;
3815                 }
3816                 spin_unlock(&cur_trans->dirty_bgs_lock);
3817         } else if (ret < 0) {
3818                 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3819         }
3820
3821         btrfs_free_path(path);
3822         return ret;
3823 }
3824
3825 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3826                                    struct btrfs_fs_info *fs_info)
3827 {
3828         struct btrfs_block_group_cache *cache;
3829         struct btrfs_transaction *cur_trans = trans->transaction;
3830         int ret = 0;
3831         int should_put;
3832         struct btrfs_path *path;
3833         struct list_head *io = &cur_trans->io_bgs;
3834         int num_started = 0;
3835
3836         path = btrfs_alloc_path();
3837         if (!path)
3838                 return -ENOMEM;
3839
3840         /*
3841          * Even though we are in the critical section of the transaction commit,
3842          * we can still have concurrent tasks adding elements to this
3843          * transaction's list of dirty block groups. These tasks correspond to
3844          * endio free space workers started when writeback finishes for a
3845          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3846          * allocate new block groups as a result of COWing nodes of the root
3847          * tree when updating the free space inode. The writeback for the space
3848          * caches is triggered by an earlier call to
3849          * btrfs_start_dirty_block_groups() and iterations of the following
3850          * loop.
3851          * Also we want to do the cache_save_setup first and then run the
3852          * delayed refs to make sure we have the best chance at doing this all
3853          * in one shot.
3854          */
3855         spin_lock(&cur_trans->dirty_bgs_lock);
3856         while (!list_empty(&cur_trans->dirty_bgs)) {
3857                 cache = list_first_entry(&cur_trans->dirty_bgs,
3858                                          struct btrfs_block_group_cache,
3859                                          dirty_list);
3860
3861                 /*
3862                  * this can happen if cache_save_setup re-dirties a block
3863                  * group that is already under IO.  Just wait for it to
3864                  * finish and then do it all again
3865                  */
3866                 if (!list_empty(&cache->io_list)) {
3867                         spin_unlock(&cur_trans->dirty_bgs_lock);
3868                         list_del_init(&cache->io_list);
3869                         btrfs_wait_cache_io(trans, cache, path);
3870                         btrfs_put_block_group(cache);
3871                         spin_lock(&cur_trans->dirty_bgs_lock);
3872                 }
3873
3874                 /*
3875                  * don't remove from the dirty list until after we've waited
3876                  * on any pending IO
3877                  */
3878                 list_del_init(&cache->dirty_list);
3879                 spin_unlock(&cur_trans->dirty_bgs_lock);
3880                 should_put = 1;
3881
3882                 cache_save_setup(cache, trans, path);
3883
3884                 if (!ret)
3885                         ret = btrfs_run_delayed_refs(trans,
3886                                                      (unsigned long) -1);
3887
3888                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3889                         cache->io_ctl.inode = NULL;
3890                         ret = btrfs_write_out_cache(fs_info, trans,
3891                                                     cache, path);
3892                         if (ret == 0 && cache->io_ctl.inode) {
3893                                 num_started++;
3894                                 should_put = 0;
3895                                 list_add_tail(&cache->io_list, io);
3896                         } else {
3897                                 /*
3898                                  * if we failed to write the cache, the
3899                                  * generation will be bad and life goes on
3900                                  */
3901                                 ret = 0;
3902                         }
3903                 }
3904                 if (!ret) {
3905                         ret = write_one_cache_group(trans, fs_info,
3906                                                     path, cache);
3907                         /*
3908                          * One of the free space endio workers might have
3909                          * created a new block group while updating a free space
3910                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
3911                          * and hasn't released its transaction handle yet, in
3912                          * which case the new block group is still attached to
3913                          * its transaction handle and its creation has not
3914                          * finished yet (no block group item in the extent tree
3915                          * yet, etc). If this is the case, wait for all free
3916                          * space endio workers to finish and retry. This is a
3917                          * a very rare case so no need for a more efficient and
3918                          * complex approach.
3919                          */
3920                         if (ret == -ENOENT) {
3921                                 wait_event(cur_trans->writer_wait,
3922                                    atomic_read(&cur_trans->num_writers) == 1);
3923                                 ret = write_one_cache_group(trans, fs_info,
3924                                                             path, cache);
3925                         }
3926                         if (ret)
3927                                 btrfs_abort_transaction(trans, ret);
3928                 }
3929
3930                 /* if its not on the io list, we need to put the block group */
3931                 if (should_put)
3932                         btrfs_put_block_group(cache);
3933                 spin_lock(&cur_trans->dirty_bgs_lock);
3934         }
3935         spin_unlock(&cur_trans->dirty_bgs_lock);
3936
3937         /*
3938          * Refer to the definition of io_bgs member for details why it's safe
3939          * to use it without any locking
3940          */
3941         while (!list_empty(io)) {
3942                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3943                                          io_list);
3944                 list_del_init(&cache->io_list);
3945                 btrfs_wait_cache_io(trans, cache, path);
3946                 btrfs_put_block_group(cache);
3947         }
3948
3949         btrfs_free_path(path);
3950         return ret;
3951 }
3952
3953 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
3954 {
3955         struct btrfs_block_group_cache *block_group;
3956         int readonly = 0;
3957
3958         block_group = btrfs_lookup_block_group(fs_info, bytenr);
3959         if (!block_group || block_group->ro)
3960                 readonly = 1;
3961         if (block_group)
3962                 btrfs_put_block_group(block_group);
3963         return readonly;
3964 }
3965
3966 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3967 {
3968         struct btrfs_block_group_cache *bg;
3969         bool ret = true;
3970
3971         bg = btrfs_lookup_block_group(fs_info, bytenr);
3972         if (!bg)
3973                 return false;
3974
3975         spin_lock(&bg->lock);
3976         if (bg->ro)
3977                 ret = false;
3978         else
3979                 atomic_inc(&bg->nocow_writers);
3980         spin_unlock(&bg->lock);
3981
3982         /* no put on block group, done by btrfs_dec_nocow_writers */
3983         if (!ret)
3984                 btrfs_put_block_group(bg);
3985
3986         return ret;
3987
3988 }
3989
3990 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3991 {
3992         struct btrfs_block_group_cache *bg;
3993
3994         bg = btrfs_lookup_block_group(fs_info, bytenr);
3995         ASSERT(bg);
3996         if (atomic_dec_and_test(&bg->nocow_writers))
3997                 wake_up_var(&bg->nocow_writers);
3998         /*
3999          * Once for our lookup and once for the lookup done by a previous call
4000          * to btrfs_inc_nocow_writers()
4001          */
4002         btrfs_put_block_group(bg);
4003         btrfs_put_block_group(bg);
4004 }
4005
4006 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
4007 {
4008         wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
4009 }
4010
4011 static const char *alloc_name(u64 flags)
4012 {
4013         switch (flags) {
4014         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
4015                 return "mixed";
4016         case BTRFS_BLOCK_GROUP_METADATA:
4017                 return "metadata";
4018         case BTRFS_BLOCK_GROUP_DATA:
4019                 return "data";
4020         case BTRFS_BLOCK_GROUP_SYSTEM:
4021                 return "system";
4022         default:
4023                 WARN_ON(1);
4024                 return "invalid-combination";
4025         };
4026 }
4027
4028 static int create_space_info(struct btrfs_fs_info *info, u64 flags,
4029                              struct btrfs_space_info **new)
4030 {
4031
4032         struct btrfs_space_info *space_info;
4033         int i;
4034         int ret;
4035
4036         space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
4037         if (!space_info)
4038                 return -ENOMEM;
4039
4040         ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
4041                                  GFP_KERNEL);
4042         if (ret) {
4043                 kfree(space_info);
4044                 return ret;
4045         }
4046
4047         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
4048                 INIT_LIST_HEAD(&space_info->block_groups[i]);
4049         init_rwsem(&space_info->groups_sem);
4050         spin_lock_init(&space_info->lock);
4051         space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
4052         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4053         init_waitqueue_head(&space_info->wait);
4054         INIT_LIST_HEAD(&space_info->ro_bgs);
4055         INIT_LIST_HEAD(&space_info->tickets);
4056         INIT_LIST_HEAD(&space_info->priority_tickets);
4057
4058         ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
4059                                     info->space_info_kobj, "%s",
4060                                     alloc_name(space_info->flags));
4061         if (ret) {
4062                 percpu_counter_destroy(&space_info->total_bytes_pinned);
4063                 kfree(space_info);
4064                 return ret;
4065         }
4066
4067         *new = space_info;
4068         list_add_rcu(&space_info->list, &info->space_info);
4069         if (flags & BTRFS_BLOCK_GROUP_DATA)
4070                 info->data_sinfo = space_info;
4071
4072         return ret;
4073 }
4074
4075 static void update_space_info(struct btrfs_fs_info *info, u64 flags,
4076                              u64 total_bytes, u64 bytes_used,
4077                              u64 bytes_readonly,
4078                              struct btrfs_space_info **space_info)
4079 {
4080         struct btrfs_space_info *found;
4081         int factor;
4082
4083         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
4084                      BTRFS_BLOCK_GROUP_RAID10))
4085                 factor = 2;
4086         else
4087                 factor = 1;
4088
4089         found = __find_space_info(info, flags);
4090         ASSERT(found);
4091         spin_lock(&found->lock);
4092         found->total_bytes += total_bytes;
4093         found->disk_total += total_bytes * factor;
4094         found->bytes_used += bytes_used;
4095         found->disk_used += bytes_used * factor;
4096         found->bytes_readonly += bytes_readonly;
4097         if (total_bytes > 0)
4098                 found->full = 0;
4099         space_info_add_new_bytes(info, found, total_bytes -
4100                                  bytes_used - bytes_readonly);
4101         spin_unlock(&found->lock);
4102         *space_info = found;
4103 }
4104
4105 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
4106 {
4107         u64 extra_flags = chunk_to_extended(flags) &
4108                                 BTRFS_EXTENDED_PROFILE_MASK;
4109
4110         write_seqlock(&fs_info->profiles_lock);
4111         if (flags & BTRFS_BLOCK_GROUP_DATA)
4112                 fs_info->avail_data_alloc_bits |= extra_flags;
4113         if (flags & BTRFS_BLOCK_GROUP_METADATA)
4114                 fs_info->avail_metadata_alloc_bits |= extra_flags;
4115         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4116                 fs_info->avail_system_alloc_bits |= extra_flags;
4117         write_sequnlock(&fs_info->profiles_lock);
4118 }
4119
4120 /*
4121  * returns target flags in extended format or 0 if restripe for this
4122  * chunk_type is not in progress
4123  *
4124  * should be called with either volume_mutex or balance_lock held
4125  */
4126 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
4127 {
4128         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4129         u64 target = 0;
4130
4131         if (!bctl)
4132                 return 0;
4133
4134         if (flags & BTRFS_BLOCK_GROUP_DATA &&
4135             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4136                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
4137         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
4138                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4139                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
4140         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
4141                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4142                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
4143         }
4144
4145         return target;
4146 }
4147
4148 /*
4149  * @flags: available profiles in extended format (see ctree.h)
4150  *
4151  * Returns reduced profile in chunk format.  If profile changing is in
4152  * progress (either running or paused) picks the target profile (if it's
4153  * already available), otherwise falls back to plain reducing.
4154  */
4155 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
4156 {
4157         u64 num_devices = fs_info->fs_devices->rw_devices;
4158         u64 target;
4159         u64 raid_type;
4160         u64 allowed = 0;
4161
4162         /*
4163          * see if restripe for this chunk_type is in progress, if so
4164          * try to reduce to the target profile
4165          */
4166         spin_lock(&fs_info->balance_lock);
4167         target = get_restripe_target(fs_info, flags);
4168         if (target) {
4169                 /* pick target profile only if it's already available */
4170                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4171                         spin_unlock(&fs_info->balance_lock);
4172                         return extended_to_chunk(target);
4173                 }
4174         }
4175         spin_unlock(&fs_info->balance_lock);
4176
4177         /* First, mask out the RAID levels which aren't possible */
4178         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4179                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4180                         allowed |= btrfs_raid_group[raid_type];
4181         }
4182         allowed &= flags;
4183
4184         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4185                 allowed = BTRFS_BLOCK_GROUP_RAID6;
4186         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4187                 allowed = BTRFS_BLOCK_GROUP_RAID5;
4188         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4189                 allowed = BTRFS_BLOCK_GROUP_RAID10;
4190         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4191                 allowed = BTRFS_BLOCK_GROUP_RAID1;
4192         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4193                 allowed = BTRFS_BLOCK_GROUP_RAID0;
4194
4195         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4196
4197         return extended_to_chunk(flags | allowed);
4198 }
4199
4200 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
4201 {
4202         unsigned seq;
4203         u64 flags;
4204
4205         do {
4206                 flags = orig_flags;
4207                 seq = read_seqbegin(&fs_info->profiles_lock);
4208
4209                 if (flags & BTRFS_BLOCK_GROUP_DATA)
4210                         flags |= fs_info->avail_data_alloc_bits;
4211                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4212                         flags |= fs_info->avail_system_alloc_bits;
4213                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4214                         flags |= fs_info->avail_metadata_alloc_bits;
4215         } while (read_seqretry(&fs_info->profiles_lock, seq));
4216
4217         return btrfs_reduce_alloc_profile(fs_info, flags);
4218 }
4219
4220 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
4221 {
4222         struct btrfs_fs_info *fs_info = root->fs_info;
4223         u64 flags;
4224         u64 ret;
4225
4226         if (data)
4227                 flags = BTRFS_BLOCK_GROUP_DATA;
4228         else if (root == fs_info->chunk_root)
4229                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4230         else
4231                 flags = BTRFS_BLOCK_GROUP_METADATA;
4232
4233         ret = get_alloc_profile(fs_info, flags);
4234         return ret;
4235 }
4236
4237 u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
4238 {
4239         return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
4240 }
4241
4242 u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
4243 {
4244         return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4245 }
4246
4247 u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
4248 {
4249         return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4250 }
4251
4252 static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
4253                                  bool may_use_included)
4254 {
4255         ASSERT(s_info);
4256         return s_info->bytes_used + s_info->bytes_reserved +
4257                 s_info->bytes_pinned + s_info->bytes_readonly +
4258                 (may_use_included ? s_info->bytes_may_use : 0);
4259 }
4260
4261 int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
4262 {
4263         struct btrfs_root *root = inode->root;
4264         struct btrfs_fs_info *fs_info = root->fs_info;
4265         struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
4266         u64 used;
4267         int ret = 0;
4268         int need_commit = 2;
4269         int have_pinned_space;
4270
4271         /* make sure bytes are sectorsize aligned */
4272         bytes = ALIGN(bytes, fs_info->sectorsize);
4273
4274         if (btrfs_is_free_space_inode(inode)) {
4275                 need_commit = 0;
4276                 ASSERT(current->journal_info);
4277         }
4278
4279 again:
4280         /* make sure we have enough space to handle the data first */
4281         spin_lock(&data_sinfo->lock);
4282         used = btrfs_space_info_used(data_sinfo, true);
4283
4284         if (used + bytes > data_sinfo->total_bytes) {
4285                 struct btrfs_trans_handle *trans;
4286
4287                 /*
4288                  * if we don't have enough free bytes in this space then we need
4289                  * to alloc a new chunk.
4290                  */
4291                 if (!data_sinfo->full) {
4292                         u64 alloc_target;
4293
4294                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4295                         spin_unlock(&data_sinfo->lock);
4296
4297                         alloc_target = btrfs_data_alloc_profile(fs_info);
4298                         /*
4299                          * It is ugly that we don't call nolock join
4300                          * transaction for the free space inode case here.
4301                          * But it is safe because we only do the data space
4302                          * reservation for the free space cache in the
4303                          * transaction context, the common join transaction
4304                          * just increase the counter of the current transaction
4305                          * handler, doesn't try to acquire the trans_lock of
4306                          * the fs.
4307                          */
4308                         trans = btrfs_join_transaction(root);
4309                         if (IS_ERR(trans))
4310                                 return PTR_ERR(trans);
4311
4312                         ret = do_chunk_alloc(trans, fs_info, alloc_target,
4313                                              CHUNK_ALLOC_NO_FORCE);
4314                         btrfs_end_transaction(trans);
4315                         if (ret < 0) {
4316                                 if (ret != -ENOSPC)
4317                                         return ret;
4318                                 else {
4319                                         have_pinned_space = 1;
4320                                         goto commit_trans;
4321                                 }
4322                         }
4323
4324                         goto again;
4325                 }
4326
4327                 /*
4328                  * If we don't have enough pinned space to deal with this
4329                  * allocation, and no removed chunk in current transaction,
4330                  * don't bother committing the transaction.
4331                  */
4332                 have_pinned_space = percpu_counter_compare(
4333                         &data_sinfo->total_bytes_pinned,
4334                         used + bytes - data_sinfo->total_bytes);
4335                 spin_unlock(&data_sinfo->lock);
4336
4337                 /* commit the current transaction and try again */
4338 commit_trans:
4339                 if (need_commit) {
4340                         need_commit--;
4341
4342                         if (need_commit > 0) {
4343                                 btrfs_start_delalloc_roots(fs_info, 0, -1);
4344                                 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
4345                                                          (u64)-1);
4346                         }
4347
4348                         trans = btrfs_join_transaction(root);
4349                         if (IS_ERR(trans))
4350                                 return PTR_ERR(trans);
4351                         if (have_pinned_space >= 0 ||
4352                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4353                                      &trans->transaction->flags) ||
4354                             need_commit > 0) {
4355                                 ret = btrfs_commit_transaction(trans);
4356                                 if (ret)
4357                                         return ret;
4358                                 /*
4359                                  * The cleaner kthread might still be doing iput
4360                                  * operations. Wait for it to finish so that
4361                                  * more space is released.
4362                                  */
4363                                 mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
4364                                 mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
4365                                 goto again;
4366                         } else {
4367                                 btrfs_end_transaction(trans);
4368                         }
4369                 }
4370
4371                 trace_btrfs_space_reservation(fs_info,
4372                                               "space_info:enospc",
4373                                               data_sinfo->flags, bytes, 1);
4374                 return -ENOSPC;
4375         }
4376         data_sinfo->bytes_may_use += bytes;
4377         trace_btrfs_space_reservation(fs_info, "space_info",
4378                                       data_sinfo->flags, bytes, 1);
4379         spin_unlock(&data_sinfo->lock);
4380
4381         return ret;
4382 }
4383
4384 int btrfs_check_data_free_space(struct inode *inode,
4385                         struct extent_changeset **reserved, u64 start, u64 len)
4386 {
4387         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4388         int ret;
4389
4390         /* align the range */
4391         len = round_up(start + len, fs_info->sectorsize) -
4392               round_down(start, fs_info->sectorsize);
4393         start = round_down(start, fs_info->sectorsize);
4394
4395         ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
4396         if (ret < 0)
4397                 return ret;
4398
4399         /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
4400         ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
4401         if (ret < 0)
4402                 btrfs_free_reserved_data_space_noquota(inode, start, len);
4403         else
4404                 ret = 0;
4405         return ret;
4406 }
4407
4408 /*
4409  * Called if we need to clear a data reservation for this inode
4410  * Normally in a error case.
4411  *
4412  * This one will *NOT* use accurate qgroup reserved space API, just for case
4413  * which we can't sleep and is sure it won't affect qgroup reserved space.
4414  * Like clear_bit_hook().
4415  */
4416 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4417                                             u64 len)
4418 {
4419         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4420         struct btrfs_space_info *data_sinfo;
4421
4422         /* Make sure the range is aligned to sectorsize */
4423         len = round_up(start + len, fs_info->sectorsize) -
4424               round_down(start, fs_info->sectorsize);
4425         start = round_down(start, fs_info->sectorsize);
4426
4427         data_sinfo = fs_info->data_sinfo;
4428         spin_lock(&data_sinfo->lock);
4429         if (WARN_ON(data_sinfo->bytes_may_use < len))
4430                 data_sinfo->bytes_may_use = 0;
4431         else
4432                 data_sinfo->bytes_may_use -= len;
4433         trace_btrfs_space_reservation(fs_info, "space_info",
4434                                       data_sinfo->flags, len, 0);
4435         spin_unlock(&data_sinfo->lock);
4436 }
4437
4438 /*
4439  * Called if we need to clear a data reservation for this inode
4440  * Normally in a error case.
4441  *
4442  * This one will handle the per-inode data rsv map for accurate reserved
4443  * space framework.
4444  */
4445 void btrfs_free_reserved_data_space(struct inode *inode,
4446                         struct extent_changeset *reserved, u64 start, u64 len)
4447 {
4448         struct btrfs_root *root = BTRFS_I(inode)->root;
4449
4450         /* Make sure the range is aligned to sectorsize */
4451         len = round_up(start + len, root->fs_info->sectorsize) -
4452               round_down(start, root->fs_info->sectorsize);
4453         start = round_down(start, root->fs_info->sectorsize);
4454
4455         btrfs_free_reserved_data_space_noquota(inode, start, len);
4456         btrfs_qgroup_free_data(inode, reserved, start, len);
4457 }
4458
4459 static void force_metadata_allocation(struct btrfs_fs_info *info)
4460 {
4461         struct list_head *head = &info->space_info;
4462         struct btrfs_space_info *found;
4463
4464         rcu_read_lock();
4465         list_for_each_entry_rcu(found, head, list) {
4466                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4467                         found->force_alloc = CHUNK_ALLOC_FORCE;
4468         }
4469         rcu_read_unlock();
4470 }
4471
4472 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4473 {
4474         return (global->size << 1);
4475 }
4476
4477 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
4478                               struct btrfs_space_info *sinfo, int force)
4479 {
4480         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4481         u64 bytes_used = btrfs_space_info_used(sinfo, false);
4482         u64 thresh;
4483
4484         if (force == CHUNK_ALLOC_FORCE)
4485                 return 1;
4486
4487         /*
4488          * We need to take into account the global rsv because for all intents
4489          * and purposes it's used space.  Don't worry about locking the
4490          * global_rsv, it doesn't change except when the transaction commits.
4491          */
4492         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4493                 bytes_used += calc_global_rsv_need_space(global_rsv);
4494
4495         /*
4496          * in limited mode, we want to have some free space up to
4497          * about 1% of the FS size.
4498          */
4499         if (force == CHUNK_ALLOC_LIMITED) {
4500                 thresh = btrfs_super_total_bytes(fs_info->super_copy);
4501                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4502
4503                 if (sinfo->total_bytes - bytes_used < thresh)
4504                         return 1;
4505         }
4506
4507         if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
4508                 return 0;
4509         return 1;
4510 }
4511
4512 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
4513 {
4514         u64 num_dev;
4515
4516         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4517                     BTRFS_BLOCK_GROUP_RAID0 |
4518                     BTRFS_BLOCK_GROUP_RAID5 |
4519                     BTRFS_BLOCK_GROUP_RAID6))
4520                 num_dev = fs_info->fs_devices->rw_devices;
4521         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4522                 num_dev = 2;
4523         else
4524                 num_dev = 1;    /* DUP or single */
4525
4526         return num_dev;
4527 }
4528
4529 /*
4530  * If @is_allocation is true, reserve space in the system space info necessary
4531  * for allocating a chunk, otherwise if it's false, reserve space necessary for
4532  * removing a chunk.
4533  */
4534 void check_system_chunk(struct btrfs_trans_handle *trans,
4535                         struct btrfs_fs_info *fs_info, u64 type)
4536 {
4537         struct btrfs_space_info *info;
4538         u64 left;
4539         u64 thresh;
4540         int ret = 0;
4541         u64 num_devs;
4542
4543         /*
4544          * Needed because we can end up allocating a system chunk and for an
4545          * atomic and race free space reservation in the chunk block reserve.
4546          */
4547         lockdep_assert_held(&fs_info->chunk_mutex);
4548
4549         info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4550         spin_lock(&info->lock);
4551         left = info->total_bytes - btrfs_space_info_used(info, true);
4552         spin_unlock(&info->lock);
4553
4554         num_devs = get_profile_num_devs(fs_info, type);
4555
4556         /* num_devs device items to update and 1 chunk item to add or remove */
4557         thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
4558                 btrfs_calc_trans_metadata_size(fs_info, 1);
4559
4560         if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4561                 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
4562                            left, thresh, type);
4563                 dump_space_info(fs_info, info, 0, 0);
4564         }
4565
4566         if (left < thresh) {
4567                 u64 flags = btrfs_system_alloc_profile(fs_info);
4568
4569                 /*
4570                  * Ignore failure to create system chunk. We might end up not
4571                  * needing it, as we might not need to COW all nodes/leafs from
4572                  * the paths we visit in the chunk tree (they were already COWed
4573                  * or created in the current transaction for example).
4574                  */
4575                 ret = btrfs_alloc_chunk(trans, fs_info, flags);
4576         }
4577
4578         if (!ret) {
4579                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
4580                                           &fs_info->chunk_block_rsv,
4581                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4582                 if (!ret)
4583                         trans->chunk_bytes_reserved += thresh;
4584         }
4585 }
4586
4587 /*
4588  * If force is CHUNK_ALLOC_FORCE:
4589  *    - return 1 if it successfully allocates a chunk,
4590  *    - return errors including -ENOSPC otherwise.
4591  * If force is NOT CHUNK_ALLOC_FORCE:
4592  *    - return 0 if it doesn't need to allocate a new chunk,
4593  *    - return 1 if it successfully allocates a chunk,
4594  *    - return errors including -ENOSPC otherwise.
4595  */
4596 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4597                           struct btrfs_fs_info *fs_info, u64 flags, int force)
4598 {
4599         struct btrfs_space_info *space_info;
4600         int wait_for_alloc = 0;
4601         int ret = 0;
4602
4603         /* Don't re-enter if we're already allocating a chunk */
4604         if (trans->allocating_chunk)
4605                 return -ENOSPC;
4606
4607         space_info = __find_space_info(fs_info, flags);
4608         ASSERT(space_info);
4609
4610 again:
4611         spin_lock(&space_info->lock);
4612         if (force < space_info->force_alloc)
4613                 force = space_info->force_alloc;
4614         if (space_info->full) {
4615                 if (should_alloc_chunk(fs_info, space_info, force))
4616                         ret = -ENOSPC;
4617                 else
4618                         ret = 0;
4619                 spin_unlock(&space_info->lock);
4620                 return ret;
4621         }
4622
4623         if (!should_alloc_chunk(fs_info, space_info, force)) {
4624                 spin_unlock(&space_info->lock);
4625                 return 0;
4626         } else if (space_info->chunk_alloc) {
4627                 wait_for_alloc = 1;
4628         } else {
4629                 space_info->chunk_alloc = 1;
4630         }
4631
4632         spin_unlock(&space_info->lock);
4633
4634         mutex_lock(&fs_info->chunk_mutex);
4635
4636         /*
4637          * The chunk_mutex is held throughout the entirety of a chunk
4638          * allocation, so once we've acquired the chunk_mutex we know that the
4639          * other guy is done and we need to recheck and see if we should
4640          * allocate.
4641          */
4642         if (wait_for_alloc) {
4643                 mutex_unlock(&fs_info->chunk_mutex);
4644                 wait_for_alloc = 0;
4645                 goto again;
4646         }
4647
4648         trans->allocating_chunk = true;
4649
4650         /*
4651          * If we have mixed data/metadata chunks we want to make sure we keep
4652          * allocating mixed chunks instead of individual chunks.
4653          */
4654         if (btrfs_mixed_space_info(space_info))
4655                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4656
4657         /*
4658          * if we're doing a data chunk, go ahead and make sure that
4659          * we keep a reasonable number of metadata chunks allocated in the
4660          * FS as well.
4661          */
4662         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4663                 fs_info->data_chunk_allocations++;
4664                 if (!(fs_info->data_chunk_allocations %
4665                       fs_info->metadata_ratio))
4666                         force_metadata_allocation(fs_info);
4667         }
4668
4669         /*
4670          * Check if we have enough space in SYSTEM chunk because we may need
4671          * to update devices.
4672          */
4673         check_system_chunk(trans, fs_info, flags);
4674
4675         ret = btrfs_alloc_chunk(trans, fs_info, flags);
4676         trans->allocating_chunk = false;
4677
4678         spin_lock(&space_info->lock);
4679         if (ret < 0 && ret != -ENOSPC)
4680                 goto out;
4681         if (ret)
4682                 space_info->full = 1;
4683         else
4684                 ret = 1;
4685
4686         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4687 out:
4688         space_info->chunk_alloc = 0;
4689         spin_unlock(&space_info->lock);
4690         mutex_unlock(&fs_info->chunk_mutex);
4691         /*
4692          * When we allocate a new chunk we reserve space in the chunk block
4693          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4694          * add new nodes/leafs to it if we end up needing to do it when
4695          * inserting the chunk item and updating device items as part of the
4696          * second phase of chunk allocation, performed by
4697          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4698          * large number of new block groups to create in our transaction
4699          * handle's new_bgs list to avoid exhausting the chunk block reserve
4700          * in extreme cases - like having a single transaction create many new
4701          * block groups when starting to write out the free space caches of all
4702          * the block groups that were made dirty during the lifetime of the
4703          * transaction.
4704          */
4705         if (trans->can_flush_pending_bgs &&
4706             trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4707                 btrfs_create_pending_block_groups(trans);
4708                 btrfs_trans_release_chunk_metadata(trans);
4709         }
4710         return ret;
4711 }
4712
4713 static int can_overcommit(struct btrfs_fs_info *fs_info,
4714                           struct btrfs_space_info *space_info, u64 bytes,
4715                           enum btrfs_reserve_flush_enum flush,
4716                           bool system_chunk)
4717 {
4718         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4719         u64 profile;
4720         u64 space_size;
4721         u64 avail;
4722         u64 used;
4723
4724         /* Don't overcommit when in mixed mode. */
4725         if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
4726                 return 0;
4727
4728         if (system_chunk)
4729                 profile = btrfs_system_alloc_profile(fs_info);
4730         else
4731                 profile = btrfs_metadata_alloc_profile(fs_info);
4732
4733         used = btrfs_space_info_used(space_info, false);
4734
4735         /*
4736          * We only want to allow over committing if we have lots of actual space
4737          * free, but if we don't have enough space to handle the global reserve
4738          * space then we could end up having a real enospc problem when trying
4739          * to allocate a chunk or some other such important allocation.
4740          */
4741         spin_lock(&global_rsv->lock);
4742         space_size = calc_global_rsv_need_space(global_rsv);
4743         spin_unlock(&global_rsv->lock);
4744         if (used + space_size >= space_info->total_bytes)
4745                 return 0;
4746
4747         used += space_info->bytes_may_use;
4748
4749         avail = atomic64_read(&fs_info->free_chunk_space);
4750
4751         /*
4752          * If we have dup, raid1 or raid10 then only half of the free
4753          * space is actually useable.  For raid56, the space info used
4754          * doesn't include the parity drive, so we don't have to
4755          * change the math
4756          */
4757         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4758                        BTRFS_BLOCK_GROUP_RAID1 |
4759                        BTRFS_BLOCK_GROUP_RAID10))
4760                 avail >>= 1;
4761
4762         /*
4763          * If we aren't flushing all things, let us overcommit up to
4764          * 1/2th of the space. If we can flush, don't let us overcommit
4765          * too much, let it overcommit up to 1/8 of the space.
4766          */
4767         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4768                 avail >>= 3;
4769         else
4770                 avail >>= 1;
4771
4772         if (used + bytes < space_info->total_bytes + avail)
4773                 return 1;
4774         return 0;
4775 }
4776
4777 static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
4778                                          unsigned long nr_pages, int nr_items)
4779 {
4780         struct super_block *sb = fs_info->sb;
4781
4782         if (down_read_trylock(&sb->s_umount)) {
4783                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4784                 up_read(&sb->s_umount);
4785         } else {
4786                 /*
4787                  * We needn't worry the filesystem going from r/w to r/o though
4788                  * we don't acquire ->s_umount mutex, because the filesystem
4789                  * should guarantee the delalloc inodes list be empty after
4790                  * the filesystem is readonly(all dirty pages are written to
4791                  * the disk).
4792                  */
4793                 btrfs_start_delalloc_roots(fs_info, 0, nr_items);
4794                 if (!current->journal_info)
4795                         btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
4796         }
4797 }
4798
4799 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
4800                                         u64 to_reclaim)
4801 {
4802         u64 bytes;
4803         u64 nr;
4804
4805         bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
4806         nr = div64_u64(to_reclaim, bytes);
4807         if (!nr)
4808                 nr = 1;
4809         return nr;
4810 }
4811
4812 #define EXTENT_SIZE_PER_ITEM    SZ_256K
4813
4814 /*
4815  * shrink metadata reservation for delalloc
4816  */
4817 static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
4818                             u64 orig, bool wait_ordered)
4819 {
4820         struct btrfs_space_info *space_info;
4821         struct btrfs_trans_handle *trans;
4822         u64 delalloc_bytes;
4823         u64 max_reclaim;
4824         u64 items;
4825         long time_left;
4826         unsigned long nr_pages;
4827         int loops;
4828
4829         /* Calc the number of the pages we need flush for space reservation */
4830         items = calc_reclaim_items_nr(fs_info, to_reclaim);
4831         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4832
4833         trans = (struct btrfs_trans_handle *)current->journal_info;
4834         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4835
4836         delalloc_bytes = percpu_counter_sum_positive(
4837                                                 &fs_info->delalloc_bytes);
4838         if (delalloc_bytes == 0) {
4839                 if (trans)
4840                         return;
4841                 if (wait_ordered)
4842                         btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4843                 return;
4844         }
4845
4846         loops = 0;
4847         while (delalloc_bytes && loops < 3) {
4848                 max_reclaim = min(delalloc_bytes, to_reclaim);
4849                 nr_pages = max_reclaim >> PAGE_SHIFT;
4850                 btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
4851                 /*
4852                  * We need to wait for the async pages to actually start before
4853                  * we do anything.
4854                  */
4855                 max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
4856                 if (!max_reclaim)
4857                         goto skip_async;
4858
4859                 if (max_reclaim <= nr_pages)
4860                         max_reclaim = 0;
4861                 else
4862                         max_reclaim -= nr_pages;
4863
4864                 wait_event(fs_info->async_submit_wait,
4865                            atomic_read(&fs_info->async_delalloc_pages) <=
4866                            (int)max_reclaim);
4867 skip_async:
4868                 spin_lock(&space_info->lock);
4869                 if (list_empty(&space_info->tickets) &&
4870                     list_empty(&space_info->priority_tickets)) {
4871                         spin_unlock(&space_info->lock);
4872                         break;
4873                 }
4874                 spin_unlock(&space_info->lock);
4875
4876                 loops++;
4877                 if (wait_ordered && !trans) {
4878                         btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4879                 } else {
4880                         time_left = schedule_timeout_killable(1);
4881                         if (time_left)
4882                                 break;
4883                 }
4884                 delalloc_bytes = percpu_counter_sum_positive(
4885                                                 &fs_info->delalloc_bytes);
4886         }
4887 }
4888
4889 struct reserve_ticket {
4890         u64 bytes;
4891         int error;
4892         struct list_head list;
4893         wait_queue_head_t wait;
4894 };
4895
4896 /**
4897  * maybe_commit_transaction - possibly commit the transaction if its ok to
4898  * @root - the root we're allocating for
4899  * @bytes - the number of bytes we want to reserve
4900  * @force - force the commit
4901  *
4902  * This will check to make sure that committing the transaction will actually
4903  * get us somewhere and then commit the transaction if it does.  Otherwise it
4904  * will return -ENOSPC.
4905  */
4906 static int may_commit_transaction(struct btrfs_fs_info *fs_info,
4907                                   struct btrfs_space_info *space_info)
4908 {
4909         struct reserve_ticket *ticket = NULL;
4910         struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
4911         struct btrfs_trans_handle *trans;
4912         u64 bytes;
4913
4914         trans = (struct btrfs_trans_handle *)current->journal_info;
4915         if (trans)
4916                 return -EAGAIN;
4917
4918         spin_lock(&space_info->lock);
4919         if (!list_empty(&space_info->priority_tickets))
4920                 ticket = list_first_entry(&space_info->priority_tickets,
4921                                           struct reserve_ticket, list);
4922         else if (!list_empty(&space_info->tickets))
4923                 ticket = list_first_entry(&space_info->tickets,
4924                                           struct reserve_ticket, list);
4925         bytes = (ticket) ? ticket->bytes : 0;
4926         spin_unlock(&space_info->lock);
4927
4928         if (!bytes)
4929                 return 0;
4930
4931         /* See if there is enough pinned space to make this reservation */
4932         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4933                                    bytes) >= 0)
4934                 goto commit;
4935
4936         /*
4937          * See if there is some space in the delayed insertion reservation for
4938          * this reservation.
4939          */
4940         if (space_info != delayed_rsv->space_info)
4941                 return -ENOSPC;
4942
4943         spin_lock(&delayed_rsv->lock);
4944         if (delayed_rsv->size > bytes)
4945                 bytes = 0;
4946         else
4947                 bytes -= delayed_rsv->size;
4948         spin_unlock(&delayed_rsv->lock);
4949
4950         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4951                                    bytes) < 0) {
4952                 return -ENOSPC;
4953         }
4954
4955 commit:
4956         trans = btrfs_join_transaction(fs_info->extent_root);
4957         if (IS_ERR(trans))
4958                 return -ENOSPC;
4959
4960         return btrfs_commit_transaction(trans);
4961 }
4962
4963 /*
4964  * Try to flush some data based on policy set by @state. This is only advisory
4965  * and may fail for various reasons. The caller is supposed to examine the
4966  * state of @space_info to detect the outcome.
4967  */
4968 static void flush_space(struct btrfs_fs_info *fs_info,
4969                        struct btrfs_space_info *space_info, u64 num_bytes,
4970                        int state)
4971 {
4972         struct btrfs_root *root = fs_info->extent_root;
4973         struct btrfs_trans_handle *trans;
4974         int nr;
4975         int ret = 0;
4976
4977         switch (state) {
4978         case FLUSH_DELAYED_ITEMS_NR:
4979         case FLUSH_DELAYED_ITEMS:
4980                 if (state == FLUSH_DELAYED_ITEMS_NR)
4981                         nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
4982                 else
4983                         nr = -1;
4984
4985                 trans = btrfs_join_transaction(root);
4986                 if (IS_ERR(trans)) {
4987                         ret = PTR_ERR(trans);
4988                         break;
4989                 }
4990                 ret = btrfs_run_delayed_items_nr(trans, nr);
4991                 btrfs_end_transaction(trans);
4992                 break;
4993         case FLUSH_DELALLOC:
4994         case FLUSH_DELALLOC_WAIT:
4995                 shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
4996                                 state == FLUSH_DELALLOC_WAIT);
4997                 break;
4998         case ALLOC_CHUNK:
4999                 trans = btrfs_join_transaction(root);
5000                 if (IS_ERR(trans)) {
5001                         ret = PTR_ERR(trans);
5002                         break;
5003                 }
5004                 ret = do_chunk_alloc(trans, fs_info,
5005                                      btrfs_metadata_alloc_profile(fs_info),
5006                                      CHUNK_ALLOC_NO_FORCE);
5007                 btrfs_end_transaction(trans);
5008                 if (ret > 0 || ret == -ENOSPC)
5009                         ret = 0;
5010                 break;
5011         case COMMIT_TRANS:
5012                 ret = may_commit_transaction(fs_info, space_info);
5013                 break;
5014         default:
5015                 ret = -ENOSPC;
5016                 break;
5017         }
5018
5019         trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
5020                                 ret);
5021         return;
5022 }
5023
5024 static inline u64
5025 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
5026                                  struct btrfs_space_info *space_info,
5027                                  bool system_chunk)
5028 {
5029         struct reserve_ticket *ticket;
5030         u64 used;
5031         u64 expected;
5032         u64 to_reclaim = 0;
5033
5034         list_for_each_entry(ticket, &space_info->tickets, list)
5035                 to_reclaim += ticket->bytes;
5036         list_for_each_entry(ticket, &space_info->priority_tickets, list)
5037                 to_reclaim += ticket->bytes;
5038         if (to_reclaim)
5039                 return to_reclaim;
5040
5041         to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
5042         if (can_overcommit(fs_info, space_info, to_reclaim,
5043                            BTRFS_RESERVE_FLUSH_ALL, system_chunk))
5044                 return 0;
5045
5046         used = btrfs_space_info_used(space_info, true);
5047
5048         if (can_overcommit(fs_info, space_info, SZ_1M,
5049                            BTRFS_RESERVE_FLUSH_ALL, system_chunk))
5050                 expected = div_factor_fine(space_info->total_bytes, 95);
5051         else
5052                 expected = div_factor_fine(space_info->total_bytes, 90);
5053
5054         if (used > expected)
5055                 to_reclaim = used - expected;
5056         else
5057                 to_reclaim = 0;
5058         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
5059                                      space_info->bytes_reserved);
5060         return to_reclaim;
5061 }
5062
5063 static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
5064                                         struct btrfs_space_info *space_info,
5065                                         u64 used, bool system_chunk)
5066 {
5067         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
5068
5069         /* If we're just plain full then async reclaim just slows us down. */
5070         if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
5071                 return 0;
5072
5073         if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
5074                                               system_chunk))
5075                 return 0;
5076
5077         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
5078                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
5079 }
5080
5081 static void wake_all_tickets(struct list_head *head)
5082 {
5083         struct reserve_ticket *ticket;
5084
5085         while (!list_empty(head)) {
5086                 ticket = list_first_entry(head, struct reserve_ticket, list);
5087                 list_del_init(&ticket->list);
5088                 ticket->error = -ENOSPC;
5089                 wake_up(&ticket->wait);
5090         }
5091 }
5092
5093 /*
5094  * This is for normal flushers, we can wait all goddamned day if we want to.  We
5095  * will loop and continuously try to flush as long as we are making progress.
5096  * We count progress as clearing off tickets each time we have to loop.
5097  */
5098 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
5099 {
5100         struct btrfs_fs_info *fs_info;
5101         struct btrfs_space_info *space_info;
5102         u64 to_reclaim;
5103         int flush_state;
5104         int commit_cycles = 0;
5105         u64 last_tickets_id;
5106
5107         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
5108         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5109
5110         spin_lock(&space_info->lock);
5111         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
5112                                                       false);
5113         if (!to_reclaim) {
5114                 space_info->flush = 0;
5115                 spin_unlock(&space_info->lock);
5116                 return;
5117         }
5118         last_tickets_id = space_info->tickets_id;
5119         spin_unlock(&space_info->lock);
5120
5121         flush_state = FLUSH_DELAYED_ITEMS_NR;
5122         do {
5123                 flush_space(fs_info, space_info, to_reclaim, flush_state);
5124                 spin_lock(&space_info->lock);
5125                 if (list_empty(&space_info->tickets)) {
5126                         space_info->flush = 0;
5127                         spin_unlock(&space_info->lock);
5128                         return;
5129                 }
5130                 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
5131                                                               space_info,
5132                                                               false);
5133                 if (last_tickets_id == space_info->tickets_id) {
5134                         flush_state++;
5135                 } else {
5136                         last_tickets_id = space_info->tickets_id;
5137                         flush_state = FLUSH_DELAYED_ITEMS_NR;
5138                         if (commit_cycles)
5139                                 commit_cycles--;
5140                 }
5141
5142                 if (flush_state > COMMIT_TRANS) {
5143                         commit_cycles++;
5144                         if (commit_cycles > 2) {
5145                                 wake_all_tickets(&space_info->tickets);
5146                                 space_info->flush = 0;
5147                         } else {
5148                                 flush_state = FLUSH_DELAYED_ITEMS_NR;
5149                         }
5150                 }
5151                 spin_unlock(&space_info->lock);
5152         } while (flush_state <= COMMIT_TRANS);
5153 }
5154
5155 void btrfs_init_async_reclaim_work(struct work_struct *work)
5156 {
5157         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
5158 }
5159
5160 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5161                                             struct btrfs_space_info *space_info,
5162                                             struct reserve_ticket *ticket)
5163 {
5164         u64 to_reclaim;
5165         int flush_state = FLUSH_DELAYED_ITEMS_NR;
5166
5167         spin_lock(&space_info->lock);
5168         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
5169                                                       false);
5170         if (!to_reclaim) {
5171                 spin_unlock(&space_info->lock);
5172                 return;
5173         }
5174         spin_unlock(&space_info->lock);
5175
5176         do {
5177                 flush_space(fs_info, space_info, to_reclaim, flush_state);
5178                 flush_state++;
5179                 spin_lock(&space_info->lock);
5180                 if (ticket->bytes == 0) {
5181                         spin_unlock(&space_info->lock);
5182                         return;
5183                 }
5184                 spin_unlock(&space_info->lock);
5185
5186                 /*
5187                  * Priority flushers can't wait on delalloc without
5188                  * deadlocking.
5189                  */
5190                 if (flush_state == FLUSH_DELALLOC ||
5191                     flush_state == FLUSH_DELALLOC_WAIT)
5192                         flush_state = ALLOC_CHUNK;
5193         } while (flush_state < COMMIT_TRANS);
5194 }
5195
5196 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5197                                struct btrfs_space_info *space_info,
5198                                struct reserve_ticket *ticket, u64 orig_bytes)
5199
5200 {
5201         DEFINE_WAIT(wait);
5202         int ret = 0;
5203
5204         spin_lock(&space_info->lock);
5205         while (ticket->bytes > 0 && ticket->error == 0) {
5206                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
5207                 if (ret) {
5208                         ret = -EINTR;
5209                         break;
5210                 }
5211                 spin_unlock(&space_info->lock);
5212
5213                 schedule();
5214
5215                 finish_wait(&ticket->wait, &wait);
5216                 spin_lock(&space_info->lock);
5217         }
5218         if (!ret)
5219                 ret = ticket->error;
5220         if (!list_empty(&ticket->list))
5221                 list_del_init(&ticket->list);
5222         if (ticket->bytes && ticket->bytes < orig_bytes) {
5223                 u64 num_bytes = orig_bytes - ticket->bytes;
5224                 space_info->bytes_may_use -= num_bytes;
5225                 trace_btrfs_space_reservation(fs_info, "space_info",
5226                                               space_info->flags, num_bytes, 0);
5227         }
5228         spin_unlock(&space_info->lock);
5229
5230         return ret;
5231 }
5232
5233 /**
5234  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5235  * @root - the root we're allocating for
5236  * @space_info - the space info we want to allocate from
5237  * @orig_bytes - the number of bytes we want
5238  * @flush - whether or not we can flush to make our reservation
5239  *
5240  * This will reserve orig_bytes number of bytes from the space info associated
5241  * with the block_rsv.  If there is not enough space it will make an attempt to
5242  * flush out space to make room.  It will do this by flushing delalloc if
5243  * possible or committing the transaction.  If flush is 0 then no attempts to
5244  * regain reservations will be made and this will fail if there is not enough
5245  * space already.
5246  */
5247 static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
5248                                     struct btrfs_space_info *space_info,
5249                                     u64 orig_bytes,
5250                                     enum btrfs_reserve_flush_enum flush,
5251                                     bool system_chunk)
5252 {
5253         struct reserve_ticket ticket;
5254         u64 used;
5255         int ret = 0;
5256
5257         ASSERT(orig_bytes);
5258         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
5259
5260         spin_lock(&space_info->lock);
5261         ret = -ENOSPC;
5262         used = btrfs_space_info_used(space_info, true);
5263
5264         /*
5265          * If we have enough space then hooray, make our reservation and carry
5266          * on.  If not see if we can overcommit, and if we can, hooray carry on.
5267          * If not things get more complicated.
5268          */
5269         if (used + orig_bytes <= space_info->total_bytes) {
5270                 space_info->bytes_may_use += orig_bytes;
5271                 trace_btrfs_space_reservation(fs_info, "space_info",
5272                                               space_info->flags, orig_bytes, 1);
5273                 ret = 0;
5274         } else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
5275                                   system_chunk)) {
5276                 space_info->bytes_may_use += orig_bytes;
5277                 trace_btrfs_space_reservation(fs_info, "space_info",
5278                                               space_info->flags, orig_bytes, 1);
5279                 ret = 0;
5280         }
5281
5282         /*
5283          * If we couldn't make a reservation then setup our reservation ticket
5284          * and kick the async worker if it's not already running.
5285          *
5286          * If we are a priority flusher then we just need to add our ticket to
5287          * the list and we will do our own flushing further down.
5288          */
5289         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5290                 ticket.bytes = orig_bytes;
5291                 ticket.error = 0;
5292                 init_waitqueue_head(&ticket.wait);
5293                 if (flush == BTRFS_RESERVE_FLUSH_ALL) {
5294                         list_add_tail(&ticket.list, &space_info->tickets);
5295                         if (!space_info->flush) {
5296                                 space_info->flush = 1;
5297                                 trace_btrfs_trigger_flush(fs_info,
5298                                                           space_info->flags,
5299                                                           orig_bytes, flush,
5300                                                           "enospc");
5301                                 queue_work(system_unbound_wq,
5302                                            &fs_info->async_reclaim_work);
5303                         }
5304                 } else {
5305                         list_add_tail(&ticket.list,
5306                                       &space_info->priority_tickets);
5307                 }
5308         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5309                 used += orig_bytes;
5310                 /*
5311                  * We will do the space reservation dance during log replay,
5312                  * which means we won't have fs_info->fs_root set, so don't do
5313                  * the async reclaim as we will panic.
5314                  */
5315                 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
5316                     need_do_async_reclaim(fs_info, space_info,
5317                                           used, system_chunk) &&
5318                     !work_busy(&fs_info->async_reclaim_work)) {
5319                         trace_btrfs_trigger_flush(fs_info, space_info->flags,
5320                                                   orig_bytes, flush, "preempt");
5321                         queue_work(system_unbound_wq,
5322                                    &fs_info->async_reclaim_work);
5323                 }
5324         }
5325         spin_unlock(&space_info->lock);
5326         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5327                 return ret;
5328
5329         if (flush == BTRFS_RESERVE_FLUSH_ALL)
5330                 return wait_reserve_ticket(fs_info, space_info, &ticket,
5331                                            orig_bytes);
5332
5333         ret = 0;
5334         priority_reclaim_metadata_space(fs_info, space_info, &ticket);
5335         spin_lock(&space_info->lock);
5336         if (ticket.bytes) {
5337                 if (ticket.bytes < orig_bytes) {
5338                         u64 num_bytes = orig_bytes - ticket.bytes;
5339                         space_info->bytes_may_use -= num_bytes;
5340                         trace_btrfs_space_reservation(fs_info, "space_info",
5341                                                       space_info->flags,
5342                                                       num_bytes, 0);
5343
5344                 }
5345                 list_del_init(&ticket.list);
5346                 ret = -ENOSPC;
5347         }
5348         spin_unlock(&space_info->lock);
5349         ASSERT(list_empty(&ticket.list));
5350         return ret;
5351 }
5352
5353 /**
5354  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5355  * @root - the root we're allocating for
5356  * @block_rsv - the block_rsv we're allocating for
5357  * @orig_bytes - the number of bytes we want
5358  * @flush - whether or not we can flush to make our reservation
5359  *
5360  * This will reserve orgi_bytes number of bytes from the space info associated
5361  * with the block_rsv.  If there is not enough space it will make an attempt to
5362  * flush out space to make room.  It will do this by flushing delalloc if
5363  * possible or committing the transaction.  If flush is 0 then no attempts to
5364  * regain reservations will be made and this will fail if there is not enough
5365  * space already.
5366  */
5367 static int reserve_metadata_bytes(struct btrfs_root *root,
5368                                   struct btrfs_block_rsv *block_rsv,
5369                                   u64 orig_bytes,
5370                                   enum btrfs_reserve_flush_enum flush)
5371 {
5372         struct btrfs_fs_info *fs_info = root->fs_info;
5373         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5374         int ret;
5375         bool system_chunk = (root == fs_info->chunk_root);
5376
5377         ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
5378                                        orig_bytes, flush, system_chunk);
5379         if (ret == -ENOSPC &&
5380             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5381                 if (block_rsv != global_rsv &&
5382                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5383                         ret = 0;
5384         }
5385         if (ret == -ENOSPC) {
5386                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
5387                                               block_rsv->space_info->flags,
5388                                               orig_bytes, 1);
5389
5390                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
5391                         dump_space_info(fs_info, block_rsv->space_info,
5392                                         orig_bytes, 0);
5393         }
5394         return ret;
5395 }
5396
5397 static struct btrfs_block_rsv *get_block_rsv(
5398                                         const struct btrfs_trans_handle *trans,
5399                                         const struct btrfs_root *root)
5400 {
5401         struct btrfs_fs_info *fs_info = root->fs_info;
5402         struct btrfs_block_rsv *block_rsv = NULL;
5403
5404         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5405             (root == fs_info->csum_root && trans->adding_csums) ||
5406             (root == fs_info->uuid_root))
5407                 block_rsv = trans->block_rsv;
5408
5409         if (!block_rsv)
5410                 block_rsv = root->block_rsv;
5411
5412         if (!block_rsv)
5413                 block_rsv = &fs_info->empty_block_rsv;
5414
5415         return block_rsv;
5416 }
5417
5418 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5419                                u64 num_bytes)
5420 {
5421         int ret = -ENOSPC;
5422         spin_lock(&block_rsv->lock);
5423         if (block_rsv->reserved >= num_bytes) {
5424                 block_rsv->reserved -= num_bytes;
5425                 if (block_rsv->reserved < block_rsv->size)
5426                         block_rsv->full = 0;
5427                 ret = 0;
5428         }
5429         spin_unlock(&block_rsv->lock);
5430         return ret;
5431 }
5432
5433 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5434                                 u64 num_bytes, int update_size)
5435 {
5436         spin_lock(&block_rsv->lock);
5437         block_rsv->reserved += num_bytes;
5438         if (update_size)
5439                 block_rsv->size += num_bytes;
5440         else if (block_rsv->reserved >= block_rsv->size)
5441                 block_rsv->full = 1;
5442         spin_unlock(&block_rsv->lock);
5443 }
5444
5445 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5446                              struct btrfs_block_rsv *dest, u64 num_bytes,
5447                              int min_factor)
5448 {
5449         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5450         u64 min_bytes;
5451
5452         if (global_rsv->space_info != dest->space_info)
5453                 return -ENOSPC;
5454
5455         spin_lock(&global_rsv->lock);
5456         min_bytes = div_factor(global_rsv->size, min_factor);
5457         if (global_rsv->reserved < min_bytes + num_bytes) {
5458                 spin_unlock(&global_rsv->lock);
5459                 return -ENOSPC;
5460         }
5461         global_rsv->reserved -= num_bytes;
5462         if (global_rsv->reserved < global_rsv->size)
5463                 global_rsv->full = 0;
5464         spin_unlock(&global_rsv->lock);
5465
5466         block_rsv_add_bytes(dest, num_bytes, 1);
5467         return 0;
5468 }
5469
5470 /*
5471  * This is for space we already have accounted in space_info->bytes_may_use, so
5472  * basically when we're returning space from block_rsv's.
5473  */
5474 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
5475                                      struct btrfs_space_info *space_info,
5476                                      u64 num_bytes)
5477 {
5478         struct reserve_ticket *ticket;
5479         struct list_head *head;
5480         u64 used;
5481         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
5482         bool check_overcommit = false;
5483
5484         spin_lock(&space_info->lock);
5485         head = &space_info->priority_tickets;
5486
5487         /*
5488          * If we are over our limit then we need to check and see if we can
5489          * overcommit, and if we can't then we just need to free up our space
5490          * and not satisfy any requests.
5491          */
5492         used = btrfs_space_info_used(space_info, true);
5493         if (used - num_bytes >= space_info->total_bytes)
5494                 check_overcommit = true;
5495 again:
5496         while (!list_empty(head) && num_bytes) {
5497                 ticket = list_first_entry(head, struct reserve_ticket,
5498                                           list);
5499                 /*
5500                  * We use 0 bytes because this space is already reserved, so
5501                  * adding the ticket space would be a double count.
5502                  */
5503                 if (check_overcommit &&
5504                     !can_overcommit(fs_info, space_info, 0, flush, false))
5505                         break;
5506                 if (num_bytes >= ticket->bytes) {
5507                         list_del_init(&ticket->list);
5508                         num_bytes -= ticket->bytes;
5509                         ticket->bytes = 0;
5510                         space_info->tickets_id++;
5511                         wake_up(&ticket->wait);
5512                 } else {
5513                         ticket->bytes -= num_bytes;
5514                         num_bytes = 0;
5515                 }
5516         }
5517
5518         if (num_bytes && head == &space_info->priority_tickets) {
5519                 head = &space_info->tickets;
5520                 flush = BTRFS_RESERVE_FLUSH_ALL;
5521                 goto again;
5522         }
5523         space_info->bytes_may_use -= num_bytes;
5524         trace_btrfs_space_reservation(fs_info, "space_info",
5525                                       space_info->flags, num_bytes, 0);
5526         spin_unlock(&space_info->lock);
5527 }
5528
5529 /*
5530  * This is for newly allocated space that isn't accounted in
5531  * space_info->bytes_may_use yet.  So if we allocate a chunk or unpin an extent
5532  * we use this helper.
5533  */
5534 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
5535                                      struct btrfs_space_info *space_info,
5536                                      u64 num_bytes)
5537 {
5538         struct reserve_ticket *ticket;
5539         struct list_head *head = &space_info->priority_tickets;
5540
5541 again:
5542         while (!list_empty(head) && num_bytes) {
5543                 ticket = list_first_entry(head, struct reserve_ticket,
5544                                           list);
5545                 if (num_bytes >= ticket->bytes) {
5546                         trace_btrfs_space_reservation(fs_info, "space_info",
5547                                                       space_info->flags,
5548                                                       ticket->bytes, 1);
5549                         list_del_init(&ticket->list);
5550                         num_bytes -= ticket->bytes;
5551                         space_info->bytes_may_use += ticket->bytes;
5552                         ticket->bytes = 0;
5553                         space_info->tickets_id++;
5554                         wake_up(&ticket->wait);
5555                 } else {
5556                         trace_btrfs_space_reservation(fs_info, "space_info",
5557                                                       space_info->flags,
5558                                                       num_bytes, 1);
5559                         space_info->bytes_may_use += num_bytes;
5560                         ticket->bytes -= num_bytes;
5561                         num_bytes = 0;
5562                 }
5563         }
5564
5565         if (num_bytes && head == &space_info->priority_tickets) {
5566                 head = &space_info->tickets;
5567                 goto again;
5568         }
5569 }
5570
5571 static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5572                                     struct btrfs_block_rsv *block_rsv,
5573                                     struct btrfs_block_rsv *dest, u64 num_bytes)
5574 {
5575         struct btrfs_space_info *space_info = block_rsv->space_info;
5576         u64 ret;
5577
5578         spin_lock(&block_rsv->lock);
5579         if (num_bytes == (u64)-1)
5580                 num_bytes = block_rsv->size;
5581         block_rsv->size -= num_bytes;
5582         if (block_rsv->reserved >= block_rsv->size) {
5583                 num_bytes = block_rsv->reserved - block_rsv->size;
5584                 block_rsv->reserved = block_rsv->size;
5585                 block_rsv->full = 1;
5586         } else {
5587                 num_bytes = 0;
5588         }
5589         spin_unlock(&block_rsv->lock);
5590
5591         ret = num_bytes;
5592         if (num_bytes > 0) {
5593                 if (dest) {
5594                         spin_lock(&dest->lock);
5595                         if (!dest->full) {
5596                                 u64 bytes_to_add;
5597
5598                                 bytes_to_add = dest->size - dest->reserved;
5599                                 bytes_to_add = min(num_bytes, bytes_to_add);
5600                                 dest->reserved += bytes_to_add;
5601                                 if (dest->reserved >= dest->size)
5602                                         dest->full = 1;
5603                                 num_bytes -= bytes_to_add;
5604                         }
5605                         spin_unlock(&dest->lock);
5606                 }
5607                 if (num_bytes)
5608                         space_info_add_old_bytes(fs_info, space_info,
5609                                                  num_bytes);
5610         }
5611         return ret;
5612 }
5613
5614 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
5615                             struct btrfs_block_rsv *dst, u64 num_bytes,
5616                             int update_size)
5617 {
5618         int ret;
5619
5620         ret = block_rsv_use_bytes(src, num_bytes);
5621         if (ret)
5622                 return ret;
5623
5624         block_rsv_add_bytes(dst, num_bytes, update_size);
5625         return 0;
5626 }
5627
5628 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5629 {
5630         memset(rsv, 0, sizeof(*rsv));
5631         spin_lock_init(&rsv->lock);
5632         rsv->type = type;
5633 }
5634
5635 void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
5636                                    struct btrfs_block_rsv *rsv,
5637                                    unsigned short type)
5638 {
5639         btrfs_init_block_rsv(rsv, type);
5640         rsv->space_info = __find_space_info(fs_info,
5641                                             BTRFS_BLOCK_GROUP_METADATA);
5642 }
5643
5644 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
5645                                               unsigned short type)
5646 {
5647         struct btrfs_block_rsv *block_rsv;
5648
5649         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5650         if (!block_rsv)
5651                 return NULL;
5652
5653         btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
5654         return block_rsv;
5655 }
5656
5657 void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
5658                           struct btrfs_block_rsv *rsv)
5659 {
5660         if (!rsv)
5661                 return;
5662         btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
5663         kfree(rsv);
5664 }
5665
5666 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5667 {
5668         kfree(rsv);
5669 }
5670
5671 int btrfs_block_rsv_add(struct btrfs_root *root,
5672                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5673                         enum btrfs_reserve_flush_enum flush)
5674 {
5675         int ret;
5676
5677         if (num_bytes == 0)
5678                 return 0;
5679
5680         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5681         if (!ret) {
5682                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5683                 return 0;
5684         }
5685
5686         return ret;
5687 }
5688
5689 int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
5690 {
5691         u64 num_bytes = 0;
5692         int ret = -ENOSPC;
5693
5694         if (!block_rsv)
5695                 return 0;
5696
5697         spin_lock(&block_rsv->lock);
5698         num_bytes = div_factor(block_rsv->size, min_factor);
5699         if (block_rsv->reserved >= num_bytes)
5700                 ret = 0;
5701         spin_unlock(&block_rsv->lock);
5702
5703         return ret;
5704 }
5705
5706 int btrfs_block_rsv_refill(struct btrfs_root *root,
5707                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5708                            enum btrfs_reserve_flush_enum flush)
5709 {
5710         u64 num_bytes = 0;
5711         int ret = -ENOSPC;
5712
5713         if (!block_rsv)
5714                 return 0;
5715
5716         spin_lock(&block_rsv->lock);
5717         num_bytes = min_reserved;
5718         if (block_rsv->reserved >= num_bytes)
5719                 ret = 0;
5720         else
5721                 num_bytes -= block_rsv->reserved;
5722         spin_unlock(&block_rsv->lock);
5723
5724         if (!ret)
5725                 return 0;
5726
5727         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5728         if (!ret) {
5729                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5730                 return 0;
5731         }
5732
5733         return ret;
5734 }
5735
5736 /**
5737  * btrfs_inode_rsv_refill - refill the inode block rsv.
5738  * @inode - the inode we are refilling.
5739  * @flush - the flusing restriction.
5740  *
5741  * Essentially the same as btrfs_block_rsv_refill, except it uses the
5742  * block_rsv->size as the minimum size.  We'll either refill the missing amount
5743  * or return if we already have enough space.  This will also handle the resreve
5744  * tracepoint for the reserved amount.
5745  */
5746 static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
5747                                   enum btrfs_reserve_flush_enum flush)
5748 {
5749         struct btrfs_root *root = inode->root;
5750         struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5751         u64 num_bytes = 0;
5752         int ret = -ENOSPC;
5753
5754         spin_lock(&block_rsv->lock);
5755         if (block_rsv->reserved < block_rsv->size)
5756                 num_bytes = block_rsv->size - block_rsv->reserved;
5757         spin_unlock(&block_rsv->lock);
5758
5759         if (num_bytes == 0)
5760                 return 0;
5761
5762         ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
5763         if (ret)
5764                 return ret;
5765         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5766         if (!ret) {
5767                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5768                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5769                                               btrfs_ino(inode), num_bytes, 1);
5770         }
5771         return ret;
5772 }
5773
5774 /**
5775  * btrfs_inode_rsv_release - release any excessive reservation.
5776  * @inode - the inode we need to release from.
5777  * @qgroup_free - free or convert qgroup meta.
5778  *   Unlike normal operation, qgroup meta reservation needs to know if we are
5779  *   freeing qgroup reservation or just converting it into per-trans.  Normally
5780  *   @qgroup_free is true for error handling, and false for normal release.
5781  *
5782  * This is the same as btrfs_block_rsv_release, except that it handles the
5783  * tracepoint for the reservation.
5784  */
5785 static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
5786 {
5787         struct btrfs_fs_info *fs_info = inode->root->fs_info;
5788         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5789         struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5790         u64 released = 0;
5791
5792         /*
5793          * Since we statically set the block_rsv->size we just want to say we
5794          * are releasing 0 bytes, and then we'll just get the reservation over
5795          * the size free'd.
5796          */
5797         released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0);
5798         if (released > 0)
5799                 trace_btrfs_space_reservation(fs_info, "delalloc",
5800                                               btrfs_ino(inode), released, 0);
5801         if (qgroup_free)
5802                 btrfs_qgroup_free_meta_prealloc(inode->root, released);
5803         else
5804                 btrfs_qgroup_convert_reserved_meta(inode->root, released);
5805 }
5806
5807 void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
5808                              struct btrfs_block_rsv *block_rsv,
5809                              u64 num_bytes)
5810 {
5811         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5812
5813         if (global_rsv == block_rsv ||
5814             block_rsv->space_info != global_rsv->space_info)
5815                 global_rsv = NULL;
5816         block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
5817 }
5818
5819 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5820 {
5821         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5822         struct btrfs_space_info *sinfo = block_rsv->space_info;
5823         u64 num_bytes;
5824
5825         /*
5826          * The global block rsv is based on the size of the extent tree, the
5827          * checksum tree and the root tree.  If the fs is empty we want to set
5828          * it to a minimal amount for safety.
5829          */
5830         num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
5831                 btrfs_root_used(&fs_info->csum_root->root_item) +
5832                 btrfs_root_used(&fs_info->tree_root->root_item);
5833         num_bytes = max_t(u64, num_bytes, SZ_16M);
5834
5835         spin_lock(&sinfo->lock);
5836         spin_lock(&block_rsv->lock);
5837
5838         block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5839
5840         if (block_rsv->reserved < block_rsv->size) {
5841                 num_bytes = btrfs_space_info_used(sinfo, true);
5842                 if (sinfo->total_bytes > num_bytes) {
5843                         num_bytes = sinfo->total_bytes - num_bytes;
5844                         num_bytes = min(num_bytes,
5845                                         block_rsv->size - block_rsv->reserved);
5846                         block_rsv->reserved += num_bytes;
5847                         sinfo->bytes_may_use += num_bytes;
5848                         trace_btrfs_space_reservation(fs_info, "space_info",
5849                                                       sinfo->flags, num_bytes,
5850                                                       1);
5851                 }
5852         } else if (block_rsv->reserved > block_rsv->size) {
5853                 num_bytes = block_rsv->reserved - block_rsv->size;
5854                 sinfo->bytes_may_use -= num_bytes;
5855                 trace_btrfs_space_reservation(fs_info, "space_info",
5856                                       sinfo->flags, num_bytes, 0);
5857                 block_rsv->reserved = block_rsv->size;
5858         }
5859
5860         if (block_rsv->reserved == block_rsv->size)
5861                 block_rsv->full = 1;
5862         else
5863                 block_rsv->full = 0;
5864
5865         spin_unlock(&block_rsv->lock);
5866         spin_unlock(&sinfo->lock);
5867 }
5868
5869 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5870 {
5871         struct btrfs_space_info *space_info;
5872
5873         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5874         fs_info->chunk_block_rsv.space_info = space_info;
5875
5876         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5877         fs_info->global_block_rsv.space_info = space_info;
5878         fs_info->trans_block_rsv.space_info = space_info;
5879         fs_info->empty_block_rsv.space_info = space_info;
5880         fs_info->delayed_block_rsv.space_info = space_info;
5881
5882         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5883         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5884         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5885         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5886         if (fs_info->quota_root)
5887                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5888         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5889
5890         update_global_block_rsv(fs_info);
5891 }
5892
5893 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5894 {
5895         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5896                                 (u64)-1);
5897         WARN_ON(fs_info->trans_block_rsv.size > 0);
5898         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5899         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5900         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5901         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5902         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5903 }
5904
5905
5906 /*
5907  * To be called after all the new block groups attached to the transaction
5908  * handle have been created (btrfs_create_pending_block_groups()).
5909  */
5910 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5911 {
5912         struct btrfs_fs_info *fs_info = trans->fs_info;
5913
5914         if (!trans->chunk_bytes_reserved)
5915                 return;
5916
5917         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5918
5919         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5920                                 trans->chunk_bytes_reserved);
5921         trans->chunk_bytes_reserved = 0;
5922 }
5923
5924 /* Can only return 0 or -ENOSPC */
5925 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5926                                   struct btrfs_inode *inode)
5927 {
5928         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
5929         struct btrfs_root *root = inode->root;
5930         /*
5931          * We always use trans->block_rsv here as we will have reserved space
5932          * for our orphan when starting the transaction, using get_block_rsv()
5933          * here will sometimes make us choose the wrong block rsv as we could be
5934          * doing a reloc inode for a non refcounted root.
5935          */
5936         struct btrfs_block_rsv *src_rsv = trans->block_rsv;
5937         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5938
5939         /*
5940          * We need to hold space in order to delete our orphan item once we've
5941          * added it, so this takes the reservation so we can release it later
5942          * when we are truly done with the orphan item.
5943          */
5944         u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
5945
5946         trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
5947                         num_bytes, 1);
5948         return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
5949 }
5950
5951 void btrfs_orphan_release_metadata(struct btrfs_inode *inode)
5952 {
5953         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
5954         struct btrfs_root *root = inode->root;
5955         u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
5956
5957         trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
5958                         num_bytes, 0);
5959         btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
5960 }
5961
5962 /*
5963  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5964  * root: the root of the parent directory
5965  * rsv: block reservation
5966  * items: the number of items that we need do reservation
5967  * qgroup_reserved: used to return the reserved size in qgroup
5968  *
5969  * This function is used to reserve the space for snapshot/subvolume
5970  * creation and deletion. Those operations are different with the
5971  * common file/directory operations, they change two fs/file trees
5972  * and root tree, the number of items that the qgroup reserves is
5973  * different with the free space reservation. So we can not use
5974  * the space reservation mechanism in start_transaction().
5975  */
5976 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5977                                      struct btrfs_block_rsv *rsv,
5978                                      int items,
5979                                      u64 *qgroup_reserved,
5980                                      bool use_global_rsv)
5981 {
5982         u64 num_bytes;
5983         int ret;
5984         struct btrfs_fs_info *fs_info = root->fs_info;
5985         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5986
5987         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
5988                 /* One for parent inode, two for dir entries */
5989                 num_bytes = 3 * fs_info->nodesize;
5990                 ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
5991                 if (ret)
5992                         return ret;
5993         } else {
5994                 num_bytes = 0;
5995         }
5996
5997         *qgroup_reserved = num_bytes;
5998
5999         num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
6000         rsv->space_info = __find_space_info(fs_info,
6001                                             BTRFS_BLOCK_GROUP_METADATA);
6002         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
6003                                   BTRFS_RESERVE_FLUSH_ALL);
6004
6005         if (ret == -ENOSPC && use_global_rsv)
6006                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
6007
6008         if (ret && *qgroup_reserved)
6009                 btrfs_qgroup_free_meta_prealloc(root, *qgroup_reserved);
6010
6011         return ret;
6012 }
6013
6014 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
6015                                       struct btrfs_block_rsv *rsv)
6016 {
6017         btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
6018 }
6019
6020 static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
6021                                                  struct btrfs_inode *inode)
6022 {
6023         struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
6024         u64 reserve_size = 0;
6025         u64 csum_leaves;
6026         unsigned outstanding_extents;
6027
6028         lockdep_assert_held(&inode->lock);
6029         outstanding_extents = inode->outstanding_extents;
6030         if (outstanding_extents)
6031                 reserve_size = btrfs_calc_trans_metadata_size(fs_info,
6032                                                 outstanding_extents + 1);
6033         csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
6034                                                  inode->csum_bytes);
6035         reserve_size += btrfs_calc_trans_metadata_size(fs_info,
6036                                                        csum_leaves);
6037
6038         spin_lock(&block_rsv->lock);
6039         block_rsv->size = reserve_size;
6040         spin_unlock(&block_rsv->lock);
6041 }
6042
6043 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
6044 {
6045         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6046         unsigned nr_extents;
6047         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
6048         int ret = 0;
6049         bool delalloc_lock = true;
6050
6051         /* If we are a free space inode we need to not flush since we will be in
6052          * the middle of a transaction commit.  We also don't need the delalloc
6053          * mutex since we won't race with anybody.  We need this mostly to make
6054          * lockdep shut its filthy mouth.
6055          *
6056          * If we have a transaction open (can happen if we call truncate_block
6057          * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
6058          */
6059         if (btrfs_is_free_space_inode(inode)) {
6060                 flush = BTRFS_RESERVE_NO_FLUSH;
6061                 delalloc_lock = false;
6062         } else {
6063                 if (current->journal_info)
6064                         flush = BTRFS_RESERVE_FLUSH_LIMIT;
6065
6066                 if (btrfs_transaction_in_commit(fs_info))
6067                         schedule_timeout(1);
6068         }
6069
6070         if (delalloc_lock)
6071                 mutex_lock(&inode->delalloc_mutex);
6072
6073         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
6074
6075         /* Add our new extents and calculate the new rsv size. */
6076         spin_lock(&inode->lock);
6077         nr_extents = count_max_extents(num_bytes);
6078         btrfs_mod_outstanding_extents(inode, nr_extents);
6079         inode->csum_bytes += num_bytes;
6080         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6081         spin_unlock(&inode->lock);
6082
6083         ret = btrfs_inode_rsv_refill(inode, flush);
6084         if (unlikely(ret))
6085                 goto out_fail;
6086
6087         if (delalloc_lock)
6088                 mutex_unlock(&inode->delalloc_mutex);
6089         return 0;
6090
6091 out_fail:
6092         spin_lock(&inode->lock);
6093         nr_extents = count_max_extents(num_bytes);
6094         btrfs_mod_outstanding_extents(inode, -nr_extents);
6095         inode->csum_bytes -= num_bytes;
6096         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6097         spin_unlock(&inode->lock);
6098
6099         btrfs_inode_rsv_release(inode, true);
6100         if (delalloc_lock)
6101                 mutex_unlock(&inode->delalloc_mutex);
6102         return ret;
6103 }
6104
6105 /**
6106  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
6107  * @inode: the inode to release the reservation for.
6108  * @num_bytes: the number of bytes we are releasing.
6109  * @qgroup_free: free qgroup reservation or convert it to per-trans reservation
6110  *
6111  * This will release the metadata reservation for an inode.  This can be called
6112  * once we complete IO for a given set of bytes to release their metadata
6113  * reservations, or on error for the same reason.
6114  */
6115 void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
6116                                      bool qgroup_free)
6117 {
6118         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6119
6120         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
6121         spin_lock(&inode->lock);
6122         inode->csum_bytes -= num_bytes;
6123         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6124         spin_unlock(&inode->lock);
6125
6126         if (btrfs_is_testing(fs_info))
6127                 return;
6128
6129         btrfs_inode_rsv_release(inode, qgroup_free);
6130 }
6131
6132 /**
6133  * btrfs_delalloc_release_extents - release our outstanding_extents
6134  * @inode: the inode to balance the reservation for.
6135  * @num_bytes: the number of bytes we originally reserved with
6136  * @qgroup_free: do we need to free qgroup meta reservation or convert them.
6137  *
6138  * When we reserve space we increase outstanding_extents for the extents we may
6139  * add.  Once we've set the range as delalloc or created our ordered extents we
6140  * have outstanding_extents to track the real usage, so we use this to free our
6141  * temporarily tracked outstanding_extents.  This _must_ be used in conjunction
6142  * with btrfs_delalloc_reserve_metadata.
6143  */
6144 void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
6145                                     bool qgroup_free)
6146 {
6147         struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
6148         unsigned num_extents;
6149
6150         spin_lock(&inode->lock);
6151         num_extents = count_max_extents(num_bytes);
6152         btrfs_mod_outstanding_extents(inode, -num_extents);
6153         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6154         spin_unlock(&inode->lock);
6155
6156         if (btrfs_is_testing(fs_info))
6157                 return;
6158
6159         btrfs_inode_rsv_release(inode, qgroup_free);
6160 }
6161
6162 /**
6163  * btrfs_delalloc_reserve_space - reserve data and metadata space for
6164  * delalloc
6165  * @inode: inode we're writing to
6166  * @start: start range we are writing to
6167  * @len: how long the range we are writing to
6168  * @reserved: mandatory parameter, record actually reserved qgroup ranges of
6169  *            current reservation.
6170  *
6171  * This will do the following things
6172  *
6173  * o reserve space in data space info for num bytes
6174  *   and reserve precious corresponding qgroup space
6175  *   (Done in check_data_free_space)
6176  *
6177  * o reserve space for metadata space, based on the number of outstanding
6178  *   extents and how much csums will be needed
6179  *   also reserve metadata space in a per root over-reserve method.
6180  * o add to the inodes->delalloc_bytes
6181  * o add it to the fs_info's delalloc inodes list.
6182  *   (Above 3 all done in delalloc_reserve_metadata)
6183  *
6184  * Return 0 for success
6185  * Return <0 for error(-ENOSPC or -EQUOT)
6186  */
6187 int btrfs_delalloc_reserve_space(struct inode *inode,
6188                         struct extent_changeset **reserved, u64 start, u64 len)
6189 {
6190         int ret;
6191
6192         ret = btrfs_check_data_free_space(inode, reserved, start, len);
6193         if (ret < 0)
6194                 return ret;
6195         ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
6196         if (ret < 0)
6197                 btrfs_free_reserved_data_space(inode, *reserved, start, len);
6198         return ret;
6199 }
6200
6201 /**
6202  * btrfs_delalloc_release_space - release data and metadata space for delalloc
6203  * @inode: inode we're releasing space for
6204  * @start: start position of the space already reserved
6205  * @len: the len of the space already reserved
6206  * @release_bytes: the len of the space we consumed or didn't use
6207  *
6208  * This function will release the metadata space that was not used and will
6209  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
6210  * list if there are no delalloc bytes left.
6211  * Also it will handle the qgroup reserved space.
6212  */
6213 void btrfs_delalloc_release_space(struct inode *inode,
6214                                   struct extent_changeset *reserved,
6215                                   u64 start, u64 len, bool qgroup_free)
6216 {
6217         btrfs_delalloc_release_metadata(BTRFS_I(inode), len, qgroup_free);
6218         btrfs_free_reserved_data_space(inode, reserved, start, len);
6219 }
6220
6221 static int update_block_group(struct btrfs_trans_handle *trans,
6222                               struct btrfs_fs_info *info, u64 bytenr,
6223                               u64 num_bytes, int alloc)
6224 {
6225         struct btrfs_block_group_cache *cache = NULL;
6226         u64 total = num_bytes;
6227         u64 old_val;
6228         u64 byte_in_group;
6229         int factor;
6230
6231         /* block accounting for super block */
6232         spin_lock(&info->delalloc_root_lock);
6233         old_val = btrfs_super_bytes_used(info->super_copy);
6234         if (alloc)
6235                 old_val += num_bytes;
6236         else
6237                 old_val -= num_bytes;
6238         btrfs_set_super_bytes_used(info->super_copy, old_val);
6239         spin_unlock(&info->delalloc_root_lock);
6240
6241         while (total) {
6242                 cache = btrfs_lookup_block_group(info, bytenr);
6243                 if (!cache)
6244                         return -ENOENT;
6245                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
6246                                     BTRFS_BLOCK_GROUP_RAID1 |
6247                                     BTRFS_BLOCK_GROUP_RAID10))
6248                         factor = 2;
6249                 else
6250                         factor = 1;
6251                 /*
6252                  * If this block group has free space cache written out, we
6253                  * need to make sure to load it if we are removing space.  This
6254                  * is because we need the unpinning stage to actually add the
6255                  * space back to the block group, otherwise we will leak space.
6256                  */
6257                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
6258                         cache_block_group(cache, 1);
6259
6260                 byte_in_group = bytenr - cache->key.objectid;
6261                 WARN_ON(byte_in_group > cache->key.offset);
6262
6263                 spin_lock(&cache->space_info->lock);
6264                 spin_lock(&cache->lock);
6265
6266                 if (btrfs_test_opt(info, SPACE_CACHE) &&
6267                     cache->disk_cache_state < BTRFS_DC_CLEAR)
6268                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6269
6270                 old_val = btrfs_block_group_used(&cache->item);
6271                 num_bytes = min(total, cache->key.offset - byte_in_group);
6272                 if (alloc) {
6273                         old_val += num_bytes;
6274                         btrfs_set_block_group_used(&cache->item, old_val);
6275                         cache->reserved -= num_bytes;
6276                         cache->space_info->bytes_reserved -= num_bytes;
6277                         cache->space_info->bytes_used += num_bytes;
6278                         cache->space_info->disk_used += num_bytes * factor;
6279                         spin_unlock(&cache->lock);
6280                         spin_unlock(&cache->space_info->lock);
6281                 } else {
6282                         old_val -= num_bytes;
6283                         btrfs_set_block_group_used(&cache->item, old_val);
6284                         cache->pinned += num_bytes;
6285                         cache->space_info->bytes_pinned += num_bytes;
6286                         cache->space_info->bytes_used -= num_bytes;
6287                         cache->space_info->disk_used -= num_bytes * factor;
6288                         spin_unlock(&cache->lock);
6289                         spin_unlock(&cache->space_info->lock);
6290
6291                         trace_btrfs_space_reservation(info, "pinned",
6292                                                       cache->space_info->flags,
6293                                                       num_bytes, 1);
6294                         percpu_counter_add(&cache->space_info->total_bytes_pinned,
6295                                            num_bytes);
6296                         set_extent_dirty(info->pinned_extents,
6297                                          bytenr, bytenr + num_bytes - 1,
6298                                          GFP_NOFS | __GFP_NOFAIL);
6299                 }
6300
6301                 spin_lock(&trans->transaction->dirty_bgs_lock);
6302                 if (list_empty(&cache->dirty_list)) {
6303                         list_add_tail(&cache->dirty_list,
6304                                       &trans->transaction->dirty_bgs);
6305                                 trans->transaction->num_dirty_bgs++;
6306                         btrfs_get_block_group(cache);
6307                 }
6308                 spin_unlock(&trans->transaction->dirty_bgs_lock);
6309
6310                 /*
6311                  * No longer have used bytes in this block group, queue it for
6312                  * deletion. We do this after adding the block group to the
6313                  * dirty list to avoid races between cleaner kthread and space
6314                  * cache writeout.
6315                  */
6316                 if (!alloc && old_val == 0) {
6317                         spin_lock(&info->unused_bgs_lock);
6318                         if (list_empty(&cache->bg_list)) {
6319                                 btrfs_get_block_group(cache);
6320                                 list_add_tail(&cache->bg_list,
6321                                               &info->unused_bgs);
6322                         }
6323                         spin_unlock(&info->unused_bgs_lock);
6324                 }
6325
6326                 btrfs_put_block_group(cache);
6327                 total -= num_bytes;
6328                 bytenr += num_bytes;
6329         }
6330         return 0;
6331 }
6332
6333 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
6334 {
6335         struct btrfs_block_group_cache *cache;
6336         u64 bytenr;
6337
6338         spin_lock(&fs_info->block_group_cache_lock);
6339         bytenr = fs_info->first_logical_byte;
6340         spin_unlock(&fs_info->block_group_cache_lock);
6341
6342         if (bytenr < (u64)-1)
6343                 return bytenr;
6344
6345         cache = btrfs_lookup_first_block_group(fs_info, search_start);
6346         if (!cache)
6347                 return 0;
6348
6349         bytenr = cache->key.objectid;
6350         btrfs_put_block_group(cache);
6351
6352         return bytenr;
6353 }
6354
6355 static int pin_down_extent(struct btrfs_fs_info *fs_info,
6356                            struct btrfs_block_group_cache *cache,
6357                            u64 bytenr, u64 num_bytes, int reserved)
6358 {
6359         spin_lock(&cache->space_info->lock);
6360         spin_lock(&cache->lock);
6361         cache->pinned += num_bytes;
6362         cache->space_info->bytes_pinned += num_bytes;
6363         if (reserved) {
6364                 cache->reserved -= num_bytes;
6365                 cache->space_info->bytes_reserved -= num_bytes;
6366         }
6367         spin_unlock(&cache->lock);
6368         spin_unlock(&cache->space_info->lock);
6369
6370         trace_btrfs_space_reservation(fs_info, "pinned",
6371                                       cache->space_info->flags, num_bytes, 1);
6372         percpu_counter_add(&cache->space_info->total_bytes_pinned, num_bytes);
6373         set_extent_dirty(fs_info->pinned_extents, bytenr,
6374                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6375         return 0;
6376 }
6377
6378 /*
6379  * this function must be called within transaction
6380  */
6381 int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
6382                      u64 bytenr, u64 num_bytes, int reserved)
6383 {
6384         struct btrfs_block_group_cache *cache;
6385
6386         cache = btrfs_lookup_block_group(fs_info, bytenr);
6387         BUG_ON(!cache); /* Logic error */
6388
6389         pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);
6390
6391         btrfs_put_block_group(cache);
6392         return 0;
6393 }
6394
6395 /*
6396  * this function must be called within transaction
6397  */
6398 int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
6399                                     u64 bytenr, u64 num_bytes)
6400 {
6401         struct btrfs_block_group_cache *cache;
6402         int ret;
6403
6404         cache = btrfs_lookup_block_group(fs_info, bytenr);
6405         if (!cache)
6406                 return -EINVAL;
6407
6408         /*
6409          * pull in the free space cache (if any) so that our pin
6410          * removes the free space from the cache.  We have load_only set
6411          * to one because the slow code to read in the free extents does check
6412          * the pinned extents.
6413          */
6414         cache_block_group(cache, 1);
6415
6416         pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);
6417
6418         /* remove us from the free space cache (if we're there at all) */
6419         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6420         btrfs_put_block_group(cache);
6421         return ret;
6422 }
6423
6424 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
6425                                    u64 start, u64 num_bytes)
6426 {
6427         int ret;
6428         struct btrfs_block_group_cache *block_group;
6429         struct btrfs_caching_control *caching_ctl;
6430
6431         block_group = btrfs_lookup_block_group(fs_info, start);
6432         if (!block_group)
6433                 return -EINVAL;
6434
6435         cache_block_group(block_group, 0);
6436         caching_ctl = get_caching_control(block_group);
6437
6438         if (!caching_ctl) {
6439                 /* Logic error */
6440                 BUG_ON(!block_group_cache_done(block_group));
6441                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6442         } else {
6443                 mutex_lock(&caching_ctl->mutex);
6444
6445                 if (start >= caching_ctl->progress) {
6446                         ret = add_excluded_extent(fs_info, start, num_bytes);
6447                 } else if (start + num_bytes <= caching_ctl->progress) {
6448                         ret = btrfs_remove_free_space(block_group,
6449                                                       start, num_bytes);
6450                 } else {
6451                         num_bytes = caching_ctl->progress - start;
6452                         ret = btrfs_remove_free_space(block_group,
6453                                                       start, num_bytes);
6454                         if (ret)
6455                                 goto out_lock;
6456
6457                         num_bytes = (start + num_bytes) -
6458                                 caching_ctl->progress;
6459                         start = caching_ctl->progress;
6460                         ret = add_excluded_extent(fs_info, start, num_bytes);
6461                 }
6462 out_lock:
6463                 mutex_unlock(&caching_ctl->mutex);
6464                 put_caching_control(caching_ctl);
6465         }
6466         btrfs_put_block_group(block_group);
6467         return ret;
6468 }
6469
6470 int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
6471                                  struct extent_buffer *eb)
6472 {
6473         struct btrfs_file_extent_item *item;
6474         struct btrfs_key key;
6475         int found_type;
6476         int i;
6477
6478         if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
6479                 return 0;
6480
6481         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6482                 btrfs_item_key_to_cpu(eb, &key, i);
6483                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6484                         continue;
6485                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6486                 found_type = btrfs_file_extent_type(eb, item);
6487                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6488                         continue;
6489                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6490                         continue;
6491                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6492                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6493                 __exclude_logged_extent(fs_info, key.objectid, key.offset);
6494         }
6495
6496         return 0;
6497 }
6498
6499 static void
6500 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6501 {
6502         atomic_inc(&bg->reservations);
6503 }
6504
6505 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6506                                         const u64 start)
6507 {
6508         struct btrfs_block_group_cache *bg;
6509
6510         bg = btrfs_lookup_block_group(fs_info, start);
6511         ASSERT(bg);
6512         if (atomic_dec_and_test(&bg->reservations))
6513                 wake_up_var(&bg->reservations);
6514         btrfs_put_block_group(bg);
6515 }
6516
6517 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6518 {
6519         struct btrfs_space_info *space_info = bg->space_info;
6520
6521         ASSERT(bg->ro);
6522
6523         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6524                 return;
6525
6526         /*
6527          * Our block group is read only but before we set it to read only,
6528          * some task might have had allocated an extent from it already, but it
6529          * has not yet created a respective ordered extent (and added it to a
6530          * root's list of ordered extents).
6531          * Therefore wait for any task currently allocating extents, since the
6532          * block group's reservations counter is incremented while a read lock
6533          * on the groups' semaphore is held and decremented after releasing
6534          * the read access on that semaphore and creating the ordered extent.
6535          */
6536         down_write(&space_info->groups_sem);
6537         up_write(&space_info->groups_sem);
6538
6539         wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
6540 }
6541
6542 /**
6543  * btrfs_add_reserved_bytes - update the block_group and space info counters
6544  * @cache:      The cache we are manipulating
6545  * @ram_bytes:  The number of bytes of file content, and will be same to
6546  *              @num_bytes except for the compress path.
6547  * @num_bytes:  The number of bytes in question
6548  * @delalloc:   The blocks are allocated for the delalloc write
6549  *
6550  * This is called by the allocator when it reserves space. If this is a
6551  * reservation and the block group has become read only we cannot make the
6552  * reservation and return -EAGAIN, otherwise this function always succeeds.
6553  */
6554 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
6555                                     u64 ram_bytes, u64 num_bytes, int delalloc)
6556 {
6557         struct btrfs_space_info *space_info = cache->space_info;
6558         int ret = 0;
6559
6560         spin_lock(&space_info->lock);
6561         spin_lock(&cache->lock);
6562         if (cache->ro) {
6563                 ret = -EAGAIN;
6564         } else {
6565                 cache->reserved += num_bytes;
6566                 space_info->bytes_reserved += num_bytes;
6567
6568                 trace_btrfs_space_reservation(cache->fs_info,
6569                                 "space_info", space_info->flags,
6570                                 ram_bytes, 0);
6571                 space_info->bytes_may_use -= ram_bytes;
6572                 if (delalloc)
6573                         cache->delalloc_bytes += num_bytes;
6574         }
6575         spin_unlock(&cache->lock);
6576         spin_unlock(&space_info->lock);
6577         return ret;
6578 }
6579
6580 /**
6581  * btrfs_free_reserved_bytes - update the block_group and space info counters
6582  * @cache:      The cache we are manipulating
6583  * @num_bytes:  The number of bytes in question
6584  * @delalloc:   The blocks are allocated for the delalloc write
6585  *
6586  * This is called by somebody who is freeing space that was never actually used
6587  * on disk.  For example if you reserve some space for a new leaf in transaction
6588  * A and before transaction A commits you free that leaf, you call this with
6589  * reserve set to 0 in order to clear the reservation.
6590  */
6591
6592 static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
6593                                      u64 num_bytes, int delalloc)
6594 {
6595         struct btrfs_space_info *space_info = cache->space_info;
6596         int ret = 0;
6597
6598         spin_lock(&space_info->lock);
6599         spin_lock(&cache->lock);
6600         if (cache->ro)
6601                 space_info->bytes_readonly += num_bytes;
6602         cache->reserved -= num_bytes;
6603         space_info->bytes_reserved -= num_bytes;
6604
6605         if (delalloc)
6606                 cache->delalloc_bytes -= num_bytes;
6607         spin_unlock(&cache->lock);
6608         spin_unlock(&space_info->lock);
6609         return ret;
6610 }
6611 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
6612 {
6613         struct btrfs_caching_control *next;
6614         struct btrfs_caching_control *caching_ctl;
6615         struct btrfs_block_group_cache *cache;
6616
6617         down_write(&fs_info->commit_root_sem);
6618
6619         list_for_each_entry_safe(caching_ctl, next,
6620                                  &fs_info->caching_block_groups, list) {
6621                 cache = caching_ctl->block_group;
6622                 if (block_group_cache_done(cache)) {
6623                         cache->last_byte_to_unpin = (u64)-1;
6624                         list_del_init(&caching_ctl->list);
6625                         put_caching_control(caching_ctl);
6626                 } else {
6627                         cache->last_byte_to_unpin = caching_ctl->progress;
6628                 }
6629         }
6630
6631         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6632                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6633         else
6634                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6635
6636         up_write(&fs_info->commit_root_sem);
6637
6638         update_global_block_rsv(fs_info);
6639 }
6640
6641 /*
6642  * Returns the free cluster for the given space info and sets empty_cluster to
6643  * what it should be based on the mount options.
6644  */
6645 static struct btrfs_free_cluster *
6646 fetch_cluster_info(struct btrfs_fs_info *fs_info,
6647                    struct btrfs_space_info *space_info, u64 *empty_cluster)
6648 {
6649         struct btrfs_free_cluster *ret = NULL;
6650
6651         *empty_cluster = 0;
6652         if (btrfs_mixed_space_info(space_info))
6653                 return ret;
6654
6655         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6656                 ret = &fs_info->meta_alloc_cluster;
6657                 if (btrfs_test_opt(fs_info, SSD))
6658                         *empty_cluster = SZ_2M;
6659                 else
6660                         *empty_cluster = SZ_64K;
6661         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
6662                    btrfs_test_opt(fs_info, SSD_SPREAD)) {
6663                 *empty_cluster = SZ_2M;
6664                 ret = &fs_info->data_alloc_cluster;
6665         }
6666
6667         return ret;
6668 }
6669
6670 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
6671                               u64 start, u64 end,
6672                               const bool return_free_space)
6673 {
6674         struct btrfs_block_group_cache *cache = NULL;
6675         struct btrfs_space_info *space_info;
6676         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6677         struct btrfs_free_cluster *cluster = NULL;
6678         u64 len;
6679         u64 total_unpinned = 0;
6680         u64 empty_cluster = 0;
6681         bool readonly;
6682
6683         while (start <= end) {
6684                 readonly = false;
6685                 if (!cache ||
6686                     start >= cache->key.objectid + cache->key.offset) {
6687                         if (cache)
6688                                 btrfs_put_block_group(cache);
6689                         total_unpinned = 0;
6690                         cache = btrfs_lookup_block_group(fs_info, start);
6691                         BUG_ON(!cache); /* Logic error */
6692
6693                         cluster = fetch_cluster_info(fs_info,
6694                                                      cache->space_info,
6695                                                      &empty_cluster);
6696                         empty_cluster <<= 1;
6697                 }
6698
6699                 len = cache->key.objectid + cache->key.offset - start;
6700                 len = min(len, end + 1 - start);
6701
6702                 if (start < cache->last_byte_to_unpin) {
6703                         len = min(len, cache->last_byte_to_unpin - start);
6704                         if (return_free_space)
6705                                 btrfs_add_free_space(cache, start, len);
6706                 }
6707
6708                 start += len;
6709                 total_unpinned += len;
6710                 space_info = cache->space_info;
6711
6712                 /*
6713                  * If this space cluster has been marked as fragmented and we've
6714                  * unpinned enough in this block group to potentially allow a
6715                  * cluster to be created inside of it go ahead and clear the
6716                  * fragmented check.
6717                  */
6718                 if (cluster && cluster->fragmented &&
6719                     total_unpinned > empty_cluster) {
6720                         spin_lock(&cluster->lock);
6721                         cluster->fragmented = 0;
6722                         spin_unlock(&cluster->lock);
6723                 }
6724
6725                 spin_lock(&space_info->lock);
6726                 spin_lock(&cache->lock);
6727                 cache->pinned -= len;
6728                 space_info->bytes_pinned -= len;
6729
6730                 trace_btrfs_space_reservation(fs_info, "pinned",
6731                                               space_info->flags, len, 0);
6732                 space_info->max_extent_size = 0;
6733                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6734                 if (cache->ro) {
6735                         space_info->bytes_readonly += len;
6736                         readonly = true;
6737                 }
6738                 spin_unlock(&cache->lock);
6739                 if (!readonly && return_free_space &&
6740                     global_rsv->space_info == space_info) {
6741                         u64 to_add = len;
6742
6743                         spin_lock(&global_rsv->lock);
6744                         if (!global_rsv->full) {
6745                                 to_add = min(len, global_rsv->size -
6746                                              global_rsv->reserved);
6747                                 global_rsv->reserved += to_add;
6748                                 space_info->bytes_may_use += to_add;
6749                                 if (global_rsv->reserved >= global_rsv->size)
6750                                         global_rsv->full = 1;
6751                                 trace_btrfs_space_reservation(fs_info,
6752                                                               "space_info",
6753                                                               space_info->flags,
6754                                                               to_add, 1);
6755                                 len -= to_add;
6756                         }
6757                         spin_unlock(&global_rsv->lock);
6758                         /* Add to any tickets we may have */
6759                         if (len)
6760                                 space_info_add_new_bytes(fs_info, space_info,
6761                                                          len);
6762                 }
6763                 spin_unlock(&space_info->lock);
6764         }
6765
6766         if (cache)
6767                 btrfs_put_block_group(cache);
6768         return 0;
6769 }
6770
6771 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
6772 {
6773         struct btrfs_fs_info *fs_info = trans->fs_info;
6774         struct btrfs_block_group_cache *block_group, *tmp;
6775         struct list_head *deleted_bgs;
6776         struct extent_io_tree *unpin;
6777         u64 start;
6778         u64 end;
6779         int ret;
6780
6781         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6782                 unpin = &fs_info->freed_extents[1];
6783         else
6784                 unpin = &fs_info->freed_extents[0];
6785
6786         while (!trans->aborted) {
6787                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6788                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6789                                             EXTENT_DIRTY, NULL);
6790                 if (ret) {
6791                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6792                         break;
6793                 }
6794
6795                 if (btrfs_test_opt(fs_info, DISCARD))
6796                         ret = btrfs_discard_extent(fs_info, start,
6797                                                    end + 1 - start, NULL);
6798
6799                 clear_extent_dirty(unpin, start, end);
6800                 unpin_extent_range(fs_info, start, end, true);
6801                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6802                 cond_resched();
6803         }
6804
6805         /*
6806          * Transaction is finished.  We don't need the lock anymore.  We
6807          * do need to clean up the block groups in case of a transaction
6808          * abort.
6809          */
6810         deleted_bgs = &trans->transaction->deleted_bgs;
6811         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6812                 u64 trimmed = 0;
6813
6814                 ret = -EROFS;
6815                 if (!trans->aborted)
6816                         ret = btrfs_discard_extent(fs_info,
6817                                                    block_group->key.objectid,
6818                                                    block_group->key.offset,
6819                                                    &trimmed);
6820
6821                 list_del_init(&block_group->bg_list);
6822                 btrfs_put_block_group_trimming(block_group);
6823                 btrfs_put_block_group(block_group);
6824
6825                 if (ret) {
6826                         const char *errstr = btrfs_decode_error(ret);
6827                         btrfs_warn(fs_info,
6828                            "discard failed while removing blockgroup: errno=%d %s",
6829                                    ret, errstr);
6830                 }
6831         }
6832
6833         return 0;
6834 }
6835
6836 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6837                                 struct btrfs_fs_info *info,
6838                                 struct btrfs_delayed_ref_node *node, u64 parent,
6839                                 u64 root_objectid, u64 owner_objectid,
6840                                 u64 owner_offset, int refs_to_drop,
6841                                 struct btrfs_delayed_extent_op *extent_op)
6842 {
6843         struct btrfs_key key;
6844         struct btrfs_path *path;
6845         struct btrfs_root *extent_root = info->extent_root;
6846         struct extent_buffer *leaf;
6847         struct btrfs_extent_item *ei;
6848         struct btrfs_extent_inline_ref *iref;
6849         int ret;
6850         int is_data;
6851         int extent_slot = 0;
6852         int found_extent = 0;
6853         int num_to_del = 1;
6854         u32 item_size;
6855         u64 refs;
6856         u64 bytenr = node->bytenr;
6857         u64 num_bytes = node->num_bytes;
6858         int last_ref = 0;
6859         bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
6860
6861         path = btrfs_alloc_path();
6862         if (!path)
6863                 return -ENOMEM;
6864
6865         path->reada = READA_FORWARD;
6866         path->leave_spinning = 1;
6867
6868         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6869         BUG_ON(!is_data && refs_to_drop != 1);
6870
6871         if (is_data)
6872                 skinny_metadata = false;
6873
6874         ret = lookup_extent_backref(trans, info, path, &iref,
6875                                     bytenr, num_bytes, parent,
6876                                     root_objectid, owner_objectid,
6877                                     owner_offset);
6878         if (ret == 0) {
6879                 extent_slot = path->slots[0];
6880                 while (extent_slot >= 0) {
6881                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6882                                               extent_slot);
6883                         if (key.objectid != bytenr)
6884                                 break;
6885                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6886                             key.offset == num_bytes) {
6887                                 found_extent = 1;
6888                                 break;
6889                         }
6890                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6891                             key.offset == owner_objectid) {
6892                                 found_extent = 1;
6893                                 break;
6894                         }
6895                         if (path->slots[0] - extent_slot > 5)
6896                                 break;
6897                         extent_slot--;
6898                 }
6899 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6900                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6901                 if (found_extent && item_size < sizeof(*ei))
6902                         found_extent = 0;
6903 #endif
6904                 if (!found_extent) {
6905                         BUG_ON(iref);
6906                         ret = remove_extent_backref(trans, info, path, NULL,
6907                                                     refs_to_drop,
6908                                                     is_data, &last_ref);
6909                         if (ret) {
6910                                 btrfs_abort_transaction(trans, ret);
6911                                 goto out;
6912                         }
6913                         btrfs_release_path(path);
6914                         path->leave_spinning = 1;
6915
6916                         key.objectid = bytenr;
6917                         key.type = BTRFS_EXTENT_ITEM_KEY;
6918                         key.offset = num_bytes;
6919
6920                         if (!is_data && skinny_metadata) {
6921                                 key.type = BTRFS_METADATA_ITEM_KEY;
6922                                 key.offset = owner_objectid;
6923                         }
6924
6925                         ret = btrfs_search_slot(trans, extent_root,
6926                                                 &key, path, -1, 1);
6927                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6928                                 /*
6929                                  * Couldn't find our skinny metadata item,
6930                                  * see if we have ye olde extent item.
6931                                  */
6932                                 path->slots[0]--;
6933                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6934                                                       path->slots[0]);
6935                                 if (key.objectid == bytenr &&
6936                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6937                                     key.offset == num_bytes)
6938                                         ret = 0;
6939                         }
6940
6941                         if (ret > 0 && skinny_metadata) {
6942                                 skinny_metadata = false;
6943                                 key.objectid = bytenr;
6944                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6945                                 key.offset = num_bytes;
6946                                 btrfs_release_path(path);
6947                                 ret = btrfs_search_slot(trans, extent_root,
6948                                                         &key, path, -1, 1);
6949                         }
6950
6951                         if (ret) {
6952                                 btrfs_err(info,
6953                                           "umm, got %d back from search, was looking for %llu",
6954                                           ret, bytenr);
6955                                 if (ret > 0)
6956                                         btrfs_print_leaf(path->nodes[0]);
6957                         }
6958                         if (ret < 0) {
6959                                 btrfs_abort_transaction(trans, ret);
6960                                 goto out;
6961                         }
6962                         extent_slot = path->slots[0];
6963                 }
6964         } else if (WARN_ON(ret == -ENOENT)) {
6965                 btrfs_print_leaf(path->nodes[0]);
6966                 btrfs_err(info,
6967                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6968                         bytenr, parent, root_objectid, owner_objectid,
6969                         owner_offset);
6970                 btrfs_abort_transaction(trans, ret);
6971                 goto out;
6972         } else {
6973                 btrfs_abort_transaction(trans, ret);
6974                 goto out;
6975         }
6976
6977         leaf = path->nodes[0];
6978         item_size = btrfs_item_size_nr(leaf, extent_slot);
6979 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6980         if (item_size < sizeof(*ei)) {
6981                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6982                 ret = convert_extent_item_v0(trans, info, path, owner_objectid,
6983                                              0);
6984                 if (ret < 0) {
6985                         btrfs_abort_transaction(trans, ret);
6986                         goto out;
6987                 }
6988
6989                 btrfs_release_path(path);
6990                 path->leave_spinning = 1;
6991
6992                 key.objectid = bytenr;
6993                 key.type = BTRFS_EXTENT_ITEM_KEY;
6994                 key.offset = num_bytes;
6995
6996                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6997                                         -1, 1);
6998                 if (ret) {
6999                         btrfs_err(info,
7000                                   "umm, got %d back from search, was looking for %llu",
7001                                 ret, bytenr);
7002                         btrfs_print_leaf(path->nodes[0]);
7003                 }
7004                 if (ret < 0) {
7005                         btrfs_abort_transaction(trans, ret);
7006                         goto out;
7007                 }
7008
7009                 extent_slot = path->slots[0];
7010                 leaf = path->nodes[0];
7011                 item_size = btrfs_item_size_nr(leaf, extent_slot);
7012         }
7013 #endif
7014         BUG_ON(item_size < sizeof(*ei));
7015         ei = btrfs_item_ptr(leaf, extent_slot,
7016                             struct btrfs_extent_item);
7017         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
7018             key.type == BTRFS_EXTENT_ITEM_KEY) {
7019                 struct btrfs_tree_block_info *bi;
7020                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
7021                 bi = (struct btrfs_tree_block_info *)(ei + 1);
7022                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
7023         }
7024
7025         refs = btrfs_extent_refs(leaf, ei);
7026         if (refs < refs_to_drop) {
7027                 btrfs_err(info,
7028                           "trying to drop %d refs but we only have %Lu for bytenr %Lu",
7029                           refs_to_drop, refs, bytenr);
7030                 ret = -EINVAL;
7031                 btrfs_abort_transaction(trans, ret);
7032                 goto out;
7033         }
7034         refs -= refs_to_drop;
7035
7036         if (refs > 0) {
7037                 if (extent_op)
7038                         __run_delayed_extent_op(extent_op, leaf, ei);
7039                 /*
7040                  * In the case of inline back ref, reference count will
7041                  * be updated by remove_extent_backref
7042                  */
7043                 if (iref) {
7044                         BUG_ON(!found_extent);
7045                 } else {
7046                         btrfs_set_extent_refs(leaf, ei, refs);
7047                         btrfs_mark_buffer_dirty(leaf);
7048                 }
7049                 if (found_extent) {
7050                         ret = remove_extent_backref(trans, info, path,
7051                                                     iref, refs_to_drop,
7052                                                     is_data, &last_ref);
7053                         if (ret) {
7054                                 btrfs_abort_transaction(trans, ret);
7055                                 goto out;
7056                         }
7057                 }
7058         } else {
7059                 if (found_extent) {
7060                         BUG_ON(is_data && refs_to_drop !=
7061                                extent_data_ref_count(path, iref));
7062                         if (iref) {
7063                                 BUG_ON(path->slots[0] != extent_slot);
7064                         } else {
7065                                 BUG_ON(path->slots[0] != extent_slot + 1);
7066                                 path->slots[0] = extent_slot;
7067                                 num_to_del = 2;
7068                         }
7069                 }
7070
7071                 last_ref = 1;
7072                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
7073                                       num_to_del);
7074                 if (ret) {
7075                         btrfs_abort_transaction(trans, ret);
7076                         goto out;
7077                 }
7078                 btrfs_release_path(path);
7079
7080                 if (is_data) {
7081                         ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
7082                         if (ret) {
7083                                 btrfs_abort_transaction(trans, ret);
7084                                 goto out;
7085                         }
7086                 }
7087
7088                 ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
7089                 if (ret) {
7090                         btrfs_abort_transaction(trans, ret);
7091                         goto out;
7092                 }
7093
7094                 ret = update_block_group(trans, info, bytenr, num_bytes, 0);
7095                 if (ret) {
7096                         btrfs_abort_transaction(trans, ret);
7097                         goto out;
7098                 }
7099         }
7100         btrfs_release_path(path);
7101
7102 out:
7103         btrfs_free_path(path);
7104         return ret;
7105 }
7106
7107 /*
7108  * when we free an block, it is possible (and likely) that we free the last
7109  * delayed ref for that extent as well.  This searches the delayed ref tree for
7110  * a given extent, and if there are no other delayed refs to be processed, it
7111  * removes it from the tree.
7112  */
7113 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7114                                       u64 bytenr)
7115 {
7116         struct btrfs_delayed_ref_head *head;
7117         struct btrfs_delayed_ref_root *delayed_refs;
7118         int ret = 0;
7119
7120         delayed_refs = &trans->transaction->delayed_refs;
7121         spin_lock(&delayed_refs->lock);
7122         head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
7123         if (!head)
7124                 goto out_delayed_unlock;
7125
7126         spin_lock(&head->lock);
7127         if (!RB_EMPTY_ROOT(&head->ref_tree))
7128                 goto out;
7129
7130         if (head->extent_op) {
7131                 if (!head->must_insert_reserved)
7132                         goto out;
7133                 btrfs_free_delayed_extent_op(head->extent_op);
7134                 head->extent_op = NULL;
7135         }
7136
7137         /*
7138          * waiting for the lock here would deadlock.  If someone else has it
7139          * locked they are already in the process of dropping it anyway
7140          */
7141         if (!mutex_trylock(&head->mutex))
7142                 goto out;
7143
7144         /*
7145          * at this point we have a head with no other entries.  Go
7146          * ahead and process it.
7147          */
7148         rb_erase(&head->href_node, &delayed_refs->href_root);
7149         RB_CLEAR_NODE(&head->href_node);
7150         atomic_dec(&delayed_refs->num_entries);
7151
7152         /*
7153          * we don't take a ref on the node because we're removing it from the
7154          * tree, so we just steal the ref the tree was holding.
7155          */
7156         delayed_refs->num_heads--;
7157         if (head->processing == 0)
7158                 delayed_refs->num_heads_ready--;
7159         head->processing = 0;
7160         spin_unlock(&head->lock);
7161         spin_unlock(&delayed_refs->lock);
7162
7163         BUG_ON(head->extent_op);
7164         if (head->must_insert_reserved)
7165                 ret = 1;
7166
7167         mutex_unlock(&head->mutex);
7168         btrfs_put_delayed_ref_head(head);
7169         return ret;
7170 out:
7171         spin_unlock(&head->lock);
7172
7173 out_delayed_unlock:
7174         spin_unlock(&delayed_refs->lock);
7175         return 0;
7176 }
7177
7178 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7179                            struct btrfs_root *root,
7180                            struct extent_buffer *buf,
7181                            u64 parent, int last_ref)
7182 {
7183         struct btrfs_fs_info *fs_info = root->fs_info;
7184         int pin = 1;
7185         int ret;
7186
7187         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7188                 int old_ref_mod, new_ref_mod;
7189
7190                 btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
7191                                    root->root_key.objectid,
7192                                    btrfs_header_level(buf), 0,
7193                                    BTRFS_DROP_DELAYED_REF);
7194                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
7195                                                  buf->len, parent,
7196                                                  root->root_key.objectid,
7197                                                  btrfs_header_level(buf),
7198                                                  BTRFS_DROP_DELAYED_REF, NULL,
7199                                                  &old_ref_mod, &new_ref_mod);
7200                 BUG_ON(ret); /* -ENOMEM */
7201                 pin = old_ref_mod >= 0 && new_ref_mod < 0;
7202         }
7203
7204         if (last_ref && btrfs_header_generation(buf) == trans->transid) {
7205                 struct btrfs_block_group_cache *cache;
7206
7207                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7208                         ret = check_ref_cleanup(trans, buf->start);
7209                         if (!ret)
7210                                 goto out;
7211                 }
7212
7213                 pin = 0;
7214                 cache = btrfs_lookup_block_group(fs_info, buf->start);
7215
7216                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
7217                         pin_down_extent(fs_info, cache, buf->start,
7218                                         buf->len, 1);
7219                         btrfs_put_block_group(cache);
7220                         goto out;
7221                 }
7222
7223                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7224
7225                 btrfs_add_free_space(cache, buf->start, buf->len);
7226                 btrfs_free_reserved_bytes(cache, buf->len, 0);
7227                 btrfs_put_block_group(cache);
7228                 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
7229         }
7230 out:
7231         if (pin)
7232                 add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
7233                                  root->root_key.objectid);
7234
7235         if (last_ref) {
7236                 /*
7237                  * Deleting the buffer, clear the corrupt flag since it doesn't
7238                  * matter anymore.
7239                  */
7240                 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
7241         }
7242 }
7243
7244 /* Can return -ENOMEM */
7245 int btrfs_free_extent(struct btrfs_trans_handle *trans,
7246                       struct btrfs_root *root,
7247                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
7248                       u64 owner, u64 offset)
7249 {
7250         struct btrfs_fs_info *fs_info = root->fs_info;
7251         int old_ref_mod, new_ref_mod;
7252         int ret;
7253
7254         if (btrfs_is_testing(fs_info))
7255                 return 0;
7256
7257         if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
7258                 btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
7259                                    root_objectid, owner, offset,
7260                                    BTRFS_DROP_DELAYED_REF);
7261
7262         /*
7263          * tree log blocks never actually go into the extent allocation
7264          * tree, just update pinning info and exit early.
7265          */
7266         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
7267                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
7268                 /* unlocks the pinned mutex */
7269                 btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
7270                 old_ref_mod = new_ref_mod = 0;
7271                 ret = 0;
7272         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7273                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
7274                                                  num_bytes, parent,
7275                                                  root_objectid, (int)owner,
7276                                                  BTRFS_DROP_DELAYED_REF, NULL,
7277                                                  &old_ref_mod, &new_ref_mod);
7278         } else {
7279                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
7280                                                  num_bytes, parent,
7281                                                  root_objectid, owner, offset,
7282                                                  0, BTRFS_DROP_DELAYED_REF,
7283                                                  &old_ref_mod, &new_ref_mod);
7284         }
7285
7286         if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
7287                 add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
7288
7289         return ret;
7290 }
7291
7292 /*
7293  * when we wait for progress in the block group caching, its because
7294  * our allocation attempt failed at least once.  So, we must sleep
7295  * and let some progress happen before we try again.
7296  *
7297  * This function will sleep at least once waiting for new free space to
7298  * show up, and then it will check the block group free space numbers
7299  * for our min num_bytes.  Another option is to have it go ahead
7300  * and look in the rbtree for a free extent of a given size, but this
7301  * is a good start.
7302  *
7303  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7304  * any of the information in this block group.
7305  */
7306 static noinline void
7307 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7308                                 u64 num_bytes)
7309 {
7310         struct btrfs_caching_control *caching_ctl;
7311
7312         caching_ctl = get_caching_control(cache);
7313         if (!caching_ctl)
7314                 return;
7315
7316         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7317                    (cache->free_space_ctl->free_space >= num_bytes));
7318
7319         put_caching_control(caching_ctl);
7320 }
7321
7322 static noinline int
7323 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7324 {
7325         struct btrfs_caching_control *caching_ctl;
7326         int ret = 0;
7327
7328         caching_ctl = get_caching_control(cache);
7329         if (!caching_ctl)
7330                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7331
7332         wait_event(caching_ctl->wait, block_group_cache_done(cache));
7333         if (cache->cached == BTRFS_CACHE_ERROR)
7334                 ret = -EIO;
7335         put_caching_control(caching_ctl);
7336         return ret;
7337 }
7338
7339 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
7340         [BTRFS_RAID_RAID10]     = "raid10",
7341         [BTRFS_RAID_RAID1]      = "raid1",
7342         [BTRFS_RAID_DUP]        = "dup",
7343         [BTRFS_RAID_RAID0]      = "raid0",
7344         [BTRFS_RAID_SINGLE]     = "single",
7345         [BTRFS_RAID_RAID5]      = "raid5",
7346         [BTRFS_RAID_RAID6]      = "raid6",
7347 };
7348
7349 static const char *get_raid_name(enum btrfs_raid_types type)
7350 {
7351         if (type >= BTRFS_NR_RAID_TYPES)
7352                 return NULL;
7353
7354         return btrfs_raid_type_names[type];
7355 }
7356
7357 enum btrfs_loop_type {
7358         LOOP_CACHING_NOWAIT = 0,
7359         LOOP_CACHING_WAIT = 1,
7360         LOOP_ALLOC_CHUNK = 2,
7361         LOOP_NO_EMPTY_SIZE = 3,
7362 };
7363
7364 static inline void
7365 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7366                        int delalloc)
7367 {
7368         if (delalloc)
7369                 down_read(&cache->data_rwsem);
7370 }
7371
7372 static inline void
7373 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7374                        int delalloc)
7375 {
7376         btrfs_get_block_group(cache);
7377         if (delalloc)
7378                 down_read(&cache->data_rwsem);
7379 }
7380
7381 static struct btrfs_block_group_cache *
7382 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7383                    struct btrfs_free_cluster *cluster,
7384                    int delalloc)
7385 {
7386         struct btrfs_block_group_cache *used_bg = NULL;
7387
7388         spin_lock(&cluster->refill_lock);
7389         while (1) {
7390                 used_bg = cluster->block_group;
7391                 if (!used_bg)
7392                         return NULL;
7393
7394                 if (used_bg == block_group)
7395                         return used_bg;
7396
7397                 btrfs_get_block_group(used_bg);
7398
7399                 if (!delalloc)
7400                         return used_bg;
7401
7402                 if (down_read_trylock(&used_bg->data_rwsem))
7403                         return used_bg;
7404
7405                 spin_unlock(&cluster->refill_lock);
7406
7407                 /* We should only have one-level nested. */
7408                 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
7409
7410                 spin_lock(&cluster->refill_lock);
7411                 if (used_bg == cluster->block_group)
7412                         return used_bg;
7413
7414                 up_read(&used_bg->data_rwsem);
7415                 btrfs_put_block_group(used_bg);
7416         }
7417 }
7418
7419 static inline void
7420 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7421                          int delalloc)
7422 {
7423         if (delalloc)
7424                 up_read(&cache->data_rwsem);
7425         btrfs_put_block_group(cache);
7426 }
7427
7428 /*
7429  * walks the btree of allocated extents and find a hole of a given size.
7430  * The key ins is changed to record the hole:
7431  * ins->objectid == start position
7432  * ins->flags = BTRFS_EXTENT_ITEM_KEY
7433  * ins->offset == the size of the hole.
7434  * Any available blocks before search_start are skipped.
7435  *
7436  * If there is no suitable free space, we will record the max size of
7437  * the free space extent currently.
7438  */
7439 static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
7440                                 u64 ram_bytes, u64 num_bytes, u64 empty_size,
7441                                 u64 hint_byte, struct btrfs_key *ins,
7442                                 u64 flags, int delalloc)
7443 {
7444         int ret = 0;
7445         struct btrfs_root *root = fs_info->extent_root;
7446         struct btrfs_free_cluster *last_ptr = NULL;
7447         struct btrfs_block_group_cache *block_group = NULL;
7448         u64 search_start = 0;
7449         u64 max_extent_size = 0;
7450         u64 empty_cluster = 0;
7451         struct btrfs_space_info *space_info;
7452         int loop = 0;
7453         int index = btrfs_bg_flags_to_raid_index(flags);
7454         bool failed_cluster_refill = false;
7455         bool failed_alloc = false;
7456         bool use_cluster = true;
7457         bool have_caching_bg = false;
7458         bool orig_have_caching_bg = false;
7459         bool full_search = false;
7460
7461         WARN_ON(num_bytes < fs_info->sectorsize);
7462         ins->type = BTRFS_EXTENT_ITEM_KEY;
7463         ins->objectid = 0;
7464         ins->offset = 0;
7465
7466         trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
7467
7468         space_info = __find_space_info(fs_info, flags);
7469         if (!space_info) {
7470                 btrfs_err(fs_info, "No space info for %llu", flags);
7471                 return -ENOSPC;
7472         }
7473
7474         /*
7475          * If our free space is heavily fragmented we may not be able to make
7476          * big contiguous allocations, so instead of doing the expensive search
7477          * for free space, simply return ENOSPC with our max_extent_size so we
7478          * can go ahead and search for a more manageable chunk.
7479          *
7480          * If our max_extent_size is large enough for our allocation simply
7481          * disable clustering since we will likely not be able to find enough
7482          * space to create a cluster and induce latency trying.
7483          */
7484         if (unlikely(space_info->max_extent_size)) {
7485                 spin_lock(&space_info->lock);
7486                 if (space_info->max_extent_size &&
7487                     num_bytes > space_info->max_extent_size) {
7488                         ins->offset = space_info->max_extent_size;
7489                         spin_unlock(&space_info->lock);
7490                         return -ENOSPC;
7491                 } else if (space_info->max_extent_size) {
7492                         use_cluster = false;
7493                 }
7494                 spin_unlock(&space_info->lock);
7495         }
7496
7497         last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
7498         if (last_ptr) {
7499                 spin_lock(&last_ptr->lock);
7500                 if (last_ptr->block_group)
7501                         hint_byte = last_ptr->window_start;
7502                 if (last_ptr->fragmented) {
7503                         /*
7504                          * We still set window_start so we can keep track of the
7505                          * last place we found an allocation to try and save
7506                          * some time.
7507                          */
7508                         hint_byte = last_ptr->window_start;
7509                         use_cluster = false;
7510                 }
7511                 spin_unlock(&last_ptr->lock);
7512         }
7513
7514         search_start = max(search_start, first_logical_byte(fs_info, 0));
7515         search_start = max(search_start, hint_byte);
7516         if (search_start == hint_byte) {
7517                 block_group = btrfs_lookup_block_group(fs_info, search_start);
7518                 /*
7519                  * we don't want to use the block group if it doesn't match our
7520                  * allocation bits, or if its not cached.
7521                  *
7522                  * However if we are re-searching with an ideal block group
7523                  * picked out then we don't care that the block group is cached.
7524                  */
7525                 if (block_group && block_group_bits(block_group, flags) &&
7526                     block_group->cached != BTRFS_CACHE_NO) {
7527                         down_read(&space_info->groups_sem);
7528                         if (list_empty(&block_group->list) ||
7529                             block_group->ro) {
7530                                 /*
7531                                  * someone is removing this block group,
7532                                  * we can't jump into the have_block_group
7533                                  * target because our list pointers are not
7534                                  * valid
7535                                  */
7536                                 btrfs_put_block_group(block_group);
7537                                 up_read(&space_info->groups_sem);
7538                         } else {
7539                                 index = btrfs_bg_flags_to_raid_index(
7540                                                 block_group->flags);
7541                                 btrfs_lock_block_group(block_group, delalloc);
7542                                 goto have_block_group;
7543                         }
7544                 } else if (block_group) {
7545                         btrfs_put_block_group(block_group);
7546                 }
7547         }
7548 search:
7549         have_caching_bg = false;
7550         if (index == 0 || index == btrfs_bg_flags_to_raid_index(flags))
7551                 full_search = true;
7552         down_read(&space_info->groups_sem);
7553         list_for_each_entry(block_group, &space_info->block_groups[index],
7554                             list) {
7555                 u64 offset;
7556                 int cached;
7557
7558                 /* If the block group is read-only, we can skip it entirely. */
7559                 if (unlikely(block_group->ro))
7560                         continue;
7561
7562                 btrfs_grab_block_group(block_group, delalloc);
7563                 search_start = block_group->key.objectid;
7564
7565                 /*
7566                  * this can happen if we end up cycling through all the
7567                  * raid types, but we want to make sure we only allocate
7568                  * for the proper type.
7569                  */
7570                 if (!block_group_bits(block_group, flags)) {
7571                     u64 extra = BTRFS_BLOCK_GROUP_DUP |
7572                                 BTRFS_BLOCK_GROUP_RAID1 |
7573                                 BTRFS_BLOCK_GROUP_RAID5 |
7574                                 BTRFS_BLOCK_GROUP_RAID6 |
7575                                 BTRFS_BLOCK_GROUP_RAID10;
7576
7577                         /*
7578                          * if they asked for extra copies and this block group
7579                          * doesn't provide them, bail.  This does allow us to
7580                          * fill raid0 from raid1.
7581                          */
7582                         if ((flags & extra) && !(block_group->flags & extra))
7583                                 goto loop;
7584                 }
7585
7586 have_block_group:
7587                 cached = block_group_cache_done(block_group);
7588                 if (unlikely(!cached)) {
7589                         have_caching_bg = true;
7590                         ret = cache_block_group(block_group, 0);
7591                         BUG_ON(ret < 0);
7592                         ret = 0;
7593                 }
7594
7595                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7596                         goto loop;
7597
7598                 /*
7599                  * Ok we want to try and use the cluster allocator, so
7600                  * lets look there
7601                  */
7602                 if (last_ptr && use_cluster) {
7603                         struct btrfs_block_group_cache *used_block_group;
7604                         unsigned long aligned_cluster;
7605                         /*
7606                          * the refill lock keeps out other
7607                          * people trying to start a new cluster
7608                          */
7609                         used_block_group = btrfs_lock_cluster(block_group,
7610                                                               last_ptr,
7611                                                               delalloc);
7612                         if (!used_block_group)
7613                                 goto refill_cluster;
7614
7615                         if (used_block_group != block_group &&
7616                             (used_block_group->ro ||
7617                              !block_group_bits(used_block_group, flags)))
7618                                 goto release_cluster;
7619
7620                         offset = btrfs_alloc_from_cluster(used_block_group,
7621                                                 last_ptr,
7622                                                 num_bytes,
7623                                                 used_block_group->key.objectid,
7624                                                 &max_extent_size);
7625                         if (offset) {
7626                                 /* we have a block, we're done */
7627                                 spin_unlock(&last_ptr->refill_lock);
7628                                 trace_btrfs_reserve_extent_cluster(fs_info,
7629                                                 used_block_group,
7630                                                 search_start, num_bytes);
7631                                 if (used_block_group != block_group) {
7632                                         btrfs_release_block_group(block_group,
7633                                                                   delalloc);
7634                                         block_group = used_block_group;
7635                                 }
7636                                 goto checks;
7637                         }
7638
7639                         WARN_ON(last_ptr->block_group != used_block_group);
7640 release_cluster:
7641                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7642                          * set up a new clusters, so lets just skip it
7643                          * and let the allocator find whatever block
7644                          * it can find.  If we reach this point, we
7645                          * will have tried the cluster allocator
7646                          * plenty of times and not have found
7647                          * anything, so we are likely way too
7648                          * fragmented for the clustering stuff to find
7649                          * anything.
7650                          *
7651                          * However, if the cluster is taken from the
7652                          * current block group, release the cluster
7653                          * first, so that we stand a better chance of
7654                          * succeeding in the unclustered
7655                          * allocation.  */
7656                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7657                             used_block_group != block_group) {
7658                                 spin_unlock(&last_ptr->refill_lock);
7659                                 btrfs_release_block_group(used_block_group,
7660                                                           delalloc);
7661                                 goto unclustered_alloc;
7662                         }
7663
7664                         /*
7665                          * this cluster didn't work out, free it and
7666                          * start over
7667                          */
7668                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7669
7670                         if (used_block_group != block_group)
7671                                 btrfs_release_block_group(used_block_group,
7672                                                           delalloc);
7673 refill_cluster:
7674                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7675                                 spin_unlock(&last_ptr->refill_lock);
7676                                 goto unclustered_alloc;
7677                         }
7678
7679                         aligned_cluster = max_t(unsigned long,
7680                                                 empty_cluster + empty_size,
7681                                               block_group->full_stripe_len);
7682
7683                         /* allocate a cluster in this block group */
7684                         ret = btrfs_find_space_cluster(fs_info, block_group,
7685                                                        last_ptr, search_start,
7686                                                        num_bytes,
7687                                                        aligned_cluster);
7688                         if (ret == 0) {
7689                                 /*
7690                                  * now pull our allocation out of this
7691                                  * cluster
7692                                  */
7693                                 offset = btrfs_alloc_from_cluster(block_group,
7694                                                         last_ptr,
7695                                                         num_bytes,
7696                                                         search_start,
7697                                                         &max_extent_size);
7698                                 if (offset) {
7699                                         /* we found one, proceed */
7700                                         spin_unlock(&last_ptr->refill_lock);
7701                                         trace_btrfs_reserve_extent_cluster(fs_info,
7702                                                 block_group, search_start,
7703                                                 num_bytes);
7704                                         goto checks;
7705                                 }
7706                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7707                                    && !failed_cluster_refill) {
7708                                 spin_unlock(&last_ptr->refill_lock);
7709
7710                                 failed_cluster_refill = true;
7711                                 wait_block_group_cache_progress(block_group,
7712                                        num_bytes + empty_cluster + empty_size);
7713                                 goto have_block_group;
7714                         }
7715
7716                         /*
7717                          * at this point we either didn't find a cluster
7718                          * or we weren't able to allocate a block from our
7719                          * cluster.  Free the cluster we've been trying
7720                          * to use, and go to the next block group
7721                          */
7722                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7723                         spin_unlock(&last_ptr->refill_lock);
7724                         goto loop;
7725                 }
7726
7727 unclustered_alloc:
7728                 /*
7729                  * We are doing an unclustered alloc, set the fragmented flag so
7730                  * we don't bother trying to setup a cluster again until we get
7731                  * more space.
7732                  */
7733                 if (unlikely(last_ptr)) {
7734                         spin_lock(&last_ptr->lock);
7735                         last_ptr->fragmented = 1;
7736                         spin_unlock(&last_ptr->lock);
7737                 }
7738                 if (cached) {
7739                         struct btrfs_free_space_ctl *ctl =
7740                                 block_group->free_space_ctl;
7741
7742                         spin_lock(&ctl->tree_lock);
7743                         if (ctl->free_space <
7744                             num_bytes + empty_cluster + empty_size) {
7745                                 if (ctl->free_space > max_extent_size)
7746                                         max_extent_size = ctl->free_space;
7747                                 spin_unlock(&ctl->tree_lock);
7748                                 goto loop;
7749                         }
7750                         spin_unlock(&ctl->tree_lock);
7751                 }
7752
7753                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7754                                                     num_bytes, empty_size,
7755                                                     &max_extent_size);
7756                 /*
7757                  * If we didn't find a chunk, and we haven't failed on this
7758                  * block group before, and this block group is in the middle of
7759                  * caching and we are ok with waiting, then go ahead and wait
7760                  * for progress to be made, and set failed_alloc to true.
7761                  *
7762                  * If failed_alloc is true then we've already waited on this
7763                  * block group once and should move on to the next block group.
7764                  */
7765                 if (!offset && !failed_alloc && !cached &&
7766                     loop > LOOP_CACHING_NOWAIT) {
7767                         wait_block_group_cache_progress(block_group,
7768                                                 num_bytes + empty_size);
7769                         failed_alloc = true;
7770                         goto have_block_group;
7771                 } else if (!offset) {
7772                         goto loop;
7773                 }
7774 checks:
7775                 search_start = ALIGN(offset, fs_info->stripesize);
7776
7777                 /* move on to the next group */
7778                 if (search_start + num_bytes >
7779                     block_group->key.objectid + block_group->key.offset) {
7780                         btrfs_add_free_space(block_group, offset, num_bytes);
7781                         goto loop;
7782                 }
7783
7784                 if (offset < search_start)
7785                         btrfs_add_free_space(block_group, offset,
7786                                              search_start - offset);
7787                 BUG_ON(offset > search_start);
7788
7789                 ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
7790                                 num_bytes, delalloc);
7791                 if (ret == -EAGAIN) {
7792                         btrfs_add_free_space(block_group, offset, num_bytes);
7793                         goto loop;
7794                 }
7795                 btrfs_inc_block_group_reservations(block_group);
7796
7797                 /* we are all good, lets return */
7798                 ins->objectid = search_start;
7799                 ins->offset = num_bytes;
7800
7801                 trace_btrfs_reserve_extent(fs_info, block_group,
7802                                            search_start, num_bytes);
7803                 btrfs_release_block_group(block_group, delalloc);
7804                 break;
7805 loop:
7806                 failed_cluster_refill = false;
7807                 failed_alloc = false;
7808                 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
7809                        index);
7810                 btrfs_release_block_group(block_group, delalloc);
7811                 cond_resched();
7812         }
7813         up_read(&space_info->groups_sem);
7814
7815         if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7816                 && !orig_have_caching_bg)
7817                 orig_have_caching_bg = true;
7818
7819         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7820                 goto search;
7821
7822         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7823                 goto search;
7824
7825         /*
7826          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7827          *                      caching kthreads as we move along
7828          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7829          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7830          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7831          *                      again
7832          */
7833         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7834                 index = 0;
7835                 if (loop == LOOP_CACHING_NOWAIT) {
7836                         /*
7837                          * We want to skip the LOOP_CACHING_WAIT step if we
7838                          * don't have any uncached bgs and we've already done a
7839                          * full search through.
7840                          */
7841                         if (orig_have_caching_bg || !full_search)
7842                                 loop = LOOP_CACHING_WAIT;
7843                         else
7844                                 loop = LOOP_ALLOC_CHUNK;
7845                 } else {
7846                         loop++;
7847                 }
7848
7849                 if (loop == LOOP_ALLOC_CHUNK) {
7850                         struct btrfs_trans_handle *trans;
7851                         int exist = 0;
7852
7853                         trans = current->journal_info;
7854                         if (trans)
7855                                 exist = 1;
7856                         else
7857                                 trans = btrfs_join_transaction(root);
7858
7859                         if (IS_ERR(trans)) {
7860                                 ret = PTR_ERR(trans);
7861                                 goto out;
7862                         }
7863
7864                         ret = do_chunk_alloc(trans, fs_info, flags,
7865                                              CHUNK_ALLOC_FORCE);
7866
7867                         /*
7868                          * If we can't allocate a new chunk we've already looped
7869                          * through at least once, move on to the NO_EMPTY_SIZE
7870                          * case.
7871                          */
7872                         if (ret == -ENOSPC)
7873                                 loop = LOOP_NO_EMPTY_SIZE;
7874
7875                         /*
7876                          * Do not bail out on ENOSPC since we
7877                          * can do more things.
7878                          */
7879                         if (ret < 0 && ret != -ENOSPC)
7880                                 btrfs_abort_transaction(trans, ret);
7881                         else
7882                                 ret = 0;
7883                         if (!exist)
7884                                 btrfs_end_transaction(trans);
7885                         if (ret)
7886                                 goto out;
7887                 }
7888
7889                 if (loop == LOOP_NO_EMPTY_SIZE) {
7890                         /*
7891                          * Don't loop again if we already have no empty_size and
7892                          * no empty_cluster.
7893                          */
7894                         if (empty_size == 0 &&
7895                             empty_cluster == 0) {
7896                                 ret = -ENOSPC;
7897                                 goto out;
7898                         }
7899                         empty_size = 0;
7900                         empty_cluster = 0;
7901                 }
7902
7903                 goto search;
7904         } else if (!ins->objectid) {
7905                 ret = -ENOSPC;
7906         } else if (ins->objectid) {
7907                 if (!use_cluster && last_ptr) {
7908                         spin_lock(&last_ptr->lock);
7909                         last_ptr->window_start = ins->objectid;
7910                         spin_unlock(&last_ptr->lock);
7911                 }
7912                 ret = 0;
7913         }
7914 out:
7915         if (ret == -ENOSPC) {
7916                 spin_lock(&space_info->lock);
7917                 space_info->max_extent_size = max_extent_size;
7918                 spin_unlock(&space_info->lock);
7919                 ins->offset = max_extent_size;
7920         }
7921         return ret;
7922 }
7923
7924 static void dump_space_info(struct btrfs_fs_info *fs_info,
7925                             struct btrfs_space_info *info, u64 bytes,
7926                             int dump_block_groups)
7927 {
7928         struct btrfs_block_group_cache *cache;
7929         int index = 0;
7930
7931         spin_lock(&info->lock);
7932         btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
7933                    info->flags,
7934                    info->total_bytes - btrfs_space_info_used(info, true),
7935                    info->full ? "" : "not ");
7936         btrfs_info(fs_info,
7937                 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
7938                 info->total_bytes, info->bytes_used, info->bytes_pinned,
7939                 info->bytes_reserved, info->bytes_may_use,
7940                 info->bytes_readonly);
7941         spin_unlock(&info->lock);
7942
7943         if (!dump_block_groups)
7944                 return;
7945
7946         down_read(&info->groups_sem);
7947 again:
7948         list_for_each_entry(cache, &info->block_groups[index], list) {
7949                 spin_lock(&cache->lock);
7950                 btrfs_info(fs_info,
7951                         "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
7952                         cache->key.objectid, cache->key.offset,
7953                         btrfs_block_group_used(&cache->item), cache->pinned,
7954                         cache->reserved, cache->ro ? "[readonly]" : "");
7955                 btrfs_dump_free_space(cache, bytes);
7956                 spin_unlock(&cache->lock);
7957         }
7958         if (++index < BTRFS_NR_RAID_TYPES)
7959                 goto again;
7960         up_read(&info->groups_sem);
7961 }
7962
7963 /*
7964  * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
7965  *                        hole that is at least as big as @num_bytes.
7966  *
7967  * @root           -    The root that will contain this extent
7968  *
7969  * @ram_bytes      -    The amount of space in ram that @num_bytes take. This
7970  *                      is used for accounting purposes. This value differs
7971  *                      from @num_bytes only in the case of compressed extents.
7972  *
7973  * @num_bytes      -    Number of bytes to allocate on-disk.
7974  *
7975  * @min_alloc_size -    Indicates the minimum amount of space that the
7976  *                      allocator should try to satisfy. In some cases
7977  *                      @num_bytes may be larger than what is required and if
7978  *                      the filesystem is fragmented then allocation fails.
7979  *                      However, the presence of @min_alloc_size gives a
7980  *                      chance to try and satisfy the smaller allocation.
7981  *
7982  * @empty_size     -    A hint that you plan on doing more COW. This is the
7983  *                      size in bytes the allocator should try to find free
7984  *                      next to the block it returns.  This is just a hint and
7985  *                      may be ignored by the allocator.
7986  *
7987  * @hint_byte      -    Hint to the allocator to start searching above the byte
7988  *                      address passed. It might be ignored.
7989  *
7990  * @ins            -    This key is modified to record the found hole. It will
7991  *                      have the following values:
7992  *                      ins->objectid == start position
7993  *                      ins->flags = BTRFS_EXTENT_ITEM_KEY
7994  *                      ins->offset == the size of the hole.
7995  *
7996  * @is_data        -    Boolean flag indicating whether an extent is
7997  *                      allocated for data (true) or metadata (false)
7998  *
7999  * @delalloc       -    Boolean flag indicating whether this allocation is for
8000  *                      delalloc or not. If 'true' data_rwsem of block groups
8001  *                      is going to be acquired.
8002  *
8003  *
8004  * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
8005  * case -ENOSPC is returned then @ins->offset will contain the size of the
8006  * largest available hole the allocator managed to find.
8007  */
8008 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
8009                          u64 num_bytes, u64 min_alloc_size,
8010                          u64 empty_size, u64 hint_byte,
8011                          struct btrfs_key *ins, int is_data, int delalloc)
8012 {
8013         struct btrfs_fs_info *fs_info = root->fs_info;
8014         bool final_tried = num_bytes == min_alloc_size;
8015         u64 flags;
8016         int ret;
8017
8018         flags = get_alloc_profile_by_root(root, is_data);
8019 again:
8020         WARN_ON(num_bytes < fs_info->sectorsize);
8021         ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
8022                                hint_byte, ins, flags, delalloc);
8023         if (!ret && !is_data) {
8024                 btrfs_dec_block_group_reservations(fs_info, ins->objectid);
8025         } else if (ret == -ENOSPC) {
8026                 if (!final_tried && ins->offset) {
8027                         num_bytes = min(num_bytes >> 1, ins->offset);
8028                         num_bytes = round_down(num_bytes,
8029                                                fs_info->sectorsize);
8030                         num_bytes = max(num_bytes, min_alloc_size);
8031                         ram_bytes = num_bytes;
8032                         if (num_bytes == min_alloc_size)
8033                                 final_tried = true;
8034                         goto again;
8035                 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
8036                         struct btrfs_space_info *sinfo;
8037
8038                         sinfo = __find_space_info(fs_info, flags);
8039                         btrfs_err(fs_info,
8040                                   "allocation failed flags %llu, wanted %llu",
8041                                   flags, num_bytes);
8042                         if (sinfo)
8043                                 dump_space_info(fs_info, sinfo, num_bytes, 1);
8044                 }
8045         }
8046
8047         return ret;
8048 }
8049
8050 static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
8051                                         u64 start, u64 len,
8052                                         int pin, int delalloc)
8053 {
8054         struct btrfs_block_group_cache *cache;
8055         int ret = 0;
8056
8057         cache = btrfs_lookup_block_group(fs_info, start);
8058         if (!cache) {
8059                 btrfs_err(fs_info, "Unable to find block group for %llu",
8060                           start);
8061                 return -ENOSPC;
8062         }
8063
8064         if (pin)
8065                 pin_down_extent(fs_info, cache, start, len, 1);
8066         else {
8067                 if (btrfs_test_opt(fs_info, DISCARD))
8068                         ret = btrfs_discard_extent(fs_info, start, len, NULL);
8069                 btrfs_add_free_space(cache, start, len);
8070                 btrfs_free_reserved_bytes(cache, len, delalloc);
8071                 trace_btrfs_reserved_extent_free(fs_info, start, len);
8072         }
8073
8074         btrfs_put_block_group(cache);
8075         return ret;
8076 }
8077
8078 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
8079                                u64 start, u64 len, int delalloc)
8080 {
8081         return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
8082 }
8083
8084 int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
8085                                        u64 start, u64 len)
8086 {
8087         return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
8088 }
8089
8090 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8091                                       struct btrfs_fs_info *fs_info,
8092                                       u64 parent, u64 root_objectid,
8093                                       u64 flags, u64 owner, u64 offset,
8094                                       struct btrfs_key *ins, int ref_mod)
8095 {
8096         int ret;
8097         struct btrfs_extent_item *extent_item;
8098         struct btrfs_extent_inline_ref *iref;
8099         struct btrfs_path *path;
8100         struct extent_buffer *leaf;
8101         int type;
8102         u32 size;
8103
8104         if (parent > 0)
8105                 type = BTRFS_SHARED_DATA_REF_KEY;
8106         else
8107                 type = BTRFS_EXTENT_DATA_REF_KEY;
8108
8109         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
8110
8111         path = btrfs_alloc_path();
8112         if (!path)
8113                 return -ENOMEM;
8114
8115         path->leave_spinning = 1;
8116         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8117                                       ins, size);
8118         if (ret) {
8119                 btrfs_free_path(path);
8120                 return ret;
8121         }
8122
8123         leaf = path->nodes[0];
8124         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8125                                      struct btrfs_extent_item);
8126         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
8127         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8128         btrfs_set_extent_flags(leaf, extent_item,
8129                                flags | BTRFS_EXTENT_FLAG_DATA);
8130
8131         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8132         btrfs_set_extent_inline_ref_type(leaf, iref, type);
8133         if (parent > 0) {
8134                 struct btrfs_shared_data_ref *ref;
8135                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
8136                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8137                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
8138         } else {
8139                 struct btrfs_extent_data_ref *ref;
8140                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
8141                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
8142                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
8143                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
8144                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
8145         }
8146
8147         btrfs_mark_buffer_dirty(path->nodes[0]);
8148         btrfs_free_path(path);
8149
8150         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8151                                           ins->offset);
8152         if (ret)
8153                 return ret;
8154
8155         ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
8156         if (ret) { /* -ENOENT, logic error */
8157                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8158                         ins->objectid, ins->offset);
8159                 BUG();
8160         }
8161         trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
8162         return ret;
8163 }
8164
8165 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
8166                                      struct btrfs_fs_info *fs_info,
8167                                      u64 parent, u64 root_objectid,
8168                                      u64 flags, struct btrfs_disk_key *key,
8169                                      int level, struct btrfs_key *ins)
8170 {
8171         int ret;
8172         struct btrfs_extent_item *extent_item;
8173         struct btrfs_tree_block_info *block_info;
8174         struct btrfs_extent_inline_ref *iref;
8175         struct btrfs_path *path;
8176         struct extent_buffer *leaf;
8177         u32 size = sizeof(*extent_item) + sizeof(*iref);
8178         u64 num_bytes = ins->offset;
8179         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
8180
8181         if (!skinny_metadata)
8182                 size += sizeof(*block_info);
8183
8184         path = btrfs_alloc_path();
8185         if (!path) {
8186                 btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
8187                                                    fs_info->nodesize);
8188                 return -ENOMEM;
8189         }
8190
8191         path->leave_spinning = 1;
8192         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8193                                       ins, size);
8194         if (ret) {
8195                 btrfs_free_path(path);
8196                 btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
8197                                                    fs_info->nodesize);
8198                 return ret;
8199         }
8200
8201         leaf = path->nodes[0];
8202         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8203                                      struct btrfs_extent_item);
8204         btrfs_set_extent_refs(leaf, extent_item, 1);
8205         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8206         btrfs_set_extent_flags(leaf, extent_item,
8207                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
8208
8209         if (skinny_metadata) {
8210                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8211                 num_bytes = fs_info->nodesize;
8212         } else {
8213                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
8214                 btrfs_set_tree_block_key(leaf, block_info, key);
8215                 btrfs_set_tree_block_level(leaf, block_info, level);
8216                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
8217         }
8218
8219         if (parent > 0) {
8220                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
8221                 btrfs_set_extent_inline_ref_type(leaf, iref,
8222                                                  BTRFS_SHARED_BLOCK_REF_KEY);
8223                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8224         } else {
8225                 btrfs_set_extent_inline_ref_type(leaf, iref,
8226                                                  BTRFS_TREE_BLOCK_REF_KEY);
8227                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
8228         }
8229
8230         btrfs_mark_buffer_dirty(leaf);
8231         btrfs_free_path(path);
8232
8233         ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
8234                                           num_bytes);
8235         if (ret)
8236                 return ret;
8237
8238         ret = update_block_group(trans, fs_info, ins->objectid,
8239                                  fs_info->nodesize, 1);
8240         if (ret) { /* -ENOENT, logic error */
8241                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8242                         ins->objectid, ins->offset);
8243                 BUG();
8244         }
8245
8246         trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid,
8247                                           fs_info->nodesize);
8248         return ret;
8249 }
8250
8251 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8252                                      struct btrfs_root *root, u64 owner,
8253                                      u64 offset, u64 ram_bytes,
8254                                      struct btrfs_key *ins)
8255 {
8256         struct btrfs_fs_info *fs_info = root->fs_info;
8257         int ret;
8258
8259         BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
8260
8261         btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
8262                            root->root_key.objectid, owner, offset,
8263                            BTRFS_ADD_DELAYED_EXTENT);
8264
8265         ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
8266                                          ins->offset, 0,
8267                                          root->root_key.objectid, owner,
8268                                          offset, ram_bytes,
8269                                          BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
8270         return ret;
8271 }
8272
8273 /*
8274  * this is used by the tree logging recovery code.  It records that
8275  * an extent has been allocated and makes sure to clear the free
8276  * space cache bits as well
8277  */
8278 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8279                                    struct btrfs_fs_info *fs_info,
8280                                    u64 root_objectid, u64 owner, u64 offset,
8281                                    struct btrfs_key *ins)
8282 {
8283         int ret;
8284         struct btrfs_block_group_cache *block_group;
8285         struct btrfs_space_info *space_info;
8286
8287         /*
8288          * Mixed block groups will exclude before processing the log so we only
8289          * need to do the exclude dance if this fs isn't mixed.
8290          */
8291         if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
8292                 ret = __exclude_logged_extent(fs_info, ins->objectid,
8293                                               ins->offset);
8294                 if (ret)
8295                         return ret;
8296         }
8297
8298         block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
8299         if (!block_group)
8300                 return -EINVAL;
8301
8302         space_info = block_group->space_info;
8303         spin_lock(&space_info->lock);
8304         spin_lock(&block_group->lock);
8305         space_info->bytes_reserved += ins->offset;
8306         block_group->reserved += ins->offset;
8307         spin_unlock(&block_group->lock);
8308         spin_unlock(&space_info->lock);
8309
8310         ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
8311                                          0, owner, offset, ins, 1);
8312         btrfs_put_block_group(block_group);
8313         return ret;
8314 }
8315
8316 static struct extent_buffer *
8317 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8318                       u64 bytenr, int level)
8319 {
8320         struct btrfs_fs_info *fs_info = root->fs_info;
8321         struct extent_buffer *buf;
8322
8323         buf = btrfs_find_create_tree_block(fs_info, bytenr);
8324         if (IS_ERR(buf))
8325                 return buf;
8326
8327         btrfs_set_header_generation(buf, trans->transid);
8328         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8329         btrfs_tree_lock(buf);
8330         clean_tree_block(fs_info, buf);
8331         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8332
8333         btrfs_set_lock_blocking(buf);
8334         set_extent_buffer_uptodate(buf);
8335
8336         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8337                 buf->log_index = root->log_transid % 2;
8338                 /*
8339                  * we allow two log transactions at a time, use different
8340                  * EXENT bit to differentiate dirty pages.
8341                  */
8342                 if (buf->log_index == 0)
8343                         set_extent_dirty(&root->dirty_log_pages, buf->start,
8344                                         buf->start + buf->len - 1, GFP_NOFS);
8345                 else
8346                         set_extent_new(&root->dirty_log_pages, buf->start,
8347                                         buf->start + buf->len - 1);
8348         } else {
8349                 buf->log_index = -1;
8350                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8351                          buf->start + buf->len - 1, GFP_NOFS);
8352         }
8353         trans->dirty = true;
8354         /* this returns a buffer locked for blocking */
8355         return buf;
8356 }
8357
8358 static struct btrfs_block_rsv *
8359 use_block_rsv(struct btrfs_trans_handle *trans,
8360               struct btrfs_root *root, u32 blocksize)
8361 {
8362         struct btrfs_fs_info *fs_info = root->fs_info;
8363         struct btrfs_block_rsv *block_rsv;
8364         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
8365         int ret;
8366         bool global_updated = false;
8367
8368         block_rsv = get_block_rsv(trans, root);
8369
8370         if (unlikely(block_rsv->size == 0))
8371                 goto try_reserve;
8372 again:
8373         ret = block_rsv_use_bytes(block_rsv, blocksize);
8374         if (!ret)
8375                 return block_rsv;
8376
8377         if (block_rsv->failfast)
8378                 return ERR_PTR(ret);
8379
8380         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8381                 global_updated = true;
8382                 update_global_block_rsv(fs_info);
8383                 goto again;
8384         }
8385
8386         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
8387                 static DEFINE_RATELIMIT_STATE(_rs,
8388                                 DEFAULT_RATELIMIT_INTERVAL * 10,
8389                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
8390                 if (__ratelimit(&_rs))
8391                         WARN(1, KERN_DEBUG
8392                                 "BTRFS: block rsv returned %d\n", ret);
8393         }
8394 try_reserve:
8395         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8396                                      BTRFS_RESERVE_NO_FLUSH);
8397         if (!ret)
8398                 return block_rsv;
8399         /*
8400          * If we couldn't reserve metadata bytes try and use some from
8401          * the global reserve if its space type is the same as the global
8402          * reservation.
8403          */
8404         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8405             block_rsv->space_info == global_rsv->space_info) {
8406                 ret = block_rsv_use_bytes(global_rsv, blocksize);
8407                 if (!ret)
8408                         return global_rsv;
8409         }
8410         return ERR_PTR(ret);
8411 }
8412
8413 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8414                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
8415 {
8416         block_rsv_add_bytes(block_rsv, blocksize, 0);
8417         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
8418 }
8419
8420 /*
8421  * finds a free extent and does all the dirty work required for allocation
8422  * returns the tree buffer or an ERR_PTR on error.
8423  */
8424 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8425                                              struct btrfs_root *root,
8426                                              u64 parent, u64 root_objectid,
8427                                              const struct btrfs_disk_key *key,
8428                                              int level, u64 hint,
8429                                              u64 empty_size)
8430 {
8431         struct btrfs_fs_info *fs_info = root->fs_info;
8432         struct btrfs_key ins;
8433         struct btrfs_block_rsv *block_rsv;
8434         struct extent_buffer *buf;
8435         struct btrfs_delayed_extent_op *extent_op;
8436         u64 flags = 0;
8437         int ret;
8438         u32 blocksize = fs_info->nodesize;
8439         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
8440
8441 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8442         if (btrfs_is_testing(fs_info)) {
8443                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8444                                             level);
8445                 if (!IS_ERR(buf))
8446                         root->alloc_bytenr += blocksize;
8447                 return buf;
8448         }
8449 #endif
8450
8451         block_rsv = use_block_rsv(trans, root, blocksize);
8452         if (IS_ERR(block_rsv))
8453                 return ERR_CAST(block_rsv);
8454
8455         ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
8456                                    empty_size, hint, &ins, 0, 0);
8457         if (ret)
8458                 goto out_unuse;
8459
8460         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
8461         if (IS_ERR(buf)) {
8462                 ret = PTR_ERR(buf);
8463                 goto out_free_reserved;
8464         }
8465
8466         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8467                 if (parent == 0)
8468                         parent = ins.objectid;
8469                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8470         } else
8471                 BUG_ON(parent > 0);
8472
8473         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8474                 extent_op = btrfs_alloc_delayed_extent_op();
8475                 if (!extent_op) {
8476                         ret = -ENOMEM;
8477                         goto out_free_buf;
8478                 }
8479                 if (key)
8480                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
8481                 else
8482                         memset(&extent_op->key, 0, sizeof(extent_op->key));
8483                 extent_op->flags_to_set = flags;
8484                 extent_op->update_key = skinny_metadata ? false : true;
8485                 extent_op->update_flags = true;
8486                 extent_op->is_data = false;
8487                 extent_op->level = level;
8488
8489                 btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
8490                                    root_objectid, level, 0,
8491                                    BTRFS_ADD_DELAYED_EXTENT);
8492                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
8493                                                  ins.offset, parent,
8494                                                  root_objectid, level,
8495                                                  BTRFS_ADD_DELAYED_EXTENT,
8496                                                  extent_op, NULL, NULL);
8497                 if (ret)
8498                         goto out_free_delayed;
8499         }
8500         return buf;
8501
8502 out_free_delayed:
8503         btrfs_free_delayed_extent_op(extent_op);
8504 out_free_buf:
8505         free_extent_buffer(buf);
8506 out_free_reserved:
8507         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
8508 out_unuse:
8509         unuse_block_rsv(fs_info, block_rsv, blocksize);
8510         return ERR_PTR(ret);
8511 }
8512
8513 struct walk_control {
8514         u64 refs[BTRFS_MAX_LEVEL];
8515         u64 flags[BTRFS_MAX_LEVEL];
8516         struct btrfs_key update_progress;
8517         int stage;
8518         int level;
8519         int shared_level;
8520         int update_ref;
8521         int keep_locks;
8522         int reada_slot;
8523         int reada_count;
8524         int for_reloc;
8525 };
8526
8527 #define DROP_REFERENCE  1
8528 #define UPDATE_BACKREF  2
8529
8530 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8531                                      struct btrfs_root *root,
8532                                      struct walk_control *wc,
8533                                      struct btrfs_path *path)
8534 {
8535         struct btrfs_fs_info *fs_info = root->fs_info;
8536         u64 bytenr;
8537         u64 generation;
8538         u64 refs;
8539         u64 flags;
8540         u32 nritems;
8541         struct btrfs_key key;
8542         struct extent_buffer *eb;
8543         int ret;
8544         int slot;
8545         int nread = 0;
8546
8547         if (path->slots[wc->level] < wc->reada_slot) {
8548                 wc->reada_count = wc->reada_count * 2 / 3;
8549                 wc->reada_count = max(wc->reada_count, 2);
8550         } else {
8551                 wc->reada_count = wc->reada_count * 3 / 2;
8552                 wc->reada_count = min_t(int, wc->reada_count,
8553                                         BTRFS_NODEPTRS_PER_BLOCK(fs_info));
8554         }
8555
8556         eb = path->nodes[wc->level];
8557         nritems = btrfs_header_nritems(eb);
8558
8559         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8560                 if (nread >= wc->reada_count)
8561                         break;
8562
8563                 cond_resched();
8564                 bytenr = btrfs_node_blockptr(eb, slot);
8565                 generation = btrfs_node_ptr_generation(eb, slot);
8566
8567                 if (slot == path->slots[wc->level])
8568                         goto reada;
8569
8570                 if (wc->stage == UPDATE_BACKREF &&
8571                     generation <= root->root_key.offset)
8572                         continue;
8573
8574                 /* We don't lock the tree block, it's OK to be racy here */
8575                 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
8576                                                wc->level - 1, 1, &refs,
8577                                                &flags);
8578                 /* We don't care about errors in readahead. */
8579                 if (ret < 0)
8580                         continue;
8581                 BUG_ON(refs == 0);
8582
8583                 if (wc->stage == DROP_REFERENCE) {
8584                         if (refs == 1)
8585                                 goto reada;
8586
8587                         if (wc->level == 1 &&
8588                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8589                                 continue;
8590                         if (!wc->update_ref ||
8591                             generation <= root->root_key.offset)
8592                                 continue;
8593                         btrfs_node_key_to_cpu(eb, &key, slot);
8594                         ret = btrfs_comp_cpu_keys(&key,
8595                                                   &wc->update_progress);
8596                         if (ret < 0)
8597                                 continue;
8598                 } else {
8599                         if (wc->level == 1 &&
8600                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8601                                 continue;
8602                 }
8603 reada:
8604                 readahead_tree_block(fs_info, bytenr);
8605                 nread++;
8606         }
8607         wc->reada_slot = slot;
8608 }
8609
8610 /*
8611  * helper to process tree block while walking down the tree.
8612  *
8613  * when wc->stage == UPDATE_BACKREF, this function updates
8614  * back refs for pointers in the block.
8615  *
8616  * NOTE: return value 1 means we should stop walking down.
8617  */
8618 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8619                                    struct btrfs_root *root,
8620                                    struct btrfs_path *path,
8621                                    struct walk_control *wc, int lookup_info)
8622 {
8623         struct btrfs_fs_info *fs_info = root->fs_info;
8624         int level = wc->level;
8625         struct extent_buffer *eb = path->nodes[level];
8626         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8627         int ret;
8628
8629         if (wc->stage == UPDATE_BACKREF &&
8630             btrfs_header_owner(eb) != root->root_key.objectid)
8631                 return 1;
8632
8633         /*
8634          * when reference count of tree block is 1, it won't increase
8635          * again. once full backref flag is set, we never clear it.
8636          */
8637         if (lookup_info &&
8638             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8639              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8640                 BUG_ON(!path->locks[level]);
8641                 ret = btrfs_lookup_extent_info(trans, fs_info,
8642                                                eb->start, level, 1,
8643                                                &wc->refs[level],
8644                                                &wc->flags[level]);
8645                 BUG_ON(ret == -ENOMEM);
8646                 if (ret)
8647                         return ret;
8648                 BUG_ON(wc->refs[level] == 0);
8649         }
8650
8651         if (wc->stage == DROP_REFERENCE) {
8652                 if (wc->refs[level] > 1)
8653                         return 1;
8654
8655                 if (path->locks[level] && !wc->keep_locks) {
8656                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8657                         path->locks[level] = 0;
8658                 }
8659                 return 0;
8660         }
8661
8662         /* wc->stage == UPDATE_BACKREF */
8663         if (!(wc->flags[level] & flag)) {
8664                 BUG_ON(!path->locks[level]);
8665                 ret = btrfs_inc_ref(trans, root, eb, 1);
8666                 BUG_ON(ret); /* -ENOMEM */
8667                 ret = btrfs_dec_ref(trans, root, eb, 0);
8668                 BUG_ON(ret); /* -ENOMEM */
8669                 ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
8670                                                   eb->len, flag,
8671                                                   btrfs_header_level(eb), 0);
8672                 BUG_ON(ret); /* -ENOMEM */
8673                 wc->flags[level] |= flag;
8674         }
8675
8676         /*
8677          * the block is shared by multiple trees, so it's not good to
8678          * keep the tree lock
8679          */
8680         if (path->locks[level] && level > 0) {
8681                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8682                 path->locks[level] = 0;
8683         }
8684         return 0;
8685 }
8686
8687 /*
8688  * helper to process tree block pointer.
8689  *
8690  * when wc->stage == DROP_REFERENCE, this function checks
8691  * reference count of the block pointed to. if the block
8692  * is shared and we need update back refs for the subtree
8693  * rooted at the block, this function changes wc->stage to
8694  * UPDATE_BACKREF. if the block is shared and there is no
8695  * need to update back, this function drops the reference
8696  * to the block.
8697  *
8698  * NOTE: return value 1 means we should stop walking down.
8699  */
8700 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8701                                  struct btrfs_root *root,
8702                                  struct btrfs_path *path,
8703                                  struct walk_control *wc, int *lookup_info)
8704 {
8705         struct btrfs_fs_info *fs_info = root->fs_info;
8706         u64 bytenr;
8707         u64 generation;
8708         u64 parent;
8709         u32 blocksize;
8710         struct btrfs_key key;
8711         struct btrfs_key first_key;
8712         struct extent_buffer *next;
8713         int level = wc->level;
8714         int reada = 0;
8715         int ret = 0;
8716         bool need_account = false;
8717
8718         generation = btrfs_node_ptr_generation(path->nodes[level],
8719                                                path->slots[level]);
8720         /*
8721          * if the lower level block was created before the snapshot
8722          * was created, we know there is no need to update back refs
8723          * for the subtree
8724          */
8725         if (wc->stage == UPDATE_BACKREF &&
8726             generation <= root->root_key.offset) {
8727                 *lookup_info = 1;
8728                 return 1;
8729         }
8730
8731         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8732         btrfs_node_key_to_cpu(path->nodes[level], &first_key,
8733                               path->slots[level]);
8734         blocksize = fs_info->nodesize;
8735
8736         next = find_extent_buffer(fs_info, bytenr);
8737         if (!next) {
8738                 next = btrfs_find_create_tree_block(fs_info, bytenr);
8739                 if (IS_ERR(next))
8740                         return PTR_ERR(next);
8741
8742                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8743                                                level - 1);
8744                 reada = 1;
8745         }
8746         btrfs_tree_lock(next);
8747         btrfs_set_lock_blocking(next);
8748
8749         ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
8750                                        &wc->refs[level - 1],
8751                                        &wc->flags[level - 1]);
8752         if (ret < 0)
8753                 goto out_unlock;
8754
8755         if (unlikely(wc->refs[level - 1] == 0)) {
8756                 btrfs_err(fs_info, "Missing references.");
8757                 ret = -EIO;
8758                 goto out_unlock;
8759         }
8760         *lookup_info = 0;
8761
8762         if (wc->stage == DROP_REFERENCE) {
8763                 if (wc->refs[level - 1] > 1) {
8764                         need_account = true;
8765                         if (level == 1 &&
8766                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8767                                 goto skip;
8768
8769                         if (!wc->update_ref ||
8770                             generation <= root->root_key.offset)
8771                                 goto skip;
8772
8773                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8774                                               path->slots[level]);
8775                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8776                         if (ret < 0)
8777                                 goto skip;
8778
8779                         wc->stage = UPDATE_BACKREF;
8780                         wc->shared_level = level - 1;
8781                 }
8782         } else {
8783                 if (level == 1 &&
8784                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8785                         goto skip;
8786         }
8787
8788         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8789                 btrfs_tree_unlock(next);
8790                 free_extent_buffer(next);
8791                 next = NULL;
8792                 *lookup_info = 1;
8793         }
8794
8795         if (!next) {
8796                 if (reada && level == 1)
8797                         reada_walk_down(trans, root, wc, path);
8798                 next = read_tree_block(fs_info, bytenr, generation, level - 1,
8799                                        &first_key);
8800                 if (IS_ERR(next)) {
8801                         return PTR_ERR(next);
8802                 } else if (!extent_buffer_uptodate(next)) {
8803                         free_extent_buffer(next);
8804                         return -EIO;
8805                 }
8806                 btrfs_tree_lock(next);
8807                 btrfs_set_lock_blocking(next);
8808         }
8809
8810         level--;
8811         ASSERT(level == btrfs_header_level(next));
8812         if (level != btrfs_header_level(next)) {
8813                 btrfs_err(root->fs_info, "mismatched level");
8814                 ret = -EIO;
8815                 goto out_unlock;
8816         }
8817         path->nodes[level] = next;
8818         path->slots[level] = 0;
8819         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8820         wc->level = level;
8821         if (wc->level == 1)
8822                 wc->reada_slot = 0;
8823         return 0;
8824 skip:
8825         wc->refs[level - 1] = 0;
8826         wc->flags[level - 1] = 0;
8827         if (wc->stage == DROP_REFERENCE) {
8828                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8829                         parent = path->nodes[level]->start;
8830                 } else {
8831                         ASSERT(root->root_key.objectid ==
8832                                btrfs_header_owner(path->nodes[level]));
8833                         if (root->root_key.objectid !=
8834                             btrfs_header_owner(path->nodes[level])) {
8835                                 btrfs_err(root->fs_info,
8836                                                 "mismatched block owner");
8837                                 ret = -EIO;
8838                                 goto out_unlock;
8839                         }
8840                         parent = 0;
8841                 }
8842
8843                 if (need_account) {
8844                         ret = btrfs_qgroup_trace_subtree(trans, root, next,
8845                                                          generation, level - 1);
8846                         if (ret) {
8847                                 btrfs_err_rl(fs_info,
8848                                              "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
8849                                              ret);
8850                         }
8851                 }
8852                 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
8853                                         parent, root->root_key.objectid,
8854                                         level - 1, 0);
8855                 if (ret)
8856                         goto out_unlock;
8857         }
8858
8859         *lookup_info = 1;
8860         ret = 1;
8861
8862 out_unlock:
8863         btrfs_tree_unlock(next);
8864         free_extent_buffer(next);
8865
8866         return ret;
8867 }
8868
8869 /*
8870  * helper to process tree block while walking up the tree.
8871  *
8872  * when wc->stage == DROP_REFERENCE, this function drops
8873  * reference count on the block.
8874  *
8875  * when wc->stage == UPDATE_BACKREF, this function changes
8876  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8877  * to UPDATE_BACKREF previously while processing the block.
8878  *
8879  * NOTE: return value 1 means we should stop walking up.
8880  */
8881 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8882                                  struct btrfs_root *root,
8883                                  struct btrfs_path *path,
8884                                  struct walk_control *wc)
8885 {
8886         struct btrfs_fs_info *fs_info = root->fs_info;
8887         int ret;
8888         int level = wc->level;
8889         struct extent_buffer *eb = path->nodes[level];
8890         u64 parent = 0;
8891
8892         if (wc->stage == UPDATE_BACKREF) {
8893                 BUG_ON(wc->shared_level < level);
8894                 if (level < wc->shared_level)
8895                         goto out;
8896
8897                 ret = find_next_key(path, level + 1, &wc->update_progress);
8898                 if (ret > 0)
8899                         wc->update_ref = 0;
8900
8901                 wc->stage = DROP_REFERENCE;
8902                 wc->shared_level = -1;
8903                 path->slots[level] = 0;
8904
8905                 /*
8906                  * check reference count again if the block isn't locked.
8907                  * we should start walking down the tree again if reference
8908                  * count is one.
8909                  */
8910                 if (!path->locks[level]) {
8911                         BUG_ON(level == 0);
8912                         btrfs_tree_lock(eb);
8913                         btrfs_set_lock_blocking(eb);
8914                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8915
8916                         ret = btrfs_lookup_extent_info(trans, fs_info,
8917                                                        eb->start, level, 1,
8918                                                        &wc->refs[level],
8919                                                        &wc->flags[level]);
8920                         if (ret < 0) {
8921                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8922                                 path->locks[level] = 0;
8923                                 return ret;
8924                         }
8925                         BUG_ON(wc->refs[level] == 0);
8926                         if (wc->refs[level] == 1) {
8927                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8928                                 path->locks[level] = 0;
8929                                 return 1;
8930                         }
8931                 }
8932         }
8933
8934         /* wc->stage == DROP_REFERENCE */
8935         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8936
8937         if (wc->refs[level] == 1) {
8938                 if (level == 0) {
8939                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8940                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8941                         else
8942                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8943                         BUG_ON(ret); /* -ENOMEM */
8944                         ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
8945                         if (ret) {
8946                                 btrfs_err_rl(fs_info,
8947                                              "error %d accounting leaf items. Quota is out of sync, rescan required.",
8948                                              ret);
8949                         }
8950                 }
8951                 /* make block locked assertion in clean_tree_block happy */
8952                 if (!path->locks[level] &&
8953                     btrfs_header_generation(eb) == trans->transid) {
8954                         btrfs_tree_lock(eb);
8955                         btrfs_set_lock_blocking(eb);
8956                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8957                 }
8958                 clean_tree_block(fs_info, eb);
8959         }
8960
8961         if (eb == root->node) {
8962                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8963                         parent = eb->start;
8964                 else
8965                         BUG_ON(root->root_key.objectid !=
8966                                btrfs_header_owner(eb));
8967         } else {
8968                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8969                         parent = path->nodes[level + 1]->start;
8970                 else
8971                         BUG_ON(root->root_key.objectid !=
8972                                btrfs_header_owner(path->nodes[level + 1]));
8973         }
8974
8975         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8976 out:
8977         wc->refs[level] = 0;
8978         wc->flags[level] = 0;
8979         return 0;
8980 }
8981
8982 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8983                                    struct btrfs_root *root,
8984                                    struct btrfs_path *path,
8985                                    struct walk_control *wc)
8986 {
8987         int level = wc->level;
8988         int lookup_info = 1;
8989         int ret;
8990
8991         while (level >= 0) {
8992                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8993                 if (ret > 0)
8994                         break;
8995
8996                 if (level == 0)
8997                         break;
8998
8999                 if (path->slots[level] >=
9000                     btrfs_header_nritems(path->nodes[level]))
9001                         break;
9002
9003                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
9004                 if (ret > 0) {
9005                         path->slots[level]++;
9006                         continue;
9007                 } else if (ret < 0)
9008                         return ret;
9009                 level = wc->level;
9010         }
9011         return 0;
9012 }
9013
9014 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
9015                                  struct btrfs_root *root,
9016                                  struct btrfs_path *path,
9017                                  struct walk_control *wc, int max_level)
9018 {
9019         int level = wc->level;
9020         int ret;
9021
9022         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
9023         while (level < max_level && path->nodes[level]) {
9024                 wc->level = level;
9025                 if (path->slots[level] + 1 <
9026                     btrfs_header_nritems(path->nodes[level])) {
9027                         path->slots[level]++;
9028                         return 0;
9029                 } else {
9030                         ret = walk_up_proc(trans, root, path, wc);
9031                         if (ret > 0)
9032                                 return 0;
9033
9034                         if (path->locks[level]) {
9035                                 btrfs_tree_unlock_rw(path->nodes[level],
9036                                                      path->locks[level]);
9037                                 path->locks[level] = 0;
9038                         }
9039                         free_extent_buffer(path->nodes[level]);
9040                         path->nodes[level] = NULL;
9041                         level++;
9042                 }
9043         }
9044         return 1;
9045 }
9046
9047 /*
9048  * drop a subvolume tree.
9049  *
9050  * this function traverses the tree freeing any blocks that only
9051  * referenced by the tree.
9052  *
9053  * when a shared tree block is found. this function decreases its
9054  * reference count by one. if update_ref is true, this function
9055  * also make sure backrefs for the shared block and all lower level
9056  * blocks are properly updated.
9057  *
9058  * If called with for_reloc == 0, may exit early with -EAGAIN
9059  */
9060 int btrfs_drop_snapshot(struct btrfs_root *root,
9061                          struct btrfs_block_rsv *block_rsv, int update_ref,
9062                          int for_reloc)
9063 {
9064         struct btrfs_fs_info *fs_info = root->fs_info;
9065         struct btrfs_path *path;
9066         struct btrfs_trans_handle *trans;
9067         struct btrfs_root *tree_root = fs_info->tree_root;
9068         struct btrfs_root_item *root_item = &root->root_item;
9069         struct walk_control *wc;
9070         struct btrfs_key key;
9071         int err = 0;
9072         int ret;
9073         int level;
9074         bool root_dropped = false;
9075
9076         btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
9077
9078         path = btrfs_alloc_path();
9079         if (!path) {
9080                 err = -ENOMEM;
9081                 goto out;
9082         }
9083
9084         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9085         if (!wc) {
9086                 btrfs_free_path(path);
9087                 err = -ENOMEM;
9088                 goto out;
9089         }
9090
9091         trans = btrfs_start_transaction(tree_root, 0);
9092         if (IS_ERR(trans)) {
9093                 err = PTR_ERR(trans);
9094                 goto out_free;
9095         }
9096
9097         if (block_rsv)
9098                 trans->block_rsv = block_rsv;
9099
9100         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
9101                 level = btrfs_header_level(root->node);
9102                 path->nodes[level] = btrfs_lock_root_node(root);
9103                 btrfs_set_lock_blocking(path->nodes[level]);
9104                 path->slots[level] = 0;
9105                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9106                 memset(&wc->update_progress, 0,
9107                        sizeof(wc->update_progress));
9108         } else {
9109                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
9110                 memcpy(&wc->update_progress, &key,
9111                        sizeof(wc->update_progress));
9112
9113                 level = root_item->drop_level;
9114                 BUG_ON(level == 0);
9115                 path->lowest_level = level;
9116                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9117                 path->lowest_level = 0;
9118                 if (ret < 0) {
9119                         err = ret;
9120                         goto out_end_trans;
9121                 }
9122                 WARN_ON(ret > 0);
9123
9124                 /*
9125                  * unlock our path, this is safe because only this
9126                  * function is allowed to delete this snapshot
9127                  */
9128                 btrfs_unlock_up_safe(path, 0);
9129
9130                 level = btrfs_header_level(root->node);
9131                 while (1) {
9132                         btrfs_tree_lock(path->nodes[level]);
9133                         btrfs_set_lock_blocking(path->nodes[level]);
9134                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9135
9136                         ret = btrfs_lookup_extent_info(trans, fs_info,
9137                                                 path->nodes[level]->start,
9138                                                 level, 1, &wc->refs[level],
9139                                                 &wc->flags[level]);
9140                         if (ret < 0) {
9141                                 err = ret;
9142                                 goto out_end_trans;
9143                         }
9144                         BUG_ON(wc->refs[level] == 0);
9145
9146                         if (level == root_item->drop_level)
9147                                 break;
9148
9149                         btrfs_tree_unlock(path->nodes[level]);
9150                         path->locks[level] = 0;
9151                         WARN_ON(wc->refs[level] != 1);
9152                         level--;
9153                 }
9154         }
9155
9156         wc->level = level;
9157         wc->shared_level = -1;
9158         wc->stage = DROP_REFERENCE;
9159         wc->update_ref = update_ref;
9160         wc->keep_locks = 0;
9161         wc->for_reloc = for_reloc;
9162         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
9163
9164         while (1) {
9165
9166                 ret = walk_down_tree(trans, root, path, wc);
9167                 if (ret < 0) {
9168                         err = ret;
9169                         break;
9170                 }
9171
9172                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
9173                 if (ret < 0) {
9174                         err = ret;
9175                         break;
9176                 }
9177
9178                 if (ret > 0) {
9179                         BUG_ON(wc->stage != DROP_REFERENCE);
9180                         break;
9181                 }
9182
9183                 if (wc->stage == DROP_REFERENCE) {
9184                         level = wc->level;
9185                         btrfs_node_key(path->nodes[level],
9186                                        &root_item->drop_progress,
9187                                        path->slots[level]);
9188                         root_item->drop_level = level;
9189                 }
9190
9191                 BUG_ON(wc->level == 0);
9192                 if (btrfs_should_end_transaction(trans) ||
9193                     (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
9194                         ret = btrfs_update_root(trans, tree_root,
9195                                                 &root->root_key,
9196                                                 root_item);
9197                         if (ret) {
9198                                 btrfs_abort_transaction(trans, ret);
9199                                 err = ret;
9200                                 goto out_end_trans;
9201                         }
9202
9203                         btrfs_end_transaction_throttle(trans);
9204                         if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
9205                                 btrfs_debug(fs_info,
9206                                             "drop snapshot early exit");
9207                                 err = -EAGAIN;
9208                                 goto out_free;
9209                         }
9210
9211                         trans = btrfs_start_transaction(tree_root, 0);
9212                         if (IS_ERR(trans)) {
9213                                 err = PTR_ERR(trans);
9214                                 goto out_free;
9215                         }
9216                         if (block_rsv)
9217                                 trans->block_rsv = block_rsv;
9218                 }
9219         }
9220         btrfs_release_path(path);
9221         if (err)
9222                 goto out_end_trans;
9223
9224         ret = btrfs_del_root(trans, fs_info, &root->root_key);
9225         if (ret) {
9226                 btrfs_abort_transaction(trans, ret);
9227                 err = ret;
9228                 goto out_end_trans;
9229         }
9230
9231         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9232                 ret = btrfs_find_root(tree_root, &root->root_key, path,
9233                                       NULL, NULL);
9234                 if (ret < 0) {
9235                         btrfs_abort_transaction(trans, ret);
9236                         err = ret;
9237                         goto out_end_trans;
9238                 } else if (ret > 0) {
9239                         /* if we fail to delete the orphan item this time
9240                          * around, it'll get picked up the next time.
9241                          *
9242                          * The most common failure here is just -ENOENT.
9243                          */
9244                         btrfs_del_orphan_item(trans, tree_root,
9245                                               root->root_key.objectid);
9246                 }
9247         }
9248
9249         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9250                 btrfs_add_dropped_root(trans, root);
9251         } else {
9252                 free_extent_buffer(root->node);
9253                 free_extent_buffer(root->commit_root);
9254                 btrfs_put_fs_root(root);
9255         }
9256         root_dropped = true;
9257 out_end_trans:
9258         btrfs_end_transaction_throttle(trans);
9259 out_free:
9260         kfree(wc);
9261         btrfs_free_path(path);
9262 out:
9263         /*
9264          * So if we need to stop dropping the snapshot for whatever reason we
9265          * need to make sure to add it back to the dead root list so that we
9266          * keep trying to do the work later.  This also cleans up roots if we
9267          * don't have it in the radix (like when we recover after a power fail
9268          * or unmount) so we don't leak memory.
9269          */
9270         if (!for_reloc && !root_dropped)
9271                 btrfs_add_dead_root(root);
9272         if (err && err != -EAGAIN)
9273                 btrfs_handle_fs_error(fs_info, err, NULL);
9274         return err;
9275 }
9276
9277 /*
9278  * drop subtree rooted at tree block 'node'.
9279  *
9280  * NOTE: this function will unlock and release tree block 'node'
9281  * only used by relocation code
9282  */
9283 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9284                         struct btrfs_root *root,
9285                         struct extent_buffer *node,
9286                         struct extent_buffer *parent)
9287 {
9288         struct btrfs_fs_info *fs_info = root->fs_info;
9289         struct btrfs_path *path;
9290         struct walk_control *wc;
9291         int level;
9292         int parent_level;
9293         int ret = 0;
9294         int wret;
9295
9296         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9297
9298         path = btrfs_alloc_path();
9299         if (!path)
9300                 return -ENOMEM;
9301
9302         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9303         if (!wc) {
9304                 btrfs_free_path(path);
9305                 return -ENOMEM;
9306         }
9307
9308         btrfs_assert_tree_locked(parent);
9309         parent_level = btrfs_header_level(parent);
9310         extent_buffer_get(parent);
9311         path->nodes[parent_level] = parent;
9312         path->slots[parent_level] = btrfs_header_nritems(parent);
9313
9314         btrfs_assert_tree_locked(node);
9315         level = btrfs_header_level(node);
9316         path->nodes[level] = node;
9317         path->slots[level] = 0;
9318         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9319
9320         wc->refs[parent_level] = 1;
9321         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9322         wc->level = level;
9323         wc->shared_level = -1;
9324         wc->stage = DROP_REFERENCE;
9325         wc->update_ref = 0;
9326         wc->keep_locks = 1;
9327         wc->for_reloc = 1;
9328         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
9329
9330         while (1) {
9331                 wret = walk_down_tree(trans, root, path, wc);
9332                 if (wret < 0) {
9333                         ret = wret;
9334                         break;
9335                 }
9336
9337                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9338                 if (wret < 0)
9339                         ret = wret;
9340                 if (wret != 0)
9341                         break;
9342         }
9343
9344         kfree(wc);
9345         btrfs_free_path(path);
9346         return ret;
9347 }
9348
9349 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
9350 {
9351         u64 num_devices;
9352         u64 stripped;
9353
9354         /*
9355          * if restripe for this chunk_type is on pick target profile and
9356          * return, otherwise do the usual balance
9357          */
9358         stripped = get_restripe_target(fs_info, flags);
9359         if (stripped)
9360                 return extended_to_chunk(stripped);
9361
9362         num_devices = fs_info->fs_devices->rw_devices;
9363
9364         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9365                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9366                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9367
9368         if (num_devices == 1) {
9369                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9370                 stripped = flags & ~stripped;
9371
9372                 /* turn raid0 into single device chunks */
9373                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9374                         return stripped;
9375
9376                 /* turn mirroring into duplication */
9377                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9378                              BTRFS_BLOCK_GROUP_RAID10))
9379                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9380         } else {
9381                 /* they already had raid on here, just return */
9382                 if (flags & stripped)
9383                         return flags;
9384
9385                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9386                 stripped = flags & ~stripped;
9387
9388                 /* switch duplicated blocks with raid1 */
9389                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9390                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9391
9392                 /* this is drive concat, leave it alone */
9393         }
9394
9395         return flags;
9396 }
9397
9398 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9399 {
9400         struct btrfs_space_info *sinfo = cache->space_info;
9401         u64 num_bytes;
9402         u64 min_allocable_bytes;
9403         int ret = -ENOSPC;
9404
9405         /*
9406          * We need some metadata space and system metadata space for
9407          * allocating chunks in some corner cases until we force to set
9408          * it to be readonly.
9409          */
9410         if ((sinfo->flags &
9411              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9412             !force)
9413                 min_allocable_bytes = SZ_1M;
9414         else
9415                 min_allocable_bytes = 0;
9416
9417         spin_lock(&sinfo->lock);
9418         spin_lock(&cache->lock);
9419
9420         if (cache->ro) {
9421                 cache->ro++;
9422                 ret = 0;
9423                 goto out;
9424         }
9425
9426         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9427                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9428
9429         if (btrfs_space_info_used(sinfo, true) + num_bytes +
9430             min_allocable_bytes <= sinfo->total_bytes) {
9431                 sinfo->bytes_readonly += num_bytes;
9432                 cache->ro++;
9433                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9434                 ret = 0;
9435         }
9436 out:
9437         spin_unlock(&cache->lock);
9438         spin_unlock(&sinfo->lock);
9439         return ret;
9440 }
9441
9442 int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
9443                              struct btrfs_block_group_cache *cache)
9444
9445 {
9446         struct btrfs_trans_handle *trans;
9447         u64 alloc_flags;
9448         int ret;
9449
9450 again:
9451         trans = btrfs_join_transaction(fs_info->extent_root);
9452         if (IS_ERR(trans))
9453                 return PTR_ERR(trans);
9454
9455         /*
9456          * we're not allowed to set block groups readonly after the dirty
9457          * block groups cache has started writing.  If it already started,
9458          * back off and let this transaction commit
9459          */
9460         mutex_lock(&fs_info->ro_block_group_mutex);
9461         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9462                 u64 transid = trans->transid;
9463
9464                 mutex_unlock(&fs_info->ro_block_group_mutex);
9465                 btrfs_end_transaction(trans);
9466
9467                 ret = btrfs_wait_for_commit(fs_info, transid);
9468                 if (ret)
9469                         return ret;
9470                 goto again;
9471         }
9472
9473         /*
9474          * if we are changing raid levels, try to allocate a corresponding
9475          * block group with the new raid level.
9476          */
9477         alloc_flags = update_block_group_flags(fs_info, cache->flags);
9478         if (alloc_flags != cache->flags) {
9479                 ret = do_chunk_alloc(trans, fs_info, alloc_flags,
9480                                      CHUNK_ALLOC_FORCE);
9481                 /*
9482                  * ENOSPC is allowed here, we may have enough space
9483                  * already allocated at the new raid level to
9484                  * carry on
9485                  */
9486                 if (ret == -ENOSPC)
9487                         ret = 0;
9488                 if (ret < 0)
9489                         goto out;
9490         }
9491
9492         ret = inc_block_group_ro(cache, 0);
9493         if (!ret)
9494                 goto out;
9495         alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
9496         ret = do_chunk_alloc(trans, fs_info, alloc_flags,
9497                              CHUNK_ALLOC_FORCE);
9498         if (ret < 0)
9499                 goto out;
9500         ret = inc_block_group_ro(cache, 0);
9501 out:
9502         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9503                 alloc_flags = update_block_group_flags(fs_info, cache->flags);
9504                 mutex_lock(&fs_info->chunk_mutex);
9505                 check_system_chunk(trans, fs_info, alloc_flags);
9506                 mutex_unlock(&fs_info->chunk_mutex);
9507         }
9508         mutex_unlock(&fs_info->ro_block_group_mutex);
9509
9510         btrfs_end_transaction(trans);
9511         return ret;
9512 }
9513
9514 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9515                             struct btrfs_fs_info *fs_info, u64 type)
9516 {
9517         u64 alloc_flags = get_alloc_profile(fs_info, type);
9518
9519         return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
9520 }
9521
9522 /*
9523  * helper to account the unused space of all the readonly block group in the
9524  * space_info. takes mirrors into account.
9525  */
9526 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9527 {
9528         struct btrfs_block_group_cache *block_group;
9529         u64 free_bytes = 0;
9530         int factor;
9531
9532         /* It's df, we don't care if it's racy */
9533         if (list_empty(&sinfo->ro_bgs))
9534                 return 0;
9535
9536         spin_lock(&sinfo->lock);
9537         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9538                 spin_lock(&block_group->lock);
9539
9540                 if (!block_group->ro) {
9541                         spin_unlock(&block_group->lock);
9542                         continue;
9543                 }
9544
9545                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9546                                           BTRFS_BLOCK_GROUP_RAID10 |
9547                                           BTRFS_BLOCK_GROUP_DUP))
9548                         factor = 2;
9549                 else
9550                         factor = 1;
9551
9552                 free_bytes += (block_group->key.offset -
9553                                btrfs_block_group_used(&block_group->item)) *
9554                                factor;
9555
9556                 spin_unlock(&block_group->lock);
9557         }
9558         spin_unlock(&sinfo->lock);
9559
9560         return free_bytes;
9561 }
9562
9563 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
9564 {
9565         struct btrfs_space_info *sinfo = cache->space_info;
9566         u64 num_bytes;
9567
9568         BUG_ON(!cache->ro);
9569
9570         spin_lock(&sinfo->lock);
9571         spin_lock(&cache->lock);
9572         if (!--cache->ro) {
9573                 num_bytes = cache->key.offset - cache->reserved -
9574                             cache->pinned - cache->bytes_super -
9575                             btrfs_block_group_used(&cache->item);
9576                 sinfo->bytes_readonly -= num_bytes;
9577                 list_del_init(&cache->ro_list);
9578         }
9579         spin_unlock(&cache->lock);
9580         spin_unlock(&sinfo->lock);
9581 }
9582
9583 /*
9584  * checks to see if its even possible to relocate this block group.
9585  *
9586  * @return - -1 if it's not a good idea to relocate this block group, 0 if its
9587  * ok to go ahead and try.
9588  */
9589 int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
9590 {
9591         struct btrfs_root *root = fs_info->extent_root;
9592         struct btrfs_block_group_cache *block_group;
9593         struct btrfs_space_info *space_info;
9594         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
9595         struct btrfs_device *device;
9596         struct btrfs_trans_handle *trans;
9597         u64 min_free;
9598         u64 dev_min = 1;
9599         u64 dev_nr = 0;
9600         u64 target;
9601         int debug;
9602         int index;
9603         int full = 0;
9604         int ret = 0;
9605
9606         debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
9607
9608         block_group = btrfs_lookup_block_group(fs_info, bytenr);
9609
9610         /* odd, couldn't find the block group, leave it alone */
9611         if (!block_group) {
9612                 if (debug)
9613                         btrfs_warn(fs_info,
9614                                    "can't find block group for bytenr %llu",
9615                                    bytenr);
9616                 return -1;
9617         }
9618
9619         min_free = btrfs_block_group_used(&block_group->item);
9620
9621         /* no bytes used, we're good */
9622         if (!min_free)
9623                 goto out;
9624
9625         space_info = block_group->space_info;
9626         spin_lock(&space_info->lock);
9627
9628         full = space_info->full;
9629
9630         /*
9631          * if this is the last block group we have in this space, we can't
9632          * relocate it unless we're able to allocate a new chunk below.
9633          *
9634          * Otherwise, we need to make sure we have room in the space to handle
9635          * all of the extents from this block group.  If we can, we're good
9636          */
9637         if ((space_info->total_bytes != block_group->key.offset) &&
9638             (btrfs_space_info_used(space_info, false) + min_free <
9639              space_info->total_bytes)) {
9640                 spin_unlock(&space_info->lock);
9641                 goto out;
9642         }
9643         spin_unlock(&space_info->lock);
9644
9645         /*
9646          * ok we don't have enough space, but maybe we have free space on our
9647          * devices to allocate new chunks for relocation, so loop through our
9648          * alloc devices and guess if we have enough space.  if this block
9649          * group is going to be restriped, run checks against the target
9650          * profile instead of the current one.
9651          */
9652         ret = -1;
9653
9654         /*
9655          * index:
9656          *      0: raid10
9657          *      1: raid1
9658          *      2: dup
9659          *      3: raid0
9660          *      4: single
9661          */
9662         target = get_restripe_target(fs_info, block_group->flags);
9663         if (target) {
9664                 index = btrfs_bg_flags_to_raid_index(extended_to_chunk(target));
9665         } else {
9666                 /*
9667                  * this is just a balance, so if we were marked as full
9668                  * we know there is no space for a new chunk
9669                  */
9670                 if (full) {
9671                         if (debug)
9672                                 btrfs_warn(fs_info,
9673                                            "no space to alloc new chunk for block group %llu",
9674                                            block_group->key.objectid);
9675                         goto out;
9676                 }
9677
9678                 index = btrfs_bg_flags_to_raid_index(block_group->flags);
9679         }
9680
9681         if (index == BTRFS_RAID_RAID10) {
9682                 dev_min = 4;
9683                 /* Divide by 2 */
9684                 min_free >>= 1;
9685         } else if (index == BTRFS_RAID_RAID1) {
9686                 dev_min = 2;
9687         } else if (index == BTRFS_RAID_DUP) {
9688                 /* Multiply by 2 */
9689                 min_free <<= 1;
9690         } else if (index == BTRFS_RAID_RAID0) {
9691                 dev_min = fs_devices->rw_devices;
9692                 min_free = div64_u64(min_free, dev_min);
9693         }
9694
9695         /* We need to do this so that we can look at pending chunks */
9696         trans = btrfs_join_transaction(root);
9697         if (IS_ERR(trans)) {
9698                 ret = PTR_ERR(trans);
9699                 goto out;
9700         }
9701
9702         mutex_lock(&fs_info->chunk_mutex);
9703         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9704                 u64 dev_offset;
9705
9706                 /*
9707                  * check to make sure we can actually find a chunk with enough
9708                  * space to fit our block group in.
9709                  */
9710                 if (device->total_bytes > device->bytes_used + min_free &&
9711                     !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
9712                         ret = find_free_dev_extent(trans, device, min_free,
9713                                                    &dev_offset, NULL);
9714                         if (!ret)
9715                                 dev_nr++;
9716
9717                         if (dev_nr >= dev_min)
9718                                 break;
9719
9720                         ret = -1;
9721                 }
9722         }
9723         if (debug && ret == -1)
9724                 btrfs_warn(fs_info,
9725                            "no space to allocate a new chunk for block group %llu",
9726                            block_group->key.objectid);
9727         mutex_unlock(&fs_info->chunk_mutex);
9728         btrfs_end_transaction(trans);
9729 out:
9730         btrfs_put_block_group(block_group);
9731         return ret;
9732 }
9733
9734 static int find_first_block_group(struct btrfs_fs_info *fs_info,
9735                                   struct btrfs_path *path,
9736                                   struct btrfs_key *key)
9737 {
9738         struct btrfs_root *root = fs_info->extent_root;
9739         int ret = 0;
9740         struct btrfs_key found_key;
9741         struct extent_buffer *leaf;
9742         int slot;
9743
9744         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9745         if (ret < 0)
9746                 goto out;
9747
9748         while (1) {
9749                 slot = path->slots[0];
9750                 leaf = path->nodes[0];
9751                 if (slot >= btrfs_header_nritems(leaf)) {
9752                         ret = btrfs_next_leaf(root, path);
9753                         if (ret == 0)
9754                                 continue;
9755                         if (ret < 0)
9756                                 goto out;
9757                         break;
9758                 }
9759                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9760
9761                 if (found_key.objectid >= key->objectid &&
9762                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9763                         struct extent_map_tree *em_tree;
9764                         struct extent_map *em;
9765
9766                         em_tree = &root->fs_info->mapping_tree.map_tree;
9767                         read_lock(&em_tree->lock);
9768                         em = lookup_extent_mapping(em_tree, found_key.objectid,
9769                                                    found_key.offset);
9770                         read_unlock(&em_tree->lock);
9771                         if (!em) {
9772                                 btrfs_err(fs_info,
9773                         "logical %llu len %llu found bg but no related chunk",
9774                                           found_key.objectid, found_key.offset);
9775                                 ret = -ENOENT;
9776                         } else {
9777                                 ret = 0;
9778                         }
9779                         free_extent_map(em);
9780                         goto out;
9781                 }
9782                 path->slots[0]++;
9783         }
9784 out:
9785         return ret;
9786 }
9787
9788 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9789 {
9790         struct btrfs_block_group_cache *block_group;
9791         u64 last = 0;
9792
9793         while (1) {
9794                 struct inode *inode;
9795
9796                 block_group = btrfs_lookup_first_block_group(info, last);
9797                 while (block_group) {
9798                         spin_lock(&block_group->lock);
9799                         if (block_group->iref)
9800                                 break;
9801                         spin_unlock(&block_group->lock);
9802                         block_group = next_block_group(info, block_group);
9803                 }
9804                 if (!block_group) {
9805                         if (last == 0)
9806                                 break;
9807                         last = 0;
9808                         continue;
9809                 }
9810
9811                 inode = block_group->inode;
9812                 block_group->iref = 0;
9813                 block_group->inode = NULL;
9814                 spin_unlock(&block_group->lock);
9815                 ASSERT(block_group->io_ctl.inode == NULL);
9816                 iput(inode);
9817                 last = block_group->key.objectid + block_group->key.offset;
9818                 btrfs_put_block_group(block_group);
9819         }
9820 }
9821
9822 /*
9823  * Must be called only after stopping all workers, since we could have block
9824  * group caching kthreads running, and therefore they could race with us if we
9825  * freed the block groups before stopping them.
9826  */
9827 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9828 {
9829         struct btrfs_block_group_cache *block_group;
9830         struct btrfs_space_info *space_info;
9831         struct btrfs_caching_control *caching_ctl;
9832         struct rb_node *n;
9833
9834         down_write(&info->commit_root_sem);
9835         while (!list_empty(&info->caching_block_groups)) {
9836                 caching_ctl = list_entry(info->caching_block_groups.next,
9837                                          struct btrfs_caching_control, list);
9838                 list_del(&caching_ctl->list);
9839                 put_caching_control(caching_ctl);
9840         }
9841         up_write(&info->commit_root_sem);
9842
9843         spin_lock(&info->unused_bgs_lock);
9844         while (!list_empty(&info->unused_bgs)) {
9845                 block_group = list_first_entry(&info->unused_bgs,
9846                                                struct btrfs_block_group_cache,
9847                                                bg_list);
9848                 list_del_init(&block_group->bg_list);
9849                 btrfs_put_block_group(block_group);
9850         }
9851         spin_unlock(&info->unused_bgs_lock);
9852
9853         spin_lock(&info->block_group_cache_lock);
9854         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9855                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9856                                        cache_node);
9857                 rb_erase(&block_group->cache_node,
9858                          &info->block_group_cache_tree);
9859                 RB_CLEAR_NODE(&block_group->cache_node);
9860                 spin_unlock(&info->block_group_cache_lock);
9861
9862                 down_write(&block_group->space_info->groups_sem);
9863                 list_del(&block_group->list);
9864                 up_write(&block_group->space_info->groups_sem);
9865
9866                 /*
9867                  * We haven't cached this block group, which means we could
9868                  * possibly have excluded extents on this block group.
9869                  */
9870                 if (block_group->cached == BTRFS_CACHE_NO ||
9871                     block_group->cached == BTRFS_CACHE_ERROR)
9872                         free_excluded_extents(info, block_group);
9873
9874                 btrfs_remove_free_space_cache(block_group);
9875                 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
9876                 ASSERT(list_empty(&block_group->dirty_list));
9877                 ASSERT(list_empty(&block_group->io_list));
9878                 ASSERT(list_empty(&block_group->bg_list));
9879                 ASSERT(atomic_read(&block_group->count) == 1);
9880                 btrfs_put_block_group(block_group);
9881
9882                 spin_lock(&info->block_group_cache_lock);
9883         }
9884         spin_unlock(&info->block_group_cache_lock);
9885
9886         /* now that all the block groups are freed, go through and
9887          * free all the space_info structs.  This is only called during
9888          * the final stages of unmount, and so we know nobody is
9889          * using them.  We call synchronize_rcu() once before we start,
9890          * just to be on the safe side.
9891          */
9892         synchronize_rcu();
9893
9894         release_global_block_rsv(info);
9895
9896         while (!list_empty(&info->space_info)) {
9897                 int i;
9898
9899                 space_info = list_entry(info->space_info.next,
9900                                         struct btrfs_space_info,
9901                                         list);
9902
9903                 /*
9904                  * Do not hide this behind enospc_debug, this is actually
9905                  * important and indicates a real bug if this happens.
9906                  */
9907                 if (WARN_ON(space_info->bytes_pinned > 0 ||
9908                             space_info->bytes_reserved > 0 ||
9909                             space_info->bytes_may_use > 0))
9910                         dump_space_info(info, space_info, 0, 0);
9911                 list_del(&space_info->list);
9912                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9913                         struct kobject *kobj;
9914                         kobj = space_info->block_group_kobjs[i];
9915                         space_info->block_group_kobjs[i] = NULL;
9916                         if (kobj) {
9917                                 kobject_del(kobj);
9918                                 kobject_put(kobj);
9919                         }
9920                 }
9921                 kobject_del(&space_info->kobj);
9922                 kobject_put(&space_info->kobj);
9923         }
9924         return 0;
9925 }
9926
9927 /* link_block_group will queue up kobjects to add when we're reclaim-safe */
9928 void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
9929 {
9930         struct btrfs_space_info *space_info;
9931         struct raid_kobject *rkobj;
9932         LIST_HEAD(list);
9933         int index;
9934         int ret = 0;
9935
9936         spin_lock(&fs_info->pending_raid_kobjs_lock);
9937         list_splice_init(&fs_info->pending_raid_kobjs, &list);
9938         spin_unlock(&fs_info->pending_raid_kobjs_lock);
9939
9940         list_for_each_entry(rkobj, &list, list) {
9941                 space_info = __find_space_info(fs_info, rkobj->flags);
9942                 index = btrfs_bg_flags_to_raid_index(rkobj->flags);
9943
9944                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9945                                   "%s", get_raid_name(index));
9946                 if (ret) {
9947                         kobject_put(&rkobj->kobj);
9948                         break;
9949                 }
9950         }
9951         if (ret)
9952                 btrfs_warn(fs_info,
9953                            "failed to add kobject for block cache, ignoring");
9954 }
9955
9956 static void link_block_group(struct btrfs_block_group_cache *cache)
9957 {
9958         struct btrfs_space_info *space_info = cache->space_info;
9959         struct btrfs_fs_info *fs_info = cache->fs_info;
9960         int index = btrfs_bg_flags_to_raid_index(cache->flags);
9961         bool first = false;
9962
9963         down_write(&space_info->groups_sem);
9964         if (list_empty(&space_info->block_groups[index]))
9965                 first = true;
9966         list_add_tail(&cache->list, &space_info->block_groups[index]);
9967         up_write(&space_info->groups_sem);
9968
9969         if (first) {
9970                 struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9971                 if (!rkobj) {
9972                         btrfs_warn(cache->fs_info,
9973                                 "couldn't alloc memory for raid level kobject");
9974                         return;
9975                 }
9976                 rkobj->flags = cache->flags;
9977                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9978
9979                 spin_lock(&fs_info->pending_raid_kobjs_lock);
9980                 list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
9981                 spin_unlock(&fs_info->pending_raid_kobjs_lock);
9982                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9983         }
9984 }
9985
9986 static struct btrfs_block_group_cache *
9987 btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
9988                                u64 start, u64 size)
9989 {
9990         struct btrfs_block_group_cache *cache;
9991
9992         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9993         if (!cache)
9994                 return NULL;
9995
9996         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9997                                         GFP_NOFS);
9998         if (!cache->free_space_ctl) {
9999                 kfree(cache);
10000                 return NULL;
10001         }
10002
10003         cache->key.objectid = start;
10004         cache->key.offset = size;
10005         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10006
10007         cache->fs_info = fs_info;
10008         cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
10009         set_free_space_tree_thresholds(cache);
10010
10011         atomic_set(&cache->count, 1);
10012         spin_lock_init(&cache->lock);
10013         init_rwsem(&cache->data_rwsem);
10014         INIT_LIST_HEAD(&cache->list);
10015         INIT_LIST_HEAD(&cache->cluster_list);
10016         INIT_LIST_HEAD(&cache->bg_list);
10017         INIT_LIST_HEAD(&cache->ro_list);
10018         INIT_LIST_HEAD(&cache->dirty_list);
10019         INIT_LIST_HEAD(&cache->io_list);
10020         btrfs_init_free_space_ctl(cache);
10021         atomic_set(&cache->trimming, 0);
10022         mutex_init(&cache->free_space_lock);
10023         btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
10024
10025         return cache;
10026 }
10027
10028 int btrfs_read_block_groups(struct btrfs_fs_info *info)
10029 {
10030         struct btrfs_path *path;
10031         int ret;
10032         struct btrfs_block_group_cache *cache;
10033         struct btrfs_space_info *space_info;
10034         struct btrfs_key key;
10035         struct btrfs_key found_key;
10036         struct extent_buffer *leaf;
10037         int need_clear = 0;
10038         u64 cache_gen;
10039         u64 feature;
10040         int mixed;
10041
10042         feature = btrfs_super_incompat_flags(info->super_copy);
10043         mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
10044
10045         key.objectid = 0;
10046         key.offset = 0;
10047         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10048         path = btrfs_alloc_path();
10049         if (!path)
10050                 return -ENOMEM;
10051         path->reada = READA_FORWARD;
10052
10053         cache_gen = btrfs_super_cache_generation(info->super_copy);
10054         if (btrfs_test_opt(info, SPACE_CACHE) &&
10055             btrfs_super_generation(info->super_copy) != cache_gen)
10056                 need_clear = 1;
10057         if (btrfs_test_opt(info, CLEAR_CACHE))
10058                 need_clear = 1;
10059
10060         while (1) {
10061                 ret = find_first_block_group(info, path, &key);
10062                 if (ret > 0)
10063                         break;
10064                 if (ret != 0)
10065                         goto error;
10066
10067                 leaf = path->nodes[0];
10068                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
10069
10070                 cache = btrfs_create_block_group_cache(info, found_key.objectid,
10071                                                        found_key.offset);
10072                 if (!cache) {
10073                         ret = -ENOMEM;
10074                         goto error;
10075                 }
10076
10077                 if (need_clear) {
10078                         /*
10079                          * When we mount with old space cache, we need to
10080                          * set BTRFS_DC_CLEAR and set dirty flag.
10081                          *
10082                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
10083                          *    truncate the old free space cache inode and
10084                          *    setup a new one.
10085                          * b) Setting 'dirty flag' makes sure that we flush
10086                          *    the new space cache info onto disk.
10087                          */
10088                         if (btrfs_test_opt(info, SPACE_CACHE))
10089                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
10090                 }
10091
10092                 read_extent_buffer(leaf, &cache->item,
10093                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
10094                                    sizeof(cache->item));
10095                 cache->flags = btrfs_block_group_flags(&cache->item);
10096                 if (!mixed &&
10097                     ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
10098                     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
10099                         btrfs_err(info,
10100 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
10101                                   cache->key.objectid);
10102                         ret = -EINVAL;
10103                         goto error;
10104                 }
10105
10106                 key.objectid = found_key.objectid + found_key.offset;
10107                 btrfs_release_path(path);
10108
10109                 /*
10110                  * We need to exclude the super stripes now so that the space
10111                  * info has super bytes accounted for, otherwise we'll think
10112                  * we have more space than we actually do.
10113                  */
10114                 ret = exclude_super_stripes(info, cache);
10115                 if (ret) {
10116                         /*
10117                          * We may have excluded something, so call this just in
10118                          * case.
10119                          */
10120                         free_excluded_extents(info, cache);
10121                         btrfs_put_block_group(cache);
10122                         goto error;
10123                 }
10124
10125                 /*
10126                  * check for two cases, either we are full, and therefore
10127                  * don't need to bother with the caching work since we won't
10128                  * find any space, or we are empty, and we can just add all
10129                  * the space in and be done with it.  This saves us _alot_ of
10130                  * time, particularly in the full case.
10131                  */
10132                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
10133                         cache->last_byte_to_unpin = (u64)-1;
10134                         cache->cached = BTRFS_CACHE_FINISHED;
10135                         free_excluded_extents(info, cache);
10136                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10137                         cache->last_byte_to_unpin = (u64)-1;
10138                         cache->cached = BTRFS_CACHE_FINISHED;
10139                         add_new_free_space(cache, info,
10140                                            found_key.objectid,
10141                                            found_key.objectid +
10142                                            found_key.offset);
10143                         free_excluded_extents(info, cache);
10144                 }
10145
10146                 ret = btrfs_add_block_group_cache(info, cache);
10147                 if (ret) {
10148                         btrfs_remove_free_space_cache(cache);
10149                         btrfs_put_block_group(cache);
10150                         goto error;
10151                 }
10152
10153                 trace_btrfs_add_block_group(info, cache, 0);
10154                 update_space_info(info, cache->flags, found_key.offset,
10155                                   btrfs_block_group_used(&cache->item),
10156                                   cache->bytes_super, &space_info);
10157
10158                 cache->space_info = space_info;
10159
10160                 link_block_group(cache);
10161
10162                 set_avail_alloc_bits(info, cache->flags);
10163                 if (btrfs_chunk_readonly(info, cache->key.objectid)) {
10164                         inc_block_group_ro(cache, 1);
10165                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10166                         spin_lock(&info->unused_bgs_lock);
10167                         /* Should always be true but just in case. */
10168                         if (list_empty(&cache->bg_list)) {
10169                                 btrfs_get_block_group(cache);
10170                                 list_add_tail(&cache->bg_list,
10171                                               &info->unused_bgs);
10172                         }
10173                         spin_unlock(&info->unused_bgs_lock);
10174                 }
10175         }
10176
10177         list_for_each_entry_rcu(space_info, &info->space_info, list) {
10178                 if (!(get_alloc_profile(info, space_info->flags) &
10179                       (BTRFS_BLOCK_GROUP_RAID10 |
10180                        BTRFS_BLOCK_GROUP_RAID1 |
10181                        BTRFS_BLOCK_GROUP_RAID5 |
10182                        BTRFS_BLOCK_GROUP_RAID6 |
10183                        BTRFS_BLOCK_GROUP_DUP)))
10184                         continue;
10185                 /*
10186                  * avoid allocating from un-mirrored block group if there are
10187                  * mirrored block groups.
10188                  */
10189                 list_for_each_entry(cache,
10190                                 &space_info->block_groups[BTRFS_RAID_RAID0],
10191                                 list)
10192                         inc_block_group_ro(cache, 1);
10193                 list_for_each_entry(cache,
10194                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
10195                                 list)
10196                         inc_block_group_ro(cache, 1);
10197         }
10198
10199         btrfs_add_raid_kobjects(info);
10200         init_global_block_rsv(info);
10201         ret = 0;
10202 error:
10203         btrfs_free_path(path);
10204         return ret;
10205 }
10206
10207 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
10208 {
10209         struct btrfs_fs_info *fs_info = trans->fs_info;
10210         struct btrfs_block_group_cache *block_group, *tmp;
10211         struct btrfs_root *extent_root = fs_info->extent_root;
10212         struct btrfs_block_group_item item;
10213         struct btrfs_key key;
10214         int ret = 0;
10215         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
10216
10217         trans->can_flush_pending_bgs = false;
10218         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
10219                 if (ret)
10220                         goto next;
10221
10222                 spin_lock(&block_group->lock);
10223                 memcpy(&item, &block_group->item, sizeof(item));
10224                 memcpy(&key, &block_group->key, sizeof(key));
10225                 spin_unlock(&block_group->lock);
10226
10227                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
10228                                         sizeof(item));
10229                 if (ret)
10230                         btrfs_abort_transaction(trans, ret);
10231                 ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
10232                                                key.offset);
10233                 if (ret)
10234                         btrfs_abort_transaction(trans, ret);
10235                 add_block_group_free_space(trans, fs_info, block_group);
10236                 /* already aborted the transaction if it failed. */
10237 next:
10238                 list_del_init(&block_group->bg_list);
10239         }
10240         trans->can_flush_pending_bgs = can_flush_pending_bgs;
10241 }
10242
10243 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
10244                            struct btrfs_fs_info *fs_info, u64 bytes_used,
10245                            u64 type, u64 chunk_offset, u64 size)
10246 {
10247         struct btrfs_block_group_cache *cache;
10248         int ret;
10249
10250         btrfs_set_log_full_commit(fs_info, trans);
10251
10252         cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
10253         if (!cache)
10254                 return -ENOMEM;
10255
10256         btrfs_set_block_group_used(&cache->item, bytes_used);
10257         btrfs_set_block_group_chunk_objectid(&cache->item,
10258                                              BTRFS_FIRST_CHUNK_TREE_OBJECTID);
10259         btrfs_set_block_group_flags(&cache->item, type);
10260
10261         cache->flags = type;
10262         cache->last_byte_to_unpin = (u64)-1;
10263         cache->cached = BTRFS_CACHE_FINISHED;
10264         cache->needs_free_space = 1;
10265         ret = exclude_super_stripes(fs_info, cache);
10266         if (ret) {
10267                 /*
10268                  * We may have excluded something, so call this just in
10269                  * case.
10270                  */
10271                 free_excluded_extents(fs_info, cache);
10272                 btrfs_put_block_group(cache);
10273                 return ret;
10274         }
10275
10276         add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);
10277
10278         free_excluded_extents(fs_info, cache);
10279
10280 #ifdef CONFIG_BTRFS_DEBUG
10281         if (btrfs_should_fragment_free_space(cache)) {
10282                 u64 new_bytes_used = size - bytes_used;
10283
10284                 bytes_used += new_bytes_used >> 1;
10285                 fragment_free_space(cache);
10286         }
10287 #endif
10288         /*
10289          * Ensure the corresponding space_info object is created and
10290          * assigned to our block group. We want our bg to be added to the rbtree
10291          * with its ->space_info set.
10292          */
10293         cache->space_info = __find_space_info(fs_info, cache->flags);
10294         ASSERT(cache->space_info);
10295
10296         ret = btrfs_add_block_group_cache(fs_info, cache);
10297         if (ret) {
10298                 btrfs_remove_free_space_cache(cache);
10299                 btrfs_put_block_group(cache);
10300                 return ret;
10301         }
10302
10303         /*
10304          * Now that our block group has its ->space_info set and is inserted in
10305          * the rbtree, update the space info's counters.
10306          */
10307         trace_btrfs_add_block_group(fs_info, cache, 1);
10308         update_space_info(fs_info, cache->flags, size, bytes_used,
10309                                 cache->bytes_super, &cache->space_info);
10310         update_global_block_rsv(fs_info);
10311
10312         link_block_group(cache);
10313
10314         list_add_tail(&cache->bg_list, &trans->new_bgs);
10315
10316         set_avail_alloc_bits(fs_info, type);
10317         return 0;
10318 }
10319
10320 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10321 {
10322         u64 extra_flags = chunk_to_extended(flags) &
10323                                 BTRFS_EXTENDED_PROFILE_MASK;
10324
10325         write_seqlock(&fs_info->profiles_lock);
10326         if (flags & BTRFS_BLOCK_GROUP_DATA)
10327                 fs_info->avail_data_alloc_bits &= ~extra_flags;
10328         if (flags & BTRFS_BLOCK_GROUP_METADATA)
10329                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10330         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10331                 fs_info->avail_system_alloc_bits &= ~extra_flags;
10332         write_sequnlock(&fs_info->profiles_lock);
10333 }
10334
10335 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10336                              struct btrfs_fs_info *fs_info, u64 group_start,
10337                              struct extent_map *em)
10338 {
10339         struct btrfs_root *root = fs_info->extent_root;
10340         struct btrfs_path *path;
10341         struct btrfs_block_group_cache *block_group;
10342         struct btrfs_free_cluster *cluster;
10343         struct btrfs_root *tree_root = fs_info->tree_root;
10344         struct btrfs_key key;
10345         struct inode *inode;
10346         struct kobject *kobj = NULL;
10347         int ret;
10348         int index;
10349         int factor;
10350         struct btrfs_caching_control *caching_ctl = NULL;
10351         bool remove_em;
10352
10353         block_group = btrfs_lookup_block_group(fs_info, group_start);
10354         BUG_ON(!block_group);
10355         BUG_ON(!block_group->ro);
10356
10357         /*
10358          * Free the reserved super bytes from this block group before
10359          * remove it.
10360          */
10361         free_excluded_extents(fs_info, block_group);
10362         btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
10363                                   block_group->key.offset);
10364
10365         memcpy(&key, &block_group->key, sizeof(key));
10366         index = btrfs_bg_flags_to_raid_index(block_group->flags);
10367         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10368                                   BTRFS_BLOCK_GROUP_RAID1 |
10369                                   BTRFS_BLOCK_GROUP_RAID10))
10370                 factor = 2;
10371         else
10372                 factor = 1;
10373
10374         /* make sure this block group isn't part of an allocation cluster */
10375         cluster = &fs_info->data_alloc_cluster;
10376         spin_lock(&cluster->refill_lock);
10377         btrfs_return_cluster_to_free_space(block_group, cluster);
10378         spin_unlock(&cluster->refill_lock);
10379
10380         /*
10381          * make sure this block group isn't part of a metadata
10382          * allocation cluster
10383          */
10384         cluster = &fs_info->meta_alloc_cluster;
10385         spin_lock(&cluster->refill_lock);
10386         btrfs_return_cluster_to_free_space(block_group, cluster);
10387         spin_unlock(&cluster->refill_lock);
10388
10389         path = btrfs_alloc_path();
10390         if (!path) {
10391                 ret = -ENOMEM;
10392                 goto out;
10393         }
10394
10395         /*
10396          * get the inode first so any iput calls done for the io_list
10397          * aren't the final iput (no unlinks allowed now)
10398          */
10399         inode = lookup_free_space_inode(fs_info, block_group, path);
10400
10401         mutex_lock(&trans->transaction->cache_write_mutex);
10402         /*
10403          * make sure our free spache cache IO is done before remove the
10404          * free space inode
10405          */
10406         spin_lock(&trans->transaction->dirty_bgs_lock);
10407         if (!list_empty(&block_group->io_list)) {
10408                 list_del_init(&block_group->io_list);
10409
10410                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10411
10412                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10413                 btrfs_wait_cache_io(trans, block_group, path);
10414                 btrfs_put_block_group(block_group);
10415                 spin_lock(&trans->transaction->dirty_bgs_lock);
10416         }
10417
10418         if (!list_empty(&block_group->dirty_list)) {
10419                 list_del_init(&block_group->dirty_list);
10420                 btrfs_put_block_group(block_group);
10421         }
10422         spin_unlock(&trans->transaction->dirty_bgs_lock);
10423         mutex_unlock(&trans->transaction->cache_write_mutex);
10424
10425         if (!IS_ERR(inode)) {
10426                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10427                 if (ret) {
10428                         btrfs_add_delayed_iput(inode);
10429                         goto out;
10430                 }
10431                 clear_nlink(inode);
10432                 /* One for the block groups ref */
10433                 spin_lock(&block_group->lock);
10434                 if (block_group->iref) {
10435                         block_group->iref = 0;
10436                         block_group->inode = NULL;
10437                         spin_unlock(&block_group->lock);
10438                         iput(inode);
10439                 } else {
10440                         spin_unlock(&block_group->lock);
10441                 }
10442                 /* One for our lookup ref */
10443                 btrfs_add_delayed_iput(inode);
10444         }
10445
10446         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10447         key.offset = block_group->key.objectid;
10448         key.type = 0;
10449
10450         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10451         if (ret < 0)
10452                 goto out;
10453         if (ret > 0)
10454                 btrfs_release_path(path);
10455         if (ret == 0) {
10456                 ret = btrfs_del_item(trans, tree_root, path);
10457                 if (ret)
10458                         goto out;
10459                 btrfs_release_path(path);
10460         }
10461
10462         spin_lock(&fs_info->block_group_cache_lock);
10463         rb_erase(&block_group->cache_node,
10464                  &fs_info->block_group_cache_tree);
10465         RB_CLEAR_NODE(&block_group->cache_node);
10466
10467         if (fs_info->first_logical_byte == block_group->key.objectid)
10468                 fs_info->first_logical_byte = (u64)-1;
10469         spin_unlock(&fs_info->block_group_cache_lock);
10470
10471         down_write(&block_group->space_info->groups_sem);
10472         /*
10473          * we must use list_del_init so people can check to see if they
10474          * are still on the list after taking the semaphore
10475          */
10476         list_del_init(&block_group->list);
10477         if (list_empty(&block_group->space_info->block_groups[index])) {
10478                 kobj = block_group->space_info->block_group_kobjs[index];
10479                 block_group->space_info->block_group_kobjs[index] = NULL;
10480                 clear_avail_alloc_bits(fs_info, block_group->flags);
10481         }
10482         up_write(&block_group->space_info->groups_sem);
10483         if (kobj) {
10484                 kobject_del(kobj);
10485                 kobject_put(kobj);
10486         }
10487
10488         if (block_group->has_caching_ctl)
10489                 caching_ctl = get_caching_control(block_group);
10490         if (block_group->cached == BTRFS_CACHE_STARTED)
10491                 wait_block_group_cache_done(block_group);
10492         if (block_group->has_caching_ctl) {
10493                 down_write(&fs_info->commit_root_sem);
10494                 if (!caching_ctl) {
10495                         struct btrfs_caching_control *ctl;
10496
10497                         list_for_each_entry(ctl,
10498                                     &fs_info->caching_block_groups, list)
10499                                 if (ctl->block_group == block_group) {
10500                                         caching_ctl = ctl;
10501                                         refcount_inc(&caching_ctl->count);
10502                                         break;
10503                                 }
10504                 }
10505                 if (caching_ctl)
10506                         list_del_init(&caching_ctl->list);
10507                 up_write(&fs_info->commit_root_sem);
10508                 if (caching_ctl) {
10509                         /* Once for the caching bgs list and once for us. */
10510                         put_caching_control(caching_ctl);
10511                         put_caching_control(caching_ctl);
10512                 }
10513         }
10514
10515         spin_lock(&trans->transaction->dirty_bgs_lock);
10516         if (!list_empty(&block_group->dirty_list)) {
10517                 WARN_ON(1);
10518         }
10519         if (!list_empty(&block_group->io_list)) {
10520                 WARN_ON(1);
10521         }
10522         spin_unlock(&trans->transaction->dirty_bgs_lock);
10523         btrfs_remove_free_space_cache(block_group);
10524
10525         spin_lock(&block_group->space_info->lock);
10526         list_del_init(&block_group->ro_list);
10527
10528         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
10529                 WARN_ON(block_group->space_info->total_bytes
10530                         < block_group->key.offset);
10531                 WARN_ON(block_group->space_info->bytes_readonly
10532                         < block_group->key.offset);
10533                 WARN_ON(block_group->space_info->disk_total
10534                         < block_group->key.offset * factor);
10535         }
10536         block_group->space_info->total_bytes -= block_group->key.offset;
10537         block_group->space_info->bytes_readonly -= block_group->key.offset;
10538         block_group->space_info->disk_total -= block_group->key.offset * factor;
10539
10540         spin_unlock(&block_group->space_info->lock);
10541
10542         memcpy(&key, &block_group->key, sizeof(key));
10543
10544         mutex_lock(&fs_info->chunk_mutex);
10545         if (!list_empty(&em->list)) {
10546                 /* We're in the transaction->pending_chunks list. */
10547                 free_extent_map(em);
10548         }
10549         spin_lock(&block_group->lock);
10550         block_group->removed = 1;
10551         /*
10552          * At this point trimming can't start on this block group, because we
10553          * removed the block group from the tree fs_info->block_group_cache_tree
10554          * so no one can't find it anymore and even if someone already got this
10555          * block group before we removed it from the rbtree, they have already
10556          * incremented block_group->trimming - if they didn't, they won't find
10557          * any free space entries because we already removed them all when we
10558          * called btrfs_remove_free_space_cache().
10559          *
10560          * And we must not remove the extent map from the fs_info->mapping_tree
10561          * to prevent the same logical address range and physical device space
10562          * ranges from being reused for a new block group. This is because our
10563          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10564          * completely transactionless, so while it is trimming a range the
10565          * currently running transaction might finish and a new one start,
10566          * allowing for new block groups to be created that can reuse the same
10567          * physical device locations unless we take this special care.
10568          *
10569          * There may also be an implicit trim operation if the file system
10570          * is mounted with -odiscard. The same protections must remain
10571          * in place until the extents have been discarded completely when
10572          * the transaction commit has completed.
10573          */
10574         remove_em = (atomic_read(&block_group->trimming) == 0);
10575         /*
10576          * Make sure a trimmer task always sees the em in the pinned_chunks list
10577          * if it sees block_group->removed == 1 (needs to lock block_group->lock
10578          * before checking block_group->removed).
10579          */
10580         if (!remove_em) {
10581                 /*
10582                  * Our em might be in trans->transaction->pending_chunks which
10583                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10584                  * and so is the fs_info->pinned_chunks list.
10585                  *
10586                  * So at this point we must be holding the chunk_mutex to avoid
10587                  * any races with chunk allocation (more specifically at
10588                  * volumes.c:contains_pending_extent()), to ensure it always
10589                  * sees the em, either in the pending_chunks list or in the
10590                  * pinned_chunks list.
10591                  */
10592                 list_move_tail(&em->list, &fs_info->pinned_chunks);
10593         }
10594         spin_unlock(&block_group->lock);
10595
10596         if (remove_em) {
10597                 struct extent_map_tree *em_tree;
10598
10599                 em_tree = &fs_info->mapping_tree.map_tree;
10600                 write_lock(&em_tree->lock);
10601                 /*
10602                  * The em might be in the pending_chunks list, so make sure the
10603                  * chunk mutex is locked, since remove_extent_mapping() will
10604                  * delete us from that list.
10605                  */
10606                 remove_extent_mapping(em_tree, em);
10607                 write_unlock(&em_tree->lock);
10608                 /* once for the tree */
10609                 free_extent_map(em);
10610         }
10611
10612         mutex_unlock(&fs_info->chunk_mutex);
10613
10614         ret = remove_block_group_free_space(trans, fs_info, block_group);
10615         if (ret)
10616                 goto out;
10617
10618         btrfs_put_block_group(block_group);
10619         btrfs_put_block_group(block_group);
10620
10621         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10622         if (ret > 0)
10623                 ret = -EIO;
10624         if (ret < 0)
10625                 goto out;
10626
10627         ret = btrfs_del_item(trans, root, path);
10628 out:
10629         btrfs_free_path(path);
10630         return ret;
10631 }
10632
10633 struct btrfs_trans_handle *
10634 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10635                                      const u64 chunk_offset)
10636 {
10637         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10638         struct extent_map *em;
10639         struct map_lookup *map;
10640         unsigned int num_items;
10641
10642         read_lock(&em_tree->lock);
10643         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10644         read_unlock(&em_tree->lock);
10645         ASSERT(em && em->start == chunk_offset);
10646
10647         /*
10648          * We need to reserve 3 + N units from the metadata space info in order
10649          * to remove a block group (done at btrfs_remove_chunk() and at
10650          * btrfs_remove_block_group()), which are used for:
10651          *
10652          * 1 unit for adding the free space inode's orphan (located in the tree
10653          * of tree roots).
10654          * 1 unit for deleting the block group item (located in the extent
10655          * tree).
10656          * 1 unit for deleting the free space item (located in tree of tree
10657          * roots).
10658          * N units for deleting N device extent items corresponding to each
10659          * stripe (located in the device tree).
10660          *
10661          * In order to remove a block group we also need to reserve units in the
10662          * system space info in order to update the chunk tree (update one or
10663          * more device items and remove one chunk item), but this is done at
10664          * btrfs_remove_chunk() through a call to check_system_chunk().
10665          */
10666         map = em->map_lookup;
10667         num_items = 3 + map->num_stripes;
10668         free_extent_map(em);
10669
10670         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10671                                                            num_items, 1);
10672 }
10673
10674 /*
10675  * Process the unused_bgs list and remove any that don't have any allocated
10676  * space inside of them.
10677  */
10678 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10679 {
10680         struct btrfs_block_group_cache *block_group;
10681         struct btrfs_space_info *space_info;
10682         struct btrfs_trans_handle *trans;
10683         int ret = 0;
10684
10685         if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
10686                 return;
10687
10688         spin_lock(&fs_info->unused_bgs_lock);
10689         while (!list_empty(&fs_info->unused_bgs)) {
10690                 u64 start, end;
10691                 int trimming;
10692
10693                 block_group = list_first_entry(&fs_info->unused_bgs,
10694                                                struct btrfs_block_group_cache,
10695                                                bg_list);
10696                 list_del_init(&block_group->bg_list);
10697
10698                 space_info = block_group->space_info;
10699
10700                 if (ret || btrfs_mixed_space_info(space_info)) {
10701                         btrfs_put_block_group(block_group);
10702                         continue;
10703                 }
10704                 spin_unlock(&fs_info->unused_bgs_lock);
10705
10706                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10707
10708                 /* Don't want to race with allocators so take the groups_sem */
10709                 down_write(&space_info->groups_sem);
10710                 spin_lock(&block_group->lock);
10711                 if (block_group->reserved ||
10712                     btrfs_block_group_used(&block_group->item) ||
10713                     block_group->ro ||
10714                     list_is_singular(&block_group->list)) {
10715                         /*
10716                          * We want to bail if we made new allocations or have
10717                          * outstanding allocations in this block group.  We do
10718                          * the ro check in case balance is currently acting on
10719                          * this block group.
10720                          */
10721                         spin_unlock(&block_group->lock);
10722                         up_write(&space_info->groups_sem);
10723                         goto next;
10724                 }
10725                 spin_unlock(&block_group->lock);
10726
10727                 /* We don't want to force the issue, only flip if it's ok. */
10728                 ret = inc_block_group_ro(block_group, 0);
10729                 up_write(&space_info->groups_sem);
10730                 if (ret < 0) {
10731                         ret = 0;
10732                         goto next;
10733                 }
10734
10735                 /*
10736                  * Want to do this before we do anything else so we can recover
10737                  * properly if we fail to join the transaction.
10738                  */
10739                 trans = btrfs_start_trans_remove_block_group(fs_info,
10740                                                      block_group->key.objectid);
10741                 if (IS_ERR(trans)) {
10742                         btrfs_dec_block_group_ro(block_group);
10743                         ret = PTR_ERR(trans);
10744                         goto next;
10745                 }
10746
10747                 /*
10748                  * We could have pending pinned extents for this block group,
10749                  * just delete them, we don't care about them anymore.
10750                  */
10751                 start = block_group->key.objectid;
10752                 end = start + block_group->key.offset - 1;
10753                 /*
10754                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10755                  * btrfs_finish_extent_commit(). If we are at transaction N,
10756                  * another task might be running finish_extent_commit() for the
10757                  * previous transaction N - 1, and have seen a range belonging
10758                  * to the block group in freed_extents[] before we were able to
10759                  * clear the whole block group range from freed_extents[]. This
10760                  * means that task can lookup for the block group after we
10761                  * unpinned it from freed_extents[] and removed it, leading to
10762                  * a BUG_ON() at btrfs_unpin_extent_range().
10763                  */
10764                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10765                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10766                                   EXTENT_DIRTY);
10767                 if (ret) {
10768                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10769                         btrfs_dec_block_group_ro(block_group);
10770                         goto end_trans;
10771                 }
10772                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10773                                   EXTENT_DIRTY);
10774                 if (ret) {
10775                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10776                         btrfs_dec_block_group_ro(block_group);
10777                         goto end_trans;
10778                 }
10779                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10780
10781                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10782                 spin_lock(&space_info->lock);
10783                 spin_lock(&block_group->lock);
10784
10785                 space_info->bytes_pinned -= block_group->pinned;
10786                 space_info->bytes_readonly += block_group->pinned;
10787                 percpu_counter_add(&space_info->total_bytes_pinned,
10788                                    -block_group->pinned);
10789                 block_group->pinned = 0;
10790
10791                 spin_unlock(&block_group->lock);
10792                 spin_unlock(&space_info->lock);
10793
10794                 /* DISCARD can flip during remount */
10795                 trimming = btrfs_test_opt(fs_info, DISCARD);
10796
10797                 /* Implicit trim during transaction commit. */
10798                 if (trimming)
10799                         btrfs_get_block_group_trimming(block_group);
10800
10801                 /*
10802                  * Btrfs_remove_chunk will abort the transaction if things go
10803                  * horribly wrong.
10804                  */
10805                 ret = btrfs_remove_chunk(trans, fs_info,
10806                                          block_group->key.objectid);
10807
10808                 if (ret) {
10809                         if (trimming)
10810                                 btrfs_put_block_group_trimming(block_group);
10811                         goto end_trans;
10812                 }
10813
10814                 /*
10815                  * If we're not mounted with -odiscard, we can just forget
10816                  * about this block group. Otherwise we'll need to wait
10817                  * until transaction commit to do the actual discard.
10818                  */
10819                 if (trimming) {
10820                         spin_lock(&fs_info->unused_bgs_lock);
10821                         /*
10822                          * A concurrent scrub might have added us to the list
10823                          * fs_info->unused_bgs, so use a list_move operation
10824                          * to add the block group to the deleted_bgs list.
10825                          */
10826                         list_move(&block_group->bg_list,
10827                                   &trans->transaction->deleted_bgs);
10828                         spin_unlock(&fs_info->unused_bgs_lock);
10829                         btrfs_get_block_group(block_group);
10830                 }
10831 end_trans:
10832                 btrfs_end_transaction(trans);
10833 next:
10834                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10835                 btrfs_put_block_group(block_group);
10836                 spin_lock(&fs_info->unused_bgs_lock);
10837         }
10838         spin_unlock(&fs_info->unused_bgs_lock);
10839 }
10840
10841 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10842 {
10843         struct btrfs_space_info *space_info;
10844         struct btrfs_super_block *disk_super;
10845         u64 features;
10846         u64 flags;
10847         int mixed = 0;
10848         int ret;
10849
10850         disk_super = fs_info->super_copy;
10851         if (!btrfs_super_root(disk_super))
10852                 return -EINVAL;
10853
10854         features = btrfs_super_incompat_flags(disk_super);
10855         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10856                 mixed = 1;
10857
10858         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10859         ret = create_space_info(fs_info, flags, &space_info);
10860         if (ret)
10861                 goto out;
10862
10863         if (mixed) {
10864                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10865                 ret = create_space_info(fs_info, flags, &space_info);
10866         } else {
10867                 flags = BTRFS_BLOCK_GROUP_METADATA;
10868                 ret = create_space_info(fs_info, flags, &space_info);
10869                 if (ret)
10870                         goto out;
10871
10872                 flags = BTRFS_BLOCK_GROUP_DATA;
10873                 ret = create_space_info(fs_info, flags, &space_info);
10874         }
10875 out:
10876         return ret;
10877 }
10878
10879 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
10880                                    u64 start, u64 end)
10881 {
10882         return unpin_extent_range(fs_info, start, end, false);
10883 }
10884
10885 /*
10886  * It used to be that old block groups would be left around forever.
10887  * Iterating over them would be enough to trim unused space.  Since we
10888  * now automatically remove them, we also need to iterate over unallocated
10889  * space.
10890  *
10891  * We don't want a transaction for this since the discard may take a
10892  * substantial amount of time.  We don't require that a transaction be
10893  * running, but we do need to take a running transaction into account
10894  * to ensure that we're not discarding chunks that were released in
10895  * the current transaction.
10896  *
10897  * Holding the chunks lock will prevent other threads from allocating
10898  * or releasing chunks, but it won't prevent a running transaction
10899  * from committing and releasing the memory that the pending chunks
10900  * list head uses.  For that, we need to take a reference to the
10901  * transaction.
10902  */
10903 static int btrfs_trim_free_extents(struct btrfs_device *device,
10904                                    u64 minlen, u64 *trimmed)
10905 {
10906         u64 start = 0, len = 0;
10907         int ret;
10908
10909         *trimmed = 0;
10910
10911         /* Not writeable = nothing to do. */
10912         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
10913                 return 0;
10914
10915         /* No free space = nothing to do. */
10916         if (device->total_bytes <= device->bytes_used)
10917                 return 0;
10918
10919         ret = 0;
10920
10921         while (1) {
10922                 struct btrfs_fs_info *fs_info = device->fs_info;
10923                 struct btrfs_transaction *trans;
10924                 u64 bytes;
10925
10926                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10927                 if (ret)
10928                         return ret;
10929
10930                 down_read(&fs_info->commit_root_sem);
10931
10932                 spin_lock(&fs_info->trans_lock);
10933                 trans = fs_info->running_transaction;
10934                 if (trans)
10935                         refcount_inc(&trans->use_count);
10936                 spin_unlock(&fs_info->trans_lock);
10937
10938                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10939                                                  &start, &len);
10940                 if (trans)
10941                         btrfs_put_transaction(trans);
10942
10943                 if (ret) {
10944                         up_read(&fs_info->commit_root_sem);
10945                         mutex_unlock(&fs_info->chunk_mutex);
10946                         if (ret == -ENOSPC)
10947                                 ret = 0;
10948                         break;
10949                 }
10950
10951                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10952                 up_read(&fs_info->commit_root_sem);
10953                 mutex_unlock(&fs_info->chunk_mutex);
10954
10955                 if (ret)
10956                         break;
10957
10958                 start += len;
10959                 *trimmed += bytes;
10960
10961                 if (fatal_signal_pending(current)) {
10962                         ret = -ERESTARTSYS;
10963                         break;
10964                 }
10965
10966                 cond_resched();
10967         }
10968
10969         return ret;
10970 }
10971
10972 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
10973 {
10974         struct btrfs_block_group_cache *cache = NULL;
10975         struct btrfs_device *device;
10976         struct list_head *devices;
10977         u64 group_trimmed;
10978         u64 start;
10979         u64 end;
10980         u64 trimmed = 0;
10981         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10982         int ret = 0;
10983
10984         /*
10985          * try to trim all FS space, our block group may start from non-zero.
10986          */
10987         if (range->len == total_bytes)
10988                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10989         else
10990                 cache = btrfs_lookup_block_group(fs_info, range->start);
10991
10992         while (cache) {
10993                 if (cache->key.objectid >= (range->start + range->len)) {
10994                         btrfs_put_block_group(cache);
10995                         break;
10996                 }
10997
10998                 start = max(range->start, cache->key.objectid);
10999                 end = min(range->start + range->len,
11000                                 cache->key.objectid + cache->key.offset);
11001
11002                 if (end - start >= range->minlen) {
11003                         if (!block_group_cache_done(cache)) {
11004                                 ret = cache_block_group(cache, 0);
11005                                 if (ret) {
11006                                         btrfs_put_block_group(cache);
11007                                         break;
11008                                 }
11009                                 ret = wait_block_group_cache_done(cache);
11010                                 if (ret) {
11011                                         btrfs_put_block_group(cache);
11012                                         break;
11013                                 }
11014                         }
11015                         ret = btrfs_trim_block_group(cache,
11016                                                      &group_trimmed,
11017                                                      start,
11018                                                      end,
11019                                                      range->minlen);
11020
11021                         trimmed += group_trimmed;
11022                         if (ret) {
11023                                 btrfs_put_block_group(cache);
11024                                 break;
11025                         }
11026                 }
11027
11028                 cache = next_block_group(fs_info, cache);
11029         }
11030
11031         mutex_lock(&fs_info->fs_devices->device_list_mutex);
11032         devices = &fs_info->fs_devices->alloc_list;
11033         list_for_each_entry(device, devices, dev_alloc_list) {
11034                 ret = btrfs_trim_free_extents(device, range->minlen,
11035                                               &group_trimmed);
11036                 if (ret)
11037                         break;
11038
11039                 trimmed += group_trimmed;
11040         }
11041         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
11042
11043         range->len = trimmed;
11044         return ret;
11045 }
11046
11047 /*
11048  * btrfs_{start,end}_write_no_snapshotting() are similar to
11049  * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
11050  * data into the page cache through nocow before the subvolume is snapshoted,
11051  * but flush the data into disk after the snapshot creation, or to prevent
11052  * operations while snapshotting is ongoing and that cause the snapshot to be
11053  * inconsistent (writes followed by expanding truncates for example).
11054  */
11055 void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
11056 {
11057         percpu_counter_dec(&root->subv_writers->counter);
11058         /*
11059          * Make sure counter is updated before we wake up waiters.
11060          */
11061         smp_mb();
11062         if (waitqueue_active(&root->subv_writers->wait))
11063                 wake_up(&root->subv_writers->wait);
11064 }
11065
11066 int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
11067 {
11068         if (atomic_read(&root->will_be_snapshotted))
11069                 return 0;
11070
11071         percpu_counter_inc(&root->subv_writers->counter);
11072         /*
11073          * Make sure counter is updated before we check for snapshot creation.
11074          */
11075         smp_mb();
11076         if (atomic_read(&root->will_be_snapshotted)) {
11077                 btrfs_end_write_no_snapshotting(root);
11078                 return 0;
11079         }
11080         return 1;
11081 }
11082
11083 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
11084 {
11085         while (true) {
11086                 int ret;
11087
11088                 ret = btrfs_start_write_no_snapshotting(root);
11089                 if (ret)
11090                         break;
11091                 wait_var_event(&root->will_be_snapshotted,
11092                                !atomic_read(&root->will_be_snapshotted));
11093         }
11094 }