btrfs: Remove 'trans' argument from find_free_dev_extent(_start)
[sfrench/cifs-2.6.git] fs/btrfs/extent-tree.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/sched.h>
7 #include <linux/sched/signal.h>
8 #include <linux/pagemap.h>
9 #include <linux/writeback.h>
10 #include <linux/blkdev.h>
11 #include <linux/sort.h>
12 #include <linux/rcupdate.h>
13 #include <linux/kthread.h>
14 #include <linux/slab.h>
15 #include <linux/ratelimit.h>
16 #include <linux/percpu_counter.h>
17 #include <linux/lockdep.h>
18 #include <linux/crc32c.h>
19 #include "tree-log.h"
20 #include "disk-io.h"
21 #include "print-tree.h"
22 #include "volumes.h"
23 #include "raid56.h"
24 #include "locking.h"
25 #include "free-space-cache.h"
26 #include "free-space-tree.h"
27 #include "math.h"
28 #include "sysfs.h"
29 #include "qgroup.h"
30 #include "ref-verify.h"
31
32 #undef SCRAMBLE_DELAYED_REFS
33
34 /*
35  * control flags for do_chunk_alloc's force field
36  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
37  * if we really need one.
38  *
39  * CHUNK_ALLOC_LIMITED means to only try to allocate one
40  * if we have very few chunks already allocated.  This is
41  * used as part of the clustering code to help make sure
42  * we have a good pool of storage to cluster in, without
43  * filling the FS with empty chunks
44  *
45  * CHUNK_ALLOC_FORCE means it must try to allocate one
46  *
47  */
48 enum {
49         CHUNK_ALLOC_NO_FORCE = 0,
50         CHUNK_ALLOC_LIMITED = 1,
51         CHUNK_ALLOC_FORCE = 2,
52 };
53
54 /*
55  * Declare a helper function to detect underflow of various space info members
56  */
57 #define DECLARE_SPACE_INFO_UPDATE(name)                                 \
58 static inline void update_##name(struct btrfs_space_info *sinfo,        \
59                                  s64 bytes)                             \
60 {                                                                       \
61         if (bytes < 0 && sinfo->name < -bytes) {                        \
62                 WARN_ON(1);                                             \
63                 sinfo->name = 0;                                        \
64                 return;                                                 \
65         }                                                               \
66         sinfo->name += bytes;                                           \
67 }
68
69 DECLARE_SPACE_INFO_UPDATE(bytes_may_use);
70 DECLARE_SPACE_INFO_UPDATE(bytes_pinned);
71
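/*
 * For illustration only: DECLARE_SPACE_INFO_UPDATE(bytes_may_use) above
 * expands to roughly the following helper, which clamps an underflowing
 * decrement to zero and warns instead of letting the counter wrap:
 *
 *      static inline void update_bytes_may_use(struct btrfs_space_info *sinfo,
 *                                              s64 bytes)
 *      {
 *              if (bytes < 0 && sinfo->bytes_may_use < -bytes) {
 *                      WARN_ON(1);
 *                      sinfo->bytes_may_use = 0;
 *                      return;
 *              }
 *              sinfo->bytes_may_use += bytes;
 *      }
 */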
72 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
73                                struct btrfs_delayed_ref_node *node, u64 parent,
74                                u64 root_objectid, u64 owner_objectid,
75                                u64 owner_offset, int refs_to_drop,
76                                struct btrfs_delayed_extent_op *extra_op);
77 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
78                                     struct extent_buffer *leaf,
79                                     struct btrfs_extent_item *ei);
80 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
81                                       u64 parent, u64 root_objectid,
82                                       u64 flags, u64 owner, u64 offset,
83                                       struct btrfs_key *ins, int ref_mod);
84 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
85                                      struct btrfs_delayed_ref_node *node,
86                                      struct btrfs_delayed_extent_op *extent_op);
87 static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
88                           int force);
89 static int find_next_key(struct btrfs_path *path, int level,
90                          struct btrfs_key *key);
91 static void dump_space_info(struct btrfs_fs_info *fs_info,
92                             struct btrfs_space_info *info, u64 bytes,
93                             int dump_block_groups);
94 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
95                                u64 num_bytes);
96 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
97                                      struct btrfs_space_info *space_info,
98                                      u64 num_bytes);
99 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
100                                      struct btrfs_space_info *space_info,
101                                      u64 num_bytes);
102
103 static noinline int
104 block_group_cache_done(struct btrfs_block_group_cache *cache)
105 {
106         smp_mb();
107         return cache->cached == BTRFS_CACHE_FINISHED ||
108                 cache->cached == BTRFS_CACHE_ERROR;
109 }
110
111 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
112 {
113         return (cache->flags & bits) == bits;
114 }
115
116 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
117 {
118         atomic_inc(&cache->count);
119 }
120
121 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
122 {
123         if (atomic_dec_and_test(&cache->count)) {
124                 WARN_ON(cache->pinned > 0);
125                 WARN_ON(cache->reserved > 0);
126
127                 /*
128                  * If the tree is not empty, someone is still holding the
129                  * full_stripe_lock mutex, which can only be released by its
130                  * caller, and freeing the block group now would cause a
131                  * use-after-free when that caller releases the lock.
132                  *
133                  * There is no better way to resolve this than to warn.
134                  */
135                 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
136                 kfree(cache->free_space_ctl);
137                 kfree(cache);
138         }
139 }
140
141 /*
142  * this adds the block group to the fs_info rb tree for the block group
143  * cache
144  */
145 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
146                                 struct btrfs_block_group_cache *block_group)
147 {
148         struct rb_node **p;
149         struct rb_node *parent = NULL;
150         struct btrfs_block_group_cache *cache;
151
152         spin_lock(&info->block_group_cache_lock);
153         p = &info->block_group_cache_tree.rb_node;
154
155         while (*p) {
156                 parent = *p;
157                 cache = rb_entry(parent, struct btrfs_block_group_cache,
158                                  cache_node);
159                 if (block_group->key.objectid < cache->key.objectid) {
160                         p = &(*p)->rb_left;
161                 } else if (block_group->key.objectid > cache->key.objectid) {
162                         p = &(*p)->rb_right;
163                 } else {
164                         spin_unlock(&info->block_group_cache_lock);
165                         return -EEXIST;
166                 }
167         }
168
169         rb_link_node(&block_group->cache_node, parent, p);
170         rb_insert_color(&block_group->cache_node,
171                         &info->block_group_cache_tree);
172
173         if (info->first_logical_byte > block_group->key.objectid)
174                 info->first_logical_byte = block_group->key.objectid;
175
176         spin_unlock(&info->block_group_cache_lock);
177
178         return 0;
179 }
180
181 /*
182  * This will return the block group at or after bytenr if contains is 0, else
183  * it will return the block group that contains the bytenr
184  */
185 static struct btrfs_block_group_cache *
186 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
187                               int contains)
188 {
189         struct btrfs_block_group_cache *cache, *ret = NULL;
190         struct rb_node *n;
191         u64 end, start;
192
193         spin_lock(&info->block_group_cache_lock);
194         n = info->block_group_cache_tree.rb_node;
195
196         while (n) {
197                 cache = rb_entry(n, struct btrfs_block_group_cache,
198                                  cache_node);
199                 end = cache->key.objectid + cache->key.offset - 1;
200                 start = cache->key.objectid;
201
202                 if (bytenr < start) {
203                         if (!contains && (!ret || start < ret->key.objectid))
204                                 ret = cache;
205                         n = n->rb_left;
206                 } else if (bytenr > start) {
207                         if (contains && bytenr <= end) {
208                                 ret = cache;
209                                 break;
210                         }
211                         n = n->rb_right;
212                 } else {
213                         ret = cache;
214                         break;
215                 }
216         }
217         if (ret) {
218                 btrfs_get_block_group(ret);
219                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
220                         info->first_logical_byte = ret->key.objectid;
221         }
222         spin_unlock(&info->block_group_cache_lock);
223
224         return ret;
225 }
226
227 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
228                                u64 start, u64 num_bytes)
229 {
230         u64 end = start + num_bytes - 1;
231         set_extent_bits(&fs_info->freed_extents[0],
232                         start, end, EXTENT_UPTODATE);
233         set_extent_bits(&fs_info->freed_extents[1],
234                         start, end, EXTENT_UPTODATE);
235         return 0;
236 }
237
238 static void free_excluded_extents(struct btrfs_block_group_cache *cache)
239 {
240         struct btrfs_fs_info *fs_info = cache->fs_info;
241         u64 start, end;
242
243         start = cache->key.objectid;
244         end = start + cache->key.offset - 1;
245
246         clear_extent_bits(&fs_info->freed_extents[0],
247                           start, end, EXTENT_UPTODATE);
248         clear_extent_bits(&fs_info->freed_extents[1],
249                           start, end, EXTENT_UPTODATE);
250 }
251
252 static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
253 {
254         struct btrfs_fs_info *fs_info = cache->fs_info;
255         u64 bytenr;
256         u64 *logical;
257         int stripe_len;
258         int i, nr, ret;
259
260         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
261                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
262                 cache->bytes_super += stripe_len;
263                 ret = add_excluded_extent(fs_info, cache->key.objectid,
264                                           stripe_len);
265                 if (ret)
266                         return ret;
267         }
268
269         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
270                 bytenr = btrfs_sb_offset(i);
271                 ret = btrfs_rmap_block(fs_info, cache->key.objectid,
272                                        bytenr, &logical, &nr, &stripe_len);
273                 if (ret)
274                         return ret;
275
276                 while (nr--) {
277                         u64 start, len;
278
279                         if (logical[nr] > cache->key.objectid +
280                             cache->key.offset)
281                                 continue;
282
283                         if (logical[nr] + stripe_len <= cache->key.objectid)
284                                 continue;
285
286                         start = logical[nr];
287                         if (start < cache->key.objectid) {
288                                 start = cache->key.objectid;
289                                 len = (logical[nr] + stripe_len) - start;
290                         } else {
291                                 len = min_t(u64, stripe_len,
292                                             cache->key.objectid +
293                                             cache->key.offset - start);
294                         }
295
296                         cache->bytes_super += len;
297                         ret = add_excluded_extent(fs_info, start, len);
298                         if (ret) {
299                                 kfree(logical);
300                                 return ret;
301                         }
302                 }
303
304                 kfree(logical);
305         }
306         return 0;
307 }
308
309 static struct btrfs_caching_control *
310 get_caching_control(struct btrfs_block_group_cache *cache)
311 {
312         struct btrfs_caching_control *ctl;
313
314         spin_lock(&cache->lock);
315         if (!cache->caching_ctl) {
316                 spin_unlock(&cache->lock);
317                 return NULL;
318         }
319
320         ctl = cache->caching_ctl;
321         refcount_inc(&ctl->count);
322         spin_unlock(&cache->lock);
323         return ctl;
324 }
325
326 static void put_caching_control(struct btrfs_caching_control *ctl)
327 {
328         if (refcount_dec_and_test(&ctl->count))
329                 kfree(ctl);
330 }
331
332 #ifdef CONFIG_BTRFS_DEBUG
333 static void fragment_free_space(struct btrfs_block_group_cache *block_group)
334 {
335         struct btrfs_fs_info *fs_info = block_group->fs_info;
336         u64 start = block_group->key.objectid;
337         u64 len = block_group->key.offset;
338         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
339                 fs_info->nodesize : fs_info->sectorsize;
340         u64 step = chunk << 1;
341
342         while (len > chunk) {
343                 btrfs_remove_free_space(block_group, start, chunk);
344                 start += step;
345                 if (len < step)
346                         len = 0;
347                 else
348                         len -= step;
349         }
350 }
351 #endif
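/*
 * Illustrative note on fragment_free_space() above: with chunk equal to the
 * nodesize (metadata block groups) or sectorsize (data block groups) and
 * step = chunk * 2, the loop removes every other chunk-sized range from the
 * free space cache, leaving roughly half of the block group's free space.
 * The CONFIG_BTRFS_DEBUG callers compensate by adding half of the block
 * group's unused space back to space_info->bytes_used, so the removed ranges
 * are accounted as used.
 */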
352
353 /*
354  * this is only called by cache_block_group; since we could have freed extents,
355  * we need to check the pinned_extents for any extents that can't be used yet,
356  * since their free space will be released as soon as the transaction commits.
357  */
358 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
359                        u64 start, u64 end)
360 {
361         struct btrfs_fs_info *info = block_group->fs_info;
362         u64 extent_start, extent_end, size, total_added = 0;
363         int ret;
364
365         while (start < end) {
366                 ret = find_first_extent_bit(info->pinned_extents, start,
367                                             &extent_start, &extent_end,
368                                             EXTENT_DIRTY | EXTENT_UPTODATE,
369                                             NULL);
370                 if (ret)
371                         break;
372
373                 if (extent_start <= start) {
374                         start = extent_end + 1;
375                 } else if (extent_start > start && extent_start < end) {
376                         size = extent_start - start;
377                         total_added += size;
378                         ret = btrfs_add_free_space(block_group, start,
379                                                    size);
380                         BUG_ON(ret); /* -ENOMEM or logic error */
381                         start = extent_end + 1;
382                 } else {
383                         break;
384                 }
385         }
386
387         if (start < end) {
388                 size = end - start;
389                 total_added += size;
390                 ret = btrfs_add_free_space(block_group, start, size);
391                 BUG_ON(ret); /* -ENOMEM or logic error */
392         }
393
394         return total_added;
395 }
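/*
 * Worked example (illustrative, made-up offsets): for a block group spanning
 * [0, 1M) with a single pinned/uptodate extent at [256K, 512K), the loop
 * above adds [0, 256K) to the free space cache and advances start past the
 * pinned range, the final check adds [512K, 1M), and total_added comes back
 * as 768K.
 */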
396
397 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
398 {
399         struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
400         struct btrfs_fs_info *fs_info = block_group->fs_info;
401         struct btrfs_root *extent_root = fs_info->extent_root;
402         struct btrfs_path *path;
403         struct extent_buffer *leaf;
404         struct btrfs_key key;
405         u64 total_found = 0;
406         u64 last = 0;
407         u32 nritems;
408         int ret;
409         bool wakeup = true;
410
411         path = btrfs_alloc_path();
412         if (!path)
413                 return -ENOMEM;
414
415         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
416
417 #ifdef CONFIG_BTRFS_DEBUG
418         /*
419          * If we're fragmenting we don't want to make anybody think we can
420          * allocate from this block group until we've had a chance to fragment
421          * the free space.
422          */
423         if (btrfs_should_fragment_free_space(block_group))
424                 wakeup = false;
425 #endif
426         /*
427          * We don't want to deadlock with somebody trying to allocate a new
428          * extent for the extent root while also trying to search the extent
429          * root to add free space.  So we skip locking and search the commit
430          * root, since it's read-only
431          */
432         path->skip_locking = 1;
433         path->search_commit_root = 1;
434         path->reada = READA_FORWARD;
435
436         key.objectid = last;
437         key.offset = 0;
438         key.type = BTRFS_EXTENT_ITEM_KEY;
439
440 next:
441         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
442         if (ret < 0)
443                 goto out;
444
445         leaf = path->nodes[0];
446         nritems = btrfs_header_nritems(leaf);
447
448         while (1) {
449                 if (btrfs_fs_closing(fs_info) > 1) {
450                         last = (u64)-1;
451                         break;
452                 }
453
454                 if (path->slots[0] < nritems) {
455                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
456                 } else {
457                         ret = find_next_key(path, 0, &key);
458                         if (ret)
459                                 break;
460
461                         if (need_resched() ||
462                             rwsem_is_contended(&fs_info->commit_root_sem)) {
463                                 if (wakeup)
464                                         caching_ctl->progress = last;
465                                 btrfs_release_path(path);
466                                 up_read(&fs_info->commit_root_sem);
467                                 mutex_unlock(&caching_ctl->mutex);
468                                 cond_resched();
469                                 mutex_lock(&caching_ctl->mutex);
470                                 down_read(&fs_info->commit_root_sem);
471                                 goto next;
472                         }
473
474                         ret = btrfs_next_leaf(extent_root, path);
475                         if (ret < 0)
476                                 goto out;
477                         if (ret)
478                                 break;
479                         leaf = path->nodes[0];
480                         nritems = btrfs_header_nritems(leaf);
481                         continue;
482                 }
483
484                 if (key.objectid < last) {
485                         key.objectid = last;
486                         key.offset = 0;
487                         key.type = BTRFS_EXTENT_ITEM_KEY;
488
489                         if (wakeup)
490                                 caching_ctl->progress = last;
491                         btrfs_release_path(path);
492                         goto next;
493                 }
494
495                 if (key.objectid < block_group->key.objectid) {
496                         path->slots[0]++;
497                         continue;
498                 }
499
500                 if (key.objectid >= block_group->key.objectid +
501                     block_group->key.offset)
502                         break;
503
504                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
505                     key.type == BTRFS_METADATA_ITEM_KEY) {
506                         total_found += add_new_free_space(block_group, last,
507                                                           key.objectid);
508                         if (key.type == BTRFS_METADATA_ITEM_KEY)
509                                 last = key.objectid +
510                                         fs_info->nodesize;
511                         else
512                                 last = key.objectid + key.offset;
513
514                         if (total_found > CACHING_CTL_WAKE_UP) {
515                                 total_found = 0;
516                                 if (wakeup)
517                                         wake_up(&caching_ctl->wait);
518                         }
519                 }
520                 path->slots[0]++;
521         }
522         ret = 0;
523
524         total_found += add_new_free_space(block_group, last,
525                                           block_group->key.objectid +
526                                           block_group->key.offset);
527         caching_ctl->progress = (u64)-1;
528
529 out:
530         btrfs_free_path(path);
531         return ret;
532 }
533
534 static noinline void caching_thread(struct btrfs_work *work)
535 {
536         struct btrfs_block_group_cache *block_group;
537         struct btrfs_fs_info *fs_info;
538         struct btrfs_caching_control *caching_ctl;
539         int ret;
540
541         caching_ctl = container_of(work, struct btrfs_caching_control, work);
542         block_group = caching_ctl->block_group;
543         fs_info = block_group->fs_info;
544
545         mutex_lock(&caching_ctl->mutex);
546         down_read(&fs_info->commit_root_sem);
547
548         if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
549                 ret = load_free_space_tree(caching_ctl);
550         else
551                 ret = load_extent_tree_free(caching_ctl);
552
553         spin_lock(&block_group->lock);
554         block_group->caching_ctl = NULL;
555         block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
556         spin_unlock(&block_group->lock);
557
558 #ifdef CONFIG_BTRFS_DEBUG
559         if (btrfs_should_fragment_free_space(block_group)) {
560                 u64 bytes_used;
561
562                 spin_lock(&block_group->space_info->lock);
563                 spin_lock(&block_group->lock);
564                 bytes_used = block_group->key.offset -
565                         btrfs_block_group_used(&block_group->item);
566                 block_group->space_info->bytes_used += bytes_used >> 1;
567                 spin_unlock(&block_group->lock);
568                 spin_unlock(&block_group->space_info->lock);
569                 fragment_free_space(block_group);
570         }
571 #endif
572
573         caching_ctl->progress = (u64)-1;
574
575         up_read(&fs_info->commit_root_sem);
576         free_excluded_extents(block_group);
577         mutex_unlock(&caching_ctl->mutex);
578
579         wake_up(&caching_ctl->wait);
580
581         put_caching_control(caching_ctl);
582         btrfs_put_block_group(block_group);
583 }
584
585 static int cache_block_group(struct btrfs_block_group_cache *cache,
586                              int load_cache_only)
587 {
588         DEFINE_WAIT(wait);
589         struct btrfs_fs_info *fs_info = cache->fs_info;
590         struct btrfs_caching_control *caching_ctl;
591         int ret = 0;
592
593         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
594         if (!caching_ctl)
595                 return -ENOMEM;
596
597         INIT_LIST_HEAD(&caching_ctl->list);
598         mutex_init(&caching_ctl->mutex);
599         init_waitqueue_head(&caching_ctl->wait);
600         caching_ctl->block_group = cache;
601         caching_ctl->progress = cache->key.objectid;
602         refcount_set(&caching_ctl->count, 1);
603         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
604                         caching_thread, NULL, NULL);
605
606         spin_lock(&cache->lock);
607         /*
608          * This should be a rare occasion, but this could happen I think in the
609          * case where one thread starts to load the space cache info, and then
610          * some other thread starts a transaction commit which tries to do an
611          * allocation while the other thread is still loading the space cache
612          * info.  The previous loop should have kept us from choosing this block
613          * group, but if we've moved to the state where we will wait on caching
614          * block groups we need to first check if we're doing a fast load here,
615          * so we can wait for it to finish, otherwise we could end up allocating
616          * from a block group whose cache gets evicted for one reason or
617          * another.
618          */
619         while (cache->cached == BTRFS_CACHE_FAST) {
620                 struct btrfs_caching_control *ctl;
621
622                 ctl = cache->caching_ctl;
623                 refcount_inc(&ctl->count);
624                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
625                 spin_unlock(&cache->lock);
626
627                 schedule();
628
629                 finish_wait(&ctl->wait, &wait);
630                 put_caching_control(ctl);
631                 spin_lock(&cache->lock);
632         }
633
634         if (cache->cached != BTRFS_CACHE_NO) {
635                 spin_unlock(&cache->lock);
636                 kfree(caching_ctl);
637                 return 0;
638         }
639         WARN_ON(cache->caching_ctl);
640         cache->caching_ctl = caching_ctl;
641         cache->cached = BTRFS_CACHE_FAST;
642         spin_unlock(&cache->lock);
643
644         if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
645                 mutex_lock(&caching_ctl->mutex);
646                 ret = load_free_space_cache(fs_info, cache);
647
648                 spin_lock(&cache->lock);
649                 if (ret == 1) {
650                         cache->caching_ctl = NULL;
651                         cache->cached = BTRFS_CACHE_FINISHED;
652                         cache->last_byte_to_unpin = (u64)-1;
653                         caching_ctl->progress = (u64)-1;
654                 } else {
655                         if (load_cache_only) {
656                                 cache->caching_ctl = NULL;
657                                 cache->cached = BTRFS_CACHE_NO;
658                         } else {
659                                 cache->cached = BTRFS_CACHE_STARTED;
660                                 cache->has_caching_ctl = 1;
661                         }
662                 }
663                 spin_unlock(&cache->lock);
664 #ifdef CONFIG_BTRFS_DEBUG
665                 if (ret == 1 &&
666                     btrfs_should_fragment_free_space(cache)) {
667                         u64 bytes_used;
668
669                         spin_lock(&cache->space_info->lock);
670                         spin_lock(&cache->lock);
671                         bytes_used = cache->key.offset -
672                                 btrfs_block_group_used(&cache->item);
673                         cache->space_info->bytes_used += bytes_used >> 1;
674                         spin_unlock(&cache->lock);
675                         spin_unlock(&cache->space_info->lock);
676                         fragment_free_space(cache);
677                 }
678 #endif
679                 mutex_unlock(&caching_ctl->mutex);
680
681                 wake_up(&caching_ctl->wait);
682                 if (ret == 1) {
683                         put_caching_control(caching_ctl);
684                         free_excluded_extents(cache);
685                         return 0;
686                 }
687         } else {
688                 /*
689                  * We're either using the free space tree or no caching at all.
690                  * Set cached to the appropriate value and wake up any waiters.
691                  */
692                 spin_lock(&cache->lock);
693                 if (load_cache_only) {
694                         cache->caching_ctl = NULL;
695                         cache->cached = BTRFS_CACHE_NO;
696                 } else {
697                         cache->cached = BTRFS_CACHE_STARTED;
698                         cache->has_caching_ctl = 1;
699                 }
700                 spin_unlock(&cache->lock);
701                 wake_up(&caching_ctl->wait);
702         }
703
704         if (load_cache_only) {
705                 put_caching_control(caching_ctl);
706                 return 0;
707         }
708
709         down_write(&fs_info->commit_root_sem);
710         refcount_inc(&caching_ctl->count);
711         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
712         up_write(&fs_info->commit_root_sem);
713
714         btrfs_get_block_group(cache);
715
716         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
717
718         return ret;
719 }
720
721 /*
722  * return the block group that starts at or after bytenr
723  */
724 static struct btrfs_block_group_cache *
725 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
726 {
727         return block_group_cache_tree_search(info, bytenr, 0);
728 }
729
730 /*
731  * return the block group that contains the given bytenr
732  */
733 struct btrfs_block_group_cache *btrfs_lookup_block_group(
734                                                  struct btrfs_fs_info *info,
735                                                  u64 bytenr)
736 {
737         return block_group_cache_tree_search(info, bytenr, 1);
738 }
739
740 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
741                                                   u64 flags)
742 {
743         struct list_head *head = &info->space_info;
744         struct btrfs_space_info *found;
745
746         flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
747
748         rcu_read_lock();
749         list_for_each_entry_rcu(found, head, list) {
750                 if (found->flags & flags) {
751                         rcu_read_unlock();
752                         return found;
753                 }
754         }
755         rcu_read_unlock();
756         return NULL;
757 }
758
759 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
760                              bool metadata, u64 root_objectid)
761 {
762         struct btrfs_space_info *space_info;
763         u64 flags;
764
765         if (metadata) {
766                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
767                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
768                 else
769                         flags = BTRFS_BLOCK_GROUP_METADATA;
770         } else {
771                 flags = BTRFS_BLOCK_GROUP_DATA;
772         }
773
774         space_info = __find_space_info(fs_info, flags);
775         ASSERT(space_info);
776         percpu_counter_add_batch(&space_info->total_bytes_pinned, num_bytes,
777                     BTRFS_TOTAL_BYTES_PINNED_BATCH);
778 }
779
780 /*
781  * after adding space to the filesystem, we need to clear the full flags
782  * on all the space infos.
783  */
784 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
785 {
786         struct list_head *head = &info->space_info;
787         struct btrfs_space_info *found;
788
789         rcu_read_lock();
790         list_for_each_entry_rcu(found, head, list)
791                 found->full = 0;
792         rcu_read_unlock();
793 }
794
795 /* simple helper to search for an existing data extent at a given offset */
796 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
797 {
798         int ret;
799         struct btrfs_key key;
800         struct btrfs_path *path;
801
802         path = btrfs_alloc_path();
803         if (!path)
804                 return -ENOMEM;
805
806         key.objectid = start;
807         key.offset = len;
808         key.type = BTRFS_EXTENT_ITEM_KEY;
809         ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
810         btrfs_free_path(path);
811         return ret;
812 }
813
814 /*
815  * helper function to lookup reference count and flags of a tree block.
816  *
817  * the head node for delayed ref is used to store the sum of all the
818  * reference count modifications queued up in the rbtree. the head
819  * node may also store the extent flags to set. This way you can check
820  * to see what the reference count and extent flags would be once all of
821  * the delayed refs have been processed.
822  */
823 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
824                              struct btrfs_fs_info *fs_info, u64 bytenr,
825                              u64 offset, int metadata, u64 *refs, u64 *flags)
826 {
827         struct btrfs_delayed_ref_head *head;
828         struct btrfs_delayed_ref_root *delayed_refs;
829         struct btrfs_path *path;
830         struct btrfs_extent_item *ei;
831         struct extent_buffer *leaf;
832         struct btrfs_key key;
833         u32 item_size;
834         u64 num_refs;
835         u64 extent_flags;
836         int ret;
837
838         /*
839          * If we don't have skinny metadata, don't bother doing anything
840          * different
841          */
842         if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
843                 offset = fs_info->nodesize;
844                 metadata = 0;
845         }
846
847         path = btrfs_alloc_path();
848         if (!path)
849                 return -ENOMEM;
850
851         if (!trans) {
852                 path->skip_locking = 1;
853                 path->search_commit_root = 1;
854         }
855
856 search_again:
857         key.objectid = bytenr;
858         key.offset = offset;
859         if (metadata)
860                 key.type = BTRFS_METADATA_ITEM_KEY;
861         else
862                 key.type = BTRFS_EXTENT_ITEM_KEY;
863
864         ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
865         if (ret < 0)
866                 goto out_free;
867
868         if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
869                 if (path->slots[0]) {
870                         path->slots[0]--;
871                         btrfs_item_key_to_cpu(path->nodes[0], &key,
872                                               path->slots[0]);
873                         if (key.objectid == bytenr &&
874                             key.type == BTRFS_EXTENT_ITEM_KEY &&
875                             key.offset == fs_info->nodesize)
876                                 ret = 0;
877                 }
878         }
879
880         if (ret == 0) {
881                 leaf = path->nodes[0];
882                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
883                 if (item_size >= sizeof(*ei)) {
884                         ei = btrfs_item_ptr(leaf, path->slots[0],
885                                             struct btrfs_extent_item);
886                         num_refs = btrfs_extent_refs(leaf, ei);
887                         extent_flags = btrfs_extent_flags(leaf, ei);
888                 } else {
889                         ret = -EINVAL;
890                         btrfs_print_v0_err(fs_info);
891                         if (trans)
892                                 btrfs_abort_transaction(trans, ret);
893                         else
894                                 btrfs_handle_fs_error(fs_info, ret, NULL);
895
896                         goto out_free;
897                 }
898
899                 BUG_ON(num_refs == 0);
900         } else {
901                 num_refs = 0;
902                 extent_flags = 0;
903                 ret = 0;
904         }
905
906         if (!trans)
907                 goto out;
908
909         delayed_refs = &trans->transaction->delayed_refs;
910         spin_lock(&delayed_refs->lock);
911         head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
912         if (head) {
913                 if (!mutex_trylock(&head->mutex)) {
914                         refcount_inc(&head->refs);
915                         spin_unlock(&delayed_refs->lock);
916
917                         btrfs_release_path(path);
918
919                         /*
920                          * Mutex was contended, block until it's released and try
921                          * again
922                          */
923                         mutex_lock(&head->mutex);
924                         mutex_unlock(&head->mutex);
925                         btrfs_put_delayed_ref_head(head);
926                         goto search_again;
927                 }
928                 spin_lock(&head->lock);
929                 if (head->extent_op && head->extent_op->update_flags)
930                         extent_flags |= head->extent_op->flags_to_set;
931                 else
932                         BUG_ON(num_refs == 0);
933
934                 num_refs += head->ref_mod;
935                 spin_unlock(&head->lock);
936                 mutex_unlock(&head->mutex);
937         }
938         spin_unlock(&delayed_refs->lock);
939 out:
940         WARN_ON(num_refs == 0);
941         if (refs)
942                 *refs = num_refs;
943         if (flags)
944                 *flags = extent_flags;
945 out_free:
946         btrfs_free_path(path);
947         return ret;
948 }
949
950 /*
951  * Back reference rules.  Back refs have three main goals:
952  *
953  * 1) differentiate between all holders of references to an extent so that
954  *    when a reference is dropped we can make sure it was a valid reference
955  *    before freeing the extent.
956  *
957  * 2) Provide enough information to quickly find the holders of an extent
958  *    if we notice a given block is corrupted or bad.
959  *
960  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
961  *    maintenance.  This is actually the same as #2, but with a slightly
962  *    different use case.
963  *
964  * There are two kinds of back refs. The implicit back refs is optimized
965  * for pointers in non-shared tree blocks. For a given pointer in a block,
966  * back refs of this kind provide information about the block's owner tree
967  * and the pointer's key. This information allows us to find the block by
968  * b-tree searching. The full back refs is for pointers in tree blocks not
969  * referenced by their owner trees. The location of the tree block is recorded
970  * in the back refs. Actually the full back refs is generic, and can be
971  * used in all cases where the implicit back refs is used. The major shortcoming
972  * of the full back refs is its overhead. Every time a tree block gets
973  * COWed, we have to update the back ref entries for all pointers in it.
974  *
975  * For a newly allocated tree block, we use implicit back refs for
976  * pointers in it. This means most tree related operations only involve
977  * implicit back refs. For a tree block created in old transaction, the
978  * only way to drop a reference to it is COW it. So we can detect the
979  * event that tree block loses its owner tree's reference and do the
980  * back refs conversion.
981  *
982  * When a tree block is COWed through a tree, there are four cases:
983  *
984  * The reference count of the block is one and the tree is the block's
985  * owner tree. Nothing to do in this case.
986  *
987  * The reference count of the block is one and the tree is not the
988  * block's owner tree. In this case, full back refs is used for pointers
989  * in the block. Remove these full back refs, add implicit back refs for
990  * every pointer in the new block.
991  *
992  * The reference count of the block is greater than one and the tree is
993  * the block's owner tree. In this case, implicit back refs is used for
994  * pointers in the block. Add full back refs for every pointer in the
995  * block, increase lower level extents' reference counts. The original
996  * implicit back refs are inherited by the new block.
997  *
998  * The reference count of the block is greater than one and the tree is
999  * not the block's owner tree. Add implicit back refs for every pointer in
1000  * the new block, increase lower level extents' reference count.
1001  *
1002  * Back Reference Key composing:
1003  *
1004  * The key objectid corresponds to the first byte in the extent,
1005  * The key type is used to differentiate between types of back refs.
1006  * There are different meanings of the key offset for different types
1007  * of back refs.
1008  *
1009  * File extents can be referenced by:
1010  *
1011  * - multiple snapshots, subvolumes, or different generations in one subvol
1012  * - different files inside a single subvolume
1013  * - different offsets inside a file (bookend extents in file.c)
1014  *
1015  * The extent ref structure for the implicit back refs has fields for:
1016  *
1017  * - Objectid of the subvolume root
1018  * - objectid of the file holding the reference
1019  * - original offset in the file
1020  * - how many bookend extents
1021  *
1022  * The key offset for the implicit back refs is a hash of the first
1023  * three fields.
1024  *
1025  * The extent ref structure for the full back refs has a field for:
1026  *
1027  * - number of pointers in the tree leaf
1028  *
1029  * The key offset for the full back refs is the first byte of
1030  * the tree leaf.
1031  *
1032  * When a file extent is allocated, the implicit back refs is used,
1033  * and the fields are filled in:
1034  *
1035  *     (root_key.objectid, inode objectid, offset in file, 1)
1036  *
1037  * When a file extent is removed by file truncation, we find the
1038  * corresponding implicit back refs and check the following fields:
1039  *
1040  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
1041  *
1042  * Btree extents can be referenced by:
1043  *
1044  * - Different subvolumes
1045  *
1046  * Both the implicit back refs and the full back refs for tree blocks
1047  * only consist of a key. The key offset for the implicit back refs is
1048  * objectid of block's owner tree. The key offset for the full back refs
1049  * is the first byte of parent block.
1050  *
1051  * When implicit back refs is used, information about the lowest key and
1052  * level of the tree block is required. This information is stored in
1053  * the tree block info structure.
1054  */
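/*
 * A concrete example of the key layouts described above, matching what the
 * lookup/insert helpers below build.  For data extents:
 *
 *   implicit back ref: (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *                       hash_extent_data_ref(root_objectid, owner, offset))
 *   full back ref:     (extent bytenr, BTRFS_SHARED_DATA_REF_KEY,
 *                       bytenr of the parent leaf)
 *
 * For tree blocks:
 *
 *   implicit back ref: (block bytenr, BTRFS_TREE_BLOCK_REF_KEY, root objectid)
 *   full back ref:     (block bytenr, BTRFS_SHARED_BLOCK_REF_KEY,
 *                       bytenr of the parent block)
 */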
1055
1056 /*
1057  * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
1058  * is_data == BTRFS_REF_TYPE_DATA, data type is required,
1059  * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
1060  */
1061 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
1062                                      struct btrfs_extent_inline_ref *iref,
1063                                      enum btrfs_inline_ref_type is_data)
1064 {
1065         int type = btrfs_extent_inline_ref_type(eb, iref);
1066         u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
1067
1068         if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1069             type == BTRFS_SHARED_BLOCK_REF_KEY ||
1070             type == BTRFS_SHARED_DATA_REF_KEY ||
1071             type == BTRFS_EXTENT_DATA_REF_KEY) {
1072                 if (is_data == BTRFS_REF_TYPE_BLOCK) {
1073                         if (type == BTRFS_TREE_BLOCK_REF_KEY)
1074                                 return type;
1075                         if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1076                                 ASSERT(eb->fs_info);
1077                                 /*
1078                                  * Every shared one has a parent tree
1079                                  * block, which must be aligned to
1080                                  * nodesize.
1081                                  */
1082                                 if (offset &&
1083                                     IS_ALIGNED(offset, eb->fs_info->nodesize))
1084                                         return type;
1085                         }
1086                 } else if (is_data == BTRFS_REF_TYPE_DATA) {
1087                         if (type == BTRFS_EXTENT_DATA_REF_KEY)
1088                                 return type;
1089                         if (type == BTRFS_SHARED_DATA_REF_KEY) {
1090                                 ASSERT(eb->fs_info);
1091                                 /*
1092                                  * Every shared one has a parent tree
1093                                  * block, which must be aligned to
1094                                  * nodesize.
1095                                  */
1096                                 if (offset &&
1097                                     IS_ALIGNED(offset, eb->fs_info->nodesize))
1098                                         return type;
1099                         }
1100                 } else {
1101                         ASSERT(is_data == BTRFS_REF_TYPE_ANY);
1102                         return type;
1103                 }
1104         }
1105
1106         btrfs_print_leaf((struct extent_buffer *)eb);
1107         btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
1108                   eb->start, type);
1109         WARN_ON(1);
1110
1111         return BTRFS_REF_TYPE_INVALID;
1112 }
1113
1114 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1115 {
1116         u32 high_crc = ~(u32)0;
1117         u32 low_crc = ~(u32)0;
1118         __le64 lenum;
1119
1120         lenum = cpu_to_le64(root_objectid);
1121         high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1122         lenum = cpu_to_le64(owner);
1123         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1124         lenum = cpu_to_le64(offset);
1125         low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1126
1127         return ((u64)high_crc << 31) ^ (u64)low_crc;
1128 }
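/*
 * Note on the hash above: high_crc covers only the root objectid and lands
 * in the upper bits of the result, while low_crc covers the inode objectid
 * and file offset and fills the low 32 bits.  Data ref items that belong to
 * the same root therefore share their top hash bits and tend to sort next to
 * each other in key offset order.
 */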
1129
1130 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1131                                      struct btrfs_extent_data_ref *ref)
1132 {
1133         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1134                                     btrfs_extent_data_ref_objectid(leaf, ref),
1135                                     btrfs_extent_data_ref_offset(leaf, ref));
1136 }
1137
1138 static int match_extent_data_ref(struct extent_buffer *leaf,
1139                                  struct btrfs_extent_data_ref *ref,
1140                                  u64 root_objectid, u64 owner, u64 offset)
1141 {
1142         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1143             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1144             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1145                 return 0;
1146         return 1;
1147 }
1148
1149 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1150                                            struct btrfs_path *path,
1151                                            u64 bytenr, u64 parent,
1152                                            u64 root_objectid,
1153                                            u64 owner, u64 offset)
1154 {
1155         struct btrfs_root *root = trans->fs_info->extent_root;
1156         struct btrfs_key key;
1157         struct btrfs_extent_data_ref *ref;
1158         struct extent_buffer *leaf;
1159         u32 nritems;
1160         int ret;
1161         int recow;
1162         int err = -ENOENT;
1163
1164         key.objectid = bytenr;
1165         if (parent) {
1166                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1167                 key.offset = parent;
1168         } else {
1169                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1170                 key.offset = hash_extent_data_ref(root_objectid,
1171                                                   owner, offset);
1172         }
1173 again:
1174         recow = 0;
1175         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1176         if (ret < 0) {
1177                 err = ret;
1178                 goto fail;
1179         }
1180
1181         if (parent) {
1182                 if (!ret)
1183                         return 0;
1184                 goto fail;
1185         }
1186
1187         leaf = path->nodes[0];
1188         nritems = btrfs_header_nritems(leaf);
1189         while (1) {
1190                 if (path->slots[0] >= nritems) {
1191                         ret = btrfs_next_leaf(root, path);
1192                         if (ret < 0)
1193                                 err = ret;
1194                         if (ret)
1195                                 goto fail;
1196
1197                         leaf = path->nodes[0];
1198                         nritems = btrfs_header_nritems(leaf);
1199                         recow = 1;
1200                 }
1201
1202                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1203                 if (key.objectid != bytenr ||
1204                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1205                         goto fail;
1206
1207                 ref = btrfs_item_ptr(leaf, path->slots[0],
1208                                      struct btrfs_extent_data_ref);
1209
1210                 if (match_extent_data_ref(leaf, ref, root_objectid,
1211                                           owner, offset)) {
1212                         if (recow) {
1213                                 btrfs_release_path(path);
1214                                 goto again;
1215                         }
1216                         err = 0;
1217                         break;
1218                 }
1219                 path->slots[0]++;
1220         }
1221 fail:
1222         return err;
1223 }
1224
1225 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1226                                            struct btrfs_path *path,
1227                                            u64 bytenr, u64 parent,
1228                                            u64 root_objectid, u64 owner,
1229                                            u64 offset, int refs_to_add)
1230 {
1231         struct btrfs_root *root = trans->fs_info->extent_root;
1232         struct btrfs_key key;
1233         struct extent_buffer *leaf;
1234         u32 size;
1235         u32 num_refs;
1236         int ret;
1237
1238         key.objectid = bytenr;
1239         if (parent) {
1240                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1241                 key.offset = parent;
1242                 size = sizeof(struct btrfs_shared_data_ref);
1243         } else {
1244                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1245                 key.offset = hash_extent_data_ref(root_objectid,
1246                                                   owner, offset);
1247                 size = sizeof(struct btrfs_extent_data_ref);
1248         }
1249
1250         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1251         if (ret && ret != -EEXIST)
1252                 goto fail;
1253
1254         leaf = path->nodes[0];
1255         if (parent) {
1256                 struct btrfs_shared_data_ref *ref;
1257                 ref = btrfs_item_ptr(leaf, path->slots[0],
1258                                      struct btrfs_shared_data_ref);
1259                 if (ret == 0) {
1260                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1261                 } else {
1262                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1263                         num_refs += refs_to_add;
1264                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1265                 }
1266         } else {
1267                 struct btrfs_extent_data_ref *ref;
1268                 while (ret == -EEXIST) {
1269                         ref = btrfs_item_ptr(leaf, path->slots[0],
1270                                              struct btrfs_extent_data_ref);
1271                         if (match_extent_data_ref(leaf, ref, root_objectid,
1272                                                   owner, offset))
1273                                 break;
1274                         btrfs_release_path(path);
1275                         key.offset++;
1276                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1277                                                       size);
1278                         if (ret && ret != -EEXIST)
1279                                 goto fail;
1280
1281                         leaf = path->nodes[0];
1282                 }
1283                 ref = btrfs_item_ptr(leaf, path->slots[0],
1284                                      struct btrfs_extent_data_ref);
1285                 if (ret == 0) {
1286                         btrfs_set_extent_data_ref_root(leaf, ref,
1287                                                        root_objectid);
1288                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1289                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1290                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1291                 } else {
1292                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1293                         num_refs += refs_to_add;
1294                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1295                 }
1296         }
1297         btrfs_mark_buffer_dirty(leaf);
1298         ret = 0;
1299 fail:
1300         btrfs_release_path(path);
1301         return ret;
1302 }
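/*
 * Illustrative note on the -EEXIST loop above: key.offset is the hash of
 * (root, objectid, offset), so two different data refs can collide.  When
 * the slot is already occupied by a non-matching ref, the key offset is
 * bumped by one and the insert retried, so colliding refs end up on
 * consecutive key offsets (lookup_extent_data_ref() walks forward over them
 * in the same way).
 */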
1303
1304 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1305                                            struct btrfs_path *path,
1306                                            int refs_to_drop, int *last_ref)
1307 {
1308         struct btrfs_key key;
1309         struct btrfs_extent_data_ref *ref1 = NULL;
1310         struct btrfs_shared_data_ref *ref2 = NULL;
1311         struct extent_buffer *leaf;
1312         u32 num_refs = 0;
1313         int ret = 0;
1314
1315         leaf = path->nodes[0];
1316         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1317
1318         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1319                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1320                                       struct btrfs_extent_data_ref);
1321                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1322         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1323                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1324                                       struct btrfs_shared_data_ref);
1325                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1326         } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
1327                 btrfs_print_v0_err(trans->fs_info);
1328                 btrfs_abort_transaction(trans, -EINVAL);
1329                 return -EINVAL;
1330         } else {
1331                 BUG();
1332         }
1333
1334         BUG_ON(num_refs < refs_to_drop);
1335         num_refs -= refs_to_drop;
1336
1337         if (num_refs == 0) {
1338                 ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
1339                 *last_ref = 1;
1340         } else {
1341                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1342                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1343                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1344                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1345                 btrfs_mark_buffer_dirty(leaf);
1346         }
1347         return ret;
1348 }
1349
1350 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1351                                           struct btrfs_extent_inline_ref *iref)
1352 {
1353         struct btrfs_key key;
1354         struct extent_buffer *leaf;
1355         struct btrfs_extent_data_ref *ref1;
1356         struct btrfs_shared_data_ref *ref2;
1357         u32 num_refs = 0;
1358         int type;
1359
1360         leaf = path->nodes[0];
1361         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1362
1363         BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
1364         if (iref) {
1365                 /*
1366                  * If type is invalid, we should have bailed out earlier than
1367                  * this call.
1368                  */
1369                 type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
1370                 ASSERT(type != BTRFS_REF_TYPE_INVALID);
1371                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1372                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1373                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1374                 } else {
1375                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1376                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1377                 }
1378         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1379                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1380                                       struct btrfs_extent_data_ref);
1381                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1382         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1383                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1384                                       struct btrfs_shared_data_ref);
1385                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1386         } else {
1387                 WARN_ON(1);
1388         }
1389         return num_refs;
1390 }
1391
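/*
 * Look up a keyed tree block backref: a SHARED_BLOCK_REF keyed on the parent
 * block when parent is set, otherwise a TREE_BLOCK_REF keyed on the owning
 * root.  Returns 0 if found, -ENOENT if not, or a negative error from the
 * search.
 */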
1392 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1393                                           struct btrfs_path *path,
1394                                           u64 bytenr, u64 parent,
1395                                           u64 root_objectid)
1396 {
1397         struct btrfs_root *root = trans->fs_info->extent_root;
1398         struct btrfs_key key;
1399         int ret;
1400
1401         key.objectid = bytenr;
1402         if (parent) {
1403                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1404                 key.offset = parent;
1405         } else {
1406                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1407                 key.offset = root_objectid;
1408         }
1409
1410         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1411         if (ret > 0)
1412                 ret = -ENOENT;
1413         return ret;
1414 }
1415
1416 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1417                                           struct btrfs_path *path,
1418                                           u64 bytenr, u64 parent,
1419                                           u64 root_objectid)
1420 {
1421         struct btrfs_key key;
1422         int ret;
1423
1424         key.objectid = bytenr;
1425         if (parent) {
1426                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1427                 key.offset = parent;
1428         } else {
1429                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1430                 key.offset = root_objectid;
1431         }
1432
1433         ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
1434                                       path, &key, 0);
1435         btrfs_release_path(path);
1436         return ret;
1437 }
1438
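/*
 * Map (parent, owner) to the backref type that describes the reference:
 *
 *   metadata (owner < BTRFS_FIRST_FREE_OBJECTID):
 *       parent != 0  ->  BTRFS_SHARED_BLOCK_REF_KEY
 *       parent == 0  ->  BTRFS_TREE_BLOCK_REF_KEY
 *   data:
 *       parent != 0  ->  BTRFS_SHARED_DATA_REF_KEY
 *       parent == 0  ->  BTRFS_EXTENT_DATA_REF_KEY
 */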
1439 static inline int extent_ref_type(u64 parent, u64 owner)
1440 {
1441         int type;
1442         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1443                 if (parent > 0)
1444                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1445                 else
1446                         type = BTRFS_TREE_BLOCK_REF_KEY;
1447         } else {
1448                 if (parent > 0)
1449                         type = BTRFS_SHARED_DATA_REF_KEY;
1450                 else
1451                         type = BTRFS_EXTENT_DATA_REF_KEY;
1452         }
1453         return type;
1454 }
1455
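/*
 * Using only the nodes already held in the path, return the key that follows
 * the current slot.  Returns 0 and fills *key on success, 1 if there is no
 * next key.
 */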
1456 static int find_next_key(struct btrfs_path *path, int level,
1457                          struct btrfs_key *key)
1458
1459 {
1460         for (; level < BTRFS_MAX_LEVEL; level++) {
1461                 if (!path->nodes[level])
1462                         break;
1463                 if (path->slots[level] + 1 >=
1464                     btrfs_header_nritems(path->nodes[level]))
1465                         continue;
1466                 if (level == 0)
1467                         btrfs_item_key_to_cpu(path->nodes[level], key,
1468                                               path->slots[level] + 1);
1469                 else
1470                         btrfs_node_key_to_cpu(path->nodes[level], key,
1471                                               path->slots[level] + 1);
1472                 return 0;
1473         }
1474         return 1;
1475 }
1476
1477 /*
1478  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1479  * to the address of the inline back ref, and 0 is returned.
1480  *
1481  * If the back ref isn't found, *ref_ret is set to the address where it
1482  * should be inserted, and -ENOENT is returned.
1483  *
1484  * If insert is true and there are too many inline back refs, the path
1485  * points to the extent item, and -EAGAIN is returned.
1486  *
1487  * NOTE: inline back refs are ordered in the same way that back ref
1488  *       items in the tree are ordered.
1489  */
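/*
 * A typical caller pattern (see insert_inline_extent_backref() and
 * __btrfs_inc_extent_ref() below):
 *
 *	ret = lookup_inline_extent_backref(trans, path, &iref, ..., 1);
 *	0       -> the ref exists, bump its count via the returned iref
 *	-ENOENT -> iref is the insertion point, set up a new inline ref there
 *	-EAGAIN -> no room inline, fall back to a keyed backref item
 */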
1490 static noinline_for_stack
1491 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1492                                  struct btrfs_path *path,
1493                                  struct btrfs_extent_inline_ref **ref_ret,
1494                                  u64 bytenr, u64 num_bytes,
1495                                  u64 parent, u64 root_objectid,
1496                                  u64 owner, u64 offset, int insert)
1497 {
1498         struct btrfs_fs_info *fs_info = trans->fs_info;
1499         struct btrfs_root *root = fs_info->extent_root;
1500         struct btrfs_key key;
1501         struct extent_buffer *leaf;
1502         struct btrfs_extent_item *ei;
1503         struct btrfs_extent_inline_ref *iref;
1504         u64 flags;
1505         u64 item_size;
1506         unsigned long ptr;
1507         unsigned long end;
1508         int extra_size;
1509         int type;
1510         int want;
1511         int ret;
1512         int err = 0;
1513         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
1514         int needed;
1515
1516         key.objectid = bytenr;
1517         key.type = BTRFS_EXTENT_ITEM_KEY;
1518         key.offset = num_bytes;
1519
1520         want = extent_ref_type(parent, owner);
1521         if (insert) {
1522                 extra_size = btrfs_extent_inline_ref_size(want);
1523                 path->keep_locks = 1;
1524         } else
1525                 extra_size = -1;
1526
1527         /*
1528          * Owner is our level, so we can just add one to get the level for the
1529          * block we are interested in.
1530          */
1531         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1532                 key.type = BTRFS_METADATA_ITEM_KEY;
1533                 key.offset = owner;
1534         }
1535
1536 again:
1537         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1538         if (ret < 0) {
1539                 err = ret;
1540                 goto out;
1541         }
1542
1543         /*
1544          * We may be a newly converted file system which still has the old fat
1545          * extent entries for metadata, so try and see if we have one of those.
1546          */
1547         if (ret > 0 && skinny_metadata) {
1548                 skinny_metadata = false;
1549                 if (path->slots[0]) {
1550                         path->slots[0]--;
1551                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1552                                               path->slots[0]);
1553                         if (key.objectid == bytenr &&
1554                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1555                             key.offset == num_bytes)
1556                                 ret = 0;
1557                 }
1558                 if (ret) {
1559                         key.objectid = bytenr;
1560                         key.type = BTRFS_EXTENT_ITEM_KEY;
1561                         key.offset = num_bytes;
1562                         btrfs_release_path(path);
1563                         goto again;
1564                 }
1565         }
1566
1567         if (ret && !insert) {
1568                 err = -ENOENT;
1569                 goto out;
1570         } else if (WARN_ON(ret)) {
1571                 err = -EIO;
1572                 goto out;
1573         }
1574
1575         leaf = path->nodes[0];
1576         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1577         if (unlikely(item_size < sizeof(*ei))) {
1578                 err = -EINVAL;
1579                 btrfs_print_v0_err(fs_info);
1580                 btrfs_abort_transaction(trans, err);
1581                 goto out;
1582         }
1583
1584         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1585         flags = btrfs_extent_flags(leaf, ei);
1586
1587         ptr = (unsigned long)(ei + 1);
1588         end = (unsigned long)ei + item_size;
1589
1590         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1591                 ptr += sizeof(struct btrfs_tree_block_info);
1592                 BUG_ON(ptr > end);
1593         }
1594
1595         if (owner >= BTRFS_FIRST_FREE_OBJECTID)
1596                 needed = BTRFS_REF_TYPE_DATA;
1597         else
1598                 needed = BTRFS_REF_TYPE_BLOCK;
1599
1600         err = -ENOENT;
1601         while (1) {
1602                 if (ptr >= end) {
1603                         WARN_ON(ptr > end);
1604                         break;
1605                 }
1606                 iref = (struct btrfs_extent_inline_ref *)ptr;
1607                 type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
1608                 if (type == BTRFS_REF_TYPE_INVALID) {
1609                         err = -EUCLEAN;
1610                         goto out;
1611                 }
1612
1613                 if (want < type)
1614                         break;
1615                 if (want > type) {
1616                         ptr += btrfs_extent_inline_ref_size(type);
1617                         continue;
1618                 }
1619
1620                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1621                         struct btrfs_extent_data_ref *dref;
1622                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1623                         if (match_extent_data_ref(leaf, dref, root_objectid,
1624                                                   owner, offset)) {
1625                                 err = 0;
1626                                 break;
1627                         }
1628                         if (hash_extent_data_ref_item(leaf, dref) <
1629                             hash_extent_data_ref(root_objectid, owner, offset))
1630                                 break;
1631                 } else {
1632                         u64 ref_offset;
1633                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1634                         if (parent > 0) {
1635                                 if (parent == ref_offset) {
1636                                         err = 0;
1637                                         break;
1638                                 }
1639                                 if (ref_offset < parent)
1640                                         break;
1641                         } else {
1642                                 if (root_objectid == ref_offset) {
1643                                         err = 0;
1644                                         break;
1645                                 }
1646                                 if (ref_offset < root_objectid)
1647                                         break;
1648                         }
1649                 }
1650                 ptr += btrfs_extent_inline_ref_size(type);
1651         }
1652         if (err == -ENOENT && insert) {
1653                 if (item_size + extra_size >=
1654                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1655                         err = -EAGAIN;
1656                         goto out;
1657                 }
1658                 /*
1659                  * To add a new inline back ref, we have to make sure
1660                  * there is no corresponding back ref item.
1661                  * For simplicity, we just do not add a new inline back
1662                  * ref if there is any kind of item for this block.
1663                  */
1664                 if (find_next_key(path, 0, &key) == 0 &&
1665                     key.objectid == bytenr &&
1666                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1667                         err = -EAGAIN;
1668                         goto out;
1669                 }
1670         }
1671         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1672 out:
1673         if (insert) {
1674                 path->keep_locks = 0;
1675                 btrfs_unlock_up_safe(path, 1);
1676         }
1677         return err;
1678 }
1679
1680 /*
1681  * helper to add new inline back ref
1682  */
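/*
 * The extent item is grown with btrfs_extend_item() and any inline refs that
 * sort after the new one are shifted up with memmove_extent_buffer(), so the
 * new ref can be written at the position previously found by
 * lookup_inline_extent_backref().
 */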
1683 static noinline_for_stack
1684 void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
1685                                  struct btrfs_path *path,
1686                                  struct btrfs_extent_inline_ref *iref,
1687                                  u64 parent, u64 root_objectid,
1688                                  u64 owner, u64 offset, int refs_to_add,
1689                                  struct btrfs_delayed_extent_op *extent_op)
1690 {
1691         struct extent_buffer *leaf;
1692         struct btrfs_extent_item *ei;
1693         unsigned long ptr;
1694         unsigned long end;
1695         unsigned long item_offset;
1696         u64 refs;
1697         int size;
1698         int type;
1699
1700         leaf = path->nodes[0];
1701         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1702         item_offset = (unsigned long)iref - (unsigned long)ei;
1703
1704         type = extent_ref_type(parent, owner);
1705         size = btrfs_extent_inline_ref_size(type);
1706
1707         btrfs_extend_item(fs_info, path, size);
1708
1709         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1710         refs = btrfs_extent_refs(leaf, ei);
1711         refs += refs_to_add;
1712         btrfs_set_extent_refs(leaf, ei, refs);
1713         if (extent_op)
1714                 __run_delayed_extent_op(extent_op, leaf, ei);
1715
1716         ptr = (unsigned long)ei + item_offset;
1717         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1718         if (ptr < end - size)
1719                 memmove_extent_buffer(leaf, ptr + size, ptr,
1720                                       end - size - ptr);
1721
1722         iref = (struct btrfs_extent_inline_ref *)ptr;
1723         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1724         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1725                 struct btrfs_extent_data_ref *dref;
1726                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1727                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1728                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1729                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1730                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1731         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1732                 struct btrfs_shared_data_ref *sref;
1733                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1734                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1735                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1736         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1737                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1738         } else {
1739                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1740         }
1741         btrfs_mark_buffer_dirty(leaf);
1742 }
1743
1744 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1745                                  struct btrfs_path *path,
1746                                  struct btrfs_extent_inline_ref **ref_ret,
1747                                  u64 bytenr, u64 num_bytes, u64 parent,
1748                                  u64 root_objectid, u64 owner, u64 offset)
1749 {
1750         int ret;
1751
1752         ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
1753                                            num_bytes, parent, root_objectid,
1754                                            owner, offset, 0);
1755         if (ret != -ENOENT)
1756                 return ret;
1757
1758         btrfs_release_path(path);
1759         *ref_ret = NULL;
1760
1761         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1762                 ret = lookup_tree_block_ref(trans, path, bytenr, parent,
1763                                             root_objectid);
1764         } else {
1765                 ret = lookup_extent_data_ref(trans, path, bytenr, parent,
1766                                              root_objectid, owner, offset);
1767         }
1768         return ret;
1769 }
1770
1771 /*
1772  * helper to update/remove inline back ref
1773  */
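/*
 * When the modification brings the ref count to zero the inline ref is cut
 * out of the extent item (memmove_extent_buffer() + btrfs_truncate_item())
 * and *last_ref is set; otherwise only the stored count is rewritten.
 */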
1774 static noinline_for_stack
1775 void update_inline_extent_backref(struct btrfs_path *path,
1776                                   struct btrfs_extent_inline_ref *iref,
1777                                   int refs_to_mod,
1778                                   struct btrfs_delayed_extent_op *extent_op,
1779                                   int *last_ref)
1780 {
1781         struct extent_buffer *leaf = path->nodes[0];
1782         struct btrfs_fs_info *fs_info = leaf->fs_info;
1783         struct btrfs_extent_item *ei;
1784         struct btrfs_extent_data_ref *dref = NULL;
1785         struct btrfs_shared_data_ref *sref = NULL;
1786         unsigned long ptr;
1787         unsigned long end;
1788         u32 item_size;
1789         int size;
1790         int type;
1791         u64 refs;
1792
1793         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1794         refs = btrfs_extent_refs(leaf, ei);
1795         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1796         refs += refs_to_mod;
1797         btrfs_set_extent_refs(leaf, ei, refs);
1798         if (extent_op)
1799                 __run_delayed_extent_op(extent_op, leaf, ei);
1800
1801         /*
1802          * If type is invalid, we should have bailed out after
1803          * lookup_inline_extent_backref().
1804          */
1805         type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
1806         ASSERT(type != BTRFS_REF_TYPE_INVALID);
1807
1808         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1809                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1810                 refs = btrfs_extent_data_ref_count(leaf, dref);
1811         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1812                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1813                 refs = btrfs_shared_data_ref_count(leaf, sref);
1814         } else {
1815                 refs = 1;
1816                 BUG_ON(refs_to_mod != -1);
1817         }
1818
1819         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1820         refs += refs_to_mod;
1821
1822         if (refs > 0) {
1823                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1824                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1825                 else
1826                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1827         } else {
1828                 *last_ref = 1;
1829                 size = btrfs_extent_inline_ref_size(type);
1830                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1831                 ptr = (unsigned long)iref;
1832                 end = (unsigned long)ei + item_size;
1833                 if (ptr + size < end)
1834                         memmove_extent_buffer(leaf, ptr, ptr + size,
1835                                               end - ptr - size);
1836                 item_size -= size;
1837                 btrfs_truncate_item(fs_info, path, item_size, 1);
1838         }
1839         btrfs_mark_buffer_dirty(leaf);
1840 }
1841
1842 static noinline_for_stack
1843 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1844                                  struct btrfs_path *path,
1845                                  u64 bytenr, u64 num_bytes, u64 parent,
1846                                  u64 root_objectid, u64 owner,
1847                                  u64 offset, int refs_to_add,
1848                                  struct btrfs_delayed_extent_op *extent_op)
1849 {
1850         struct btrfs_extent_inline_ref *iref;
1851         int ret;
1852
1853         ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
1854                                            num_bytes, parent, root_objectid,
1855                                            owner, offset, 1);
1856         if (ret == 0) {
1857                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1858                 update_inline_extent_backref(path, iref, refs_to_add,
1859                                              extent_op, NULL);
1860         } else if (ret == -ENOENT) {
1861                 setup_inline_extent_backref(trans->fs_info, path, iref, parent,
1862                                             root_objectid, owner, offset,
1863                                             refs_to_add, extent_op);
1864                 ret = 0;
1865         }
1866         return ret;
1867 }
1868
1869 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1870                                  struct btrfs_path *path,
1871                                  u64 bytenr, u64 parent, u64 root_objectid,
1872                                  u64 owner, u64 offset, int refs_to_add)
1873 {
1874         int ret;
1875         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1876                 BUG_ON(refs_to_add != 1);
1877                 ret = insert_tree_block_ref(trans, path, bytenr, parent,
1878                                             root_objectid);
1879         } else {
1880                 ret = insert_extent_data_ref(trans, path, bytenr, parent,
1881                                              root_objectid, owner, offset,
1882                                              refs_to_add);
1883         }
1884         return ret;
1885 }
1886
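/*
 * Remove refs_to_drop references, either from an inline ref or from a keyed
 * backref item: keyed data refs go through remove_extent_data_ref(), keyed
 * tree block refs are a whole item and are deleted outright.
 */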
1887 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1888                                  struct btrfs_path *path,
1889                                  struct btrfs_extent_inline_ref *iref,
1890                                  int refs_to_drop, int is_data, int *last_ref)
1891 {
1892         int ret = 0;
1893
1894         BUG_ON(!is_data && refs_to_drop != 1);
1895         if (iref) {
1896                 update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
1897                                              last_ref);
1898         } else if (is_data) {
1899                 ret = remove_extent_data_ref(trans, path, refs_to_drop,
1900                                              last_ref);
1901         } else {
1902                 *last_ref = 1;
1903                 ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
1904         }
1905         return ret;
1906 }
1907
1908 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
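/*
 * Issue discards for [start, start + len) on one device, clipping out the
 * fixed superblock mirror locations; *discarded_bytes reports how many bytes
 * were actually submitted.  The range is trimmed to 512-byte sector
 * boundaries first.
 */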
1909 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1910                                u64 *discarded_bytes)
1911 {
1912         int j, ret = 0;
1913         u64 bytes_left, end;
1914         u64 aligned_start = ALIGN(start, 1 << 9);
1915
1916         if (WARN_ON(start != aligned_start)) {
1917                 len -= aligned_start - start;
1918                 len = round_down(len, 1 << 9);
1919                 start = aligned_start;
1920         }
1921
1922         *discarded_bytes = 0;
1923
1924         if (!len)
1925                 return 0;
1926
1927         end = start + len;
1928         bytes_left = len;
1929
1930         /* Skip any superblocks on this device. */
1931         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1932                 u64 sb_start = btrfs_sb_offset(j);
1933                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1934                 u64 size = sb_start - start;
1935
1936                 if (!in_range(sb_start, start, bytes_left) &&
1937                     !in_range(sb_end, start, bytes_left) &&
1938                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1939                         continue;
1940
1941                 /*
1942                  * Superblock spans beginning of range.  Adjust start and
1943                  * try again.
1944                  */
1945                 if (sb_start <= start) {
1946                         start += sb_end - start;
1947                         if (start > end) {
1948                                 bytes_left = 0;
1949                                 break;
1950                         }
1951                         bytes_left = end - start;
1952                         continue;
1953                 }
1954
1955                 if (size) {
1956                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1957                                                    GFP_NOFS, 0);
1958                         if (!ret)
1959                                 *discarded_bytes += size;
1960                         else if (ret != -EOPNOTSUPP)
1961                                 return ret;
1962                 }
1963
1964                 start = sb_end;
1965                 if (start > end) {
1966                         bytes_left = 0;
1967                         break;
1968                 }
1969                 bytes_left = end - start;
1970         }
1971
1972         if (bytes_left) {
1973                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
1974                                            GFP_NOFS, 0);
1975                 if (!ret)
1976                         *discarded_bytes += bytes_left;
1977         }
1978         return ret;
1979 }
1980
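/*
 * Discard a logical extent: map it to its physical stripes with
 * btrfs_map_block() and issue one discard per stripe, skipping missing
 * devices and devices whose queue does not support discard.
 */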
1981 int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
1982                          u64 num_bytes, u64 *actual_bytes)
1983 {
1984         int ret;
1985         u64 discarded_bytes = 0;
1986         struct btrfs_bio *bbio = NULL;
1987
1988
1989         /*
1990          * Avoid races with device replace and make sure our bbio has devices
1991          * associated with its stripes that don't go away while we are discarding.
1992          */
1993         btrfs_bio_counter_inc_blocked(fs_info);
1994         /* Tell the block device(s) that the sectors can be discarded */
1995         ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
1996                               &bbio, 0);
1997         /* Error condition is -ENOMEM */
1998         if (!ret) {
1999                 struct btrfs_bio_stripe *stripe = bbio->stripes;
2000                 int i;
2001
2002
2003                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2004                         u64 bytes;
2005                         struct request_queue *req_q;
2006
2007                         if (!stripe->dev->bdev) {
2008                                 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
2009                                 continue;
2010                         }
2011                         req_q = bdev_get_queue(stripe->dev->bdev);
2012                         if (!blk_queue_discard(req_q))
2013                                 continue;
2014
2015                         ret = btrfs_issue_discard(stripe->dev->bdev,
2016                                                   stripe->physical,
2017                                                   stripe->length,
2018                                                   &bytes);
2019                         if (!ret)
2020                                 discarded_bytes += bytes;
2021                         else if (ret != -EOPNOTSUPP)
2022                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2023
2024                         /*
2025                          * Just in case we get back EOPNOTSUPP for some reason,
2026                          * ignore the return value so we don't screw up
2027                          * people calling discard_extent.
2028                          */
2029                         ret = 0;
2030                 }
2031                 btrfs_put_bbio(bbio);
2032         }
2033         btrfs_bio_counter_dec(fs_info);
2034
2035         if (actual_bytes)
2036                 *actual_bytes = discarded_bytes;
2037
2038
2039         if (ret == -EOPNOTSUPP)
2040                 ret = 0;
2041         return ret;
2042 }
2043
2044 /* Can return -ENOMEM */
2045 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2046                          struct btrfs_root *root,
2047                          u64 bytenr, u64 num_bytes, u64 parent,
2048                          u64 root_objectid, u64 owner, u64 offset)
2049 {
2050         struct btrfs_fs_info *fs_info = root->fs_info;
2051         int old_ref_mod, new_ref_mod;
2052         int ret;
2053
2054         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2055                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2056
2057         btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
2058                            owner, offset, BTRFS_ADD_DELAYED_REF);
2059
2060         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2061                 ret = btrfs_add_delayed_tree_ref(trans, bytenr,
2062                                                  num_bytes, parent,
2063                                                  root_objectid, (int)owner,
2064                                                  BTRFS_ADD_DELAYED_REF, NULL,
2065                                                  &old_ref_mod, &new_ref_mod);
2066         } else {
2067                 ret = btrfs_add_delayed_data_ref(trans, bytenr,
2068                                                  num_bytes, parent,
2069                                                  root_objectid, owner, offset,
2070                                                  0, BTRFS_ADD_DELAYED_REF,
2071                                                  &old_ref_mod, &new_ref_mod);
2072         }
2073
2074         if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0) {
2075                 bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
2076
2077                 add_pinned_bytes(fs_info, -num_bytes, metadata, root_objectid);
2078         }
2079
2080         return ret;
2081 }
2082
2083 /*
2084  * __btrfs_inc_extent_ref - insert backreference for a given extent
2085  *
2086  * @trans:          Handle of transaction
2087  *
2088  * @node:           The delayed ref node used to get the bytenr/length for
2089  *                  the extent whose references are incremented.
2090  *
2091  * @parent:         If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
2092  *                  BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
2093  *                  bytenr of the parent block. Since new extents are always
2094  *                  created with indirect references, this will only be the case
2095  *                  when relocating a shared extent. In that case, root_objectid
2096  *                  will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
2097  *                  be 0.
2098  *
2099  * @root_objectid:  The id of the root where this modification has originated,
2100  *                  this can be either one of the well-known metadata trees or
2101  *                  the subvolume id which references this extent.
2102  *
2103  * @owner:          For data extents it is the inode number of the owning file.
2104  *                  For metadata extents this parameter holds the level in the
2105  *                  tree of the extent.
2106  *
2107  * @offset:         For metadata extents the offset is ignored and is currently
2108  *                  always passed as 0. For data extents it is the file offset
2109  *                  this extent belongs to.
2110  *
2111  * @refs_to_add:    Number of references to add
2112  *
2113  * @extent_op:      Pointer to a structure, holding information necessary when
2114  *                  updating a tree block's flags
2115  *
2116  */
2117 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2118                                   struct btrfs_delayed_ref_node *node,
2119                                   u64 parent, u64 root_objectid,
2120                                   u64 owner, u64 offset, int refs_to_add,
2121                                   struct btrfs_delayed_extent_op *extent_op)
2122 {
2123         struct btrfs_path *path;
2124         struct extent_buffer *leaf;
2125         struct btrfs_extent_item *item;
2126         struct btrfs_key key;
2127         u64 bytenr = node->bytenr;
2128         u64 num_bytes = node->num_bytes;
2129         u64 refs;
2130         int ret;
2131
2132         path = btrfs_alloc_path();
2133         if (!path)
2134                 return -ENOMEM;
2135
2136         path->reada = READA_FORWARD;
2137         path->leave_spinning = 1;
2138         /* this will set up the path even if it fails to insert the back ref */
2139         ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
2140                                            parent, root_objectid, owner,
2141                                            offset, refs_to_add, extent_op);
2142         if ((ret < 0 && ret != -EAGAIN) || !ret)
2143                 goto out;
2144
2145         /*
2146          * Ok we had -EAGAIN which means we didn't have space to insert an
2147          * inline extent ref, so just update the reference count and add a
2148          * normal backref.
2149          */
2150         leaf = path->nodes[0];
2151         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2152         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2153         refs = btrfs_extent_refs(leaf, item);
2154         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2155         if (extent_op)
2156                 __run_delayed_extent_op(extent_op, leaf, item);
2157
2158         btrfs_mark_buffer_dirty(leaf);
2159         btrfs_release_path(path);
2160
2161         path->reada = READA_FORWARD;
2162         path->leave_spinning = 1;
2163         /* now insert the actual backref */
2164         ret = insert_extent_backref(trans, path, bytenr, parent, root_objectid,
2165                                     owner, offset, refs_to_add);
2166         if (ret)
2167                 btrfs_abort_transaction(trans, ret);
2168 out:
2169         btrfs_free_path(path);
2170         return ret;
2171 }
2172
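/*
 * Apply a single delayed data ref: insert the reserved file extent item when
 * the allocation still has to hit the extent tree, otherwise add or drop a
 * backref on the existing extent item.
 */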
2173 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2174                                 struct btrfs_delayed_ref_node *node,
2175                                 struct btrfs_delayed_extent_op *extent_op,
2176                                 int insert_reserved)
2177 {
2178         int ret = 0;
2179         struct btrfs_delayed_data_ref *ref;
2180         struct btrfs_key ins;
2181         u64 parent = 0;
2182         u64 ref_root = 0;
2183         u64 flags = 0;
2184
2185         ins.objectid = node->bytenr;
2186         ins.offset = node->num_bytes;
2187         ins.type = BTRFS_EXTENT_ITEM_KEY;
2188
2189         ref = btrfs_delayed_node_to_data_ref(node);
2190         trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);
2191
2192         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2193                 parent = ref->parent;
2194         ref_root = ref->root;
2195
2196         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2197                 if (extent_op)
2198                         flags |= extent_op->flags_to_set;
2199                 ret = alloc_reserved_file_extent(trans, parent, ref_root,
2200                                                  flags, ref->objectid,
2201                                                  ref->offset, &ins,
2202                                                  node->ref_mod);
2203         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2204                 ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
2205                                              ref->objectid, ref->offset,
2206                                              node->ref_mod, extent_op);
2207         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2208                 ret = __btrfs_free_extent(trans, node, parent,
2209                                           ref_root, ref->objectid,
2210                                           ref->offset, node->ref_mod,
2211                                           extent_op);
2212         } else {
2213                 BUG();
2214         }
2215         return ret;
2216 }
2217
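/*
 * Fold a delayed extent op into an extent item that is already mapped into
 * @leaf: OR in flags_to_set when update_flags is set, and rewrite the
 * tree_block_info key when update_key is set.
 */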
2218 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2219                                     struct extent_buffer *leaf,
2220                                     struct btrfs_extent_item *ei)
2221 {
2222         u64 flags = btrfs_extent_flags(leaf, ei);
2223         if (extent_op->update_flags) {
2224                 flags |= extent_op->flags_to_set;
2225                 btrfs_set_extent_flags(leaf, ei, flags);
2226         }
2227
2228         if (extent_op->update_key) {
2229                 struct btrfs_tree_block_info *bi;
2230                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2231                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2232                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2233         }
2234 }
2235
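/*
 * Locate the extent item for a delayed ref head, trying the skinny
 * METADATA_ITEM key first and falling back to the fat EXTENT_ITEM key, then
 * apply the pending extent op to it.
 */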
2236 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2237                                  struct btrfs_delayed_ref_head *head,
2238                                  struct btrfs_delayed_extent_op *extent_op)
2239 {
2240         struct btrfs_fs_info *fs_info = trans->fs_info;
2241         struct btrfs_key key;
2242         struct btrfs_path *path;
2243         struct btrfs_extent_item *ei;
2244         struct extent_buffer *leaf;
2245         u32 item_size;
2246         int ret;
2247         int err = 0;
2248         int metadata = !extent_op->is_data;
2249
2250         if (trans->aborted)
2251                 return 0;
2252
2253         if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2254                 metadata = 0;
2255
2256         path = btrfs_alloc_path();
2257         if (!path)
2258                 return -ENOMEM;
2259
2260         key.objectid = head->bytenr;
2261
2262         if (metadata) {
2263                 key.type = BTRFS_METADATA_ITEM_KEY;
2264                 key.offset = extent_op->level;
2265         } else {
2266                 key.type = BTRFS_EXTENT_ITEM_KEY;
2267                 key.offset = head->num_bytes;
2268         }
2269
2270 again:
2271         path->reada = READA_FORWARD;
2272         path->leave_spinning = 1;
2273         ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
2274         if (ret < 0) {
2275                 err = ret;
2276                 goto out;
2277         }
2278         if (ret > 0) {
2279                 if (metadata) {
2280                         if (path->slots[0] > 0) {
2281                                 path->slots[0]--;
2282                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2283                                                       path->slots[0]);
2284                                 if (key.objectid == head->bytenr &&
2285                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2286                                     key.offset == head->num_bytes)
2287                                         ret = 0;
2288                         }
2289                         if (ret > 0) {
2290                                 btrfs_release_path(path);
2291                                 metadata = 0;
2292
2293                                 key.objectid = head->bytenr;
2294                                 key.offset = head->num_bytes;
2295                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2296                                 goto again;
2297                         }
2298                 } else {
2299                         err = -EIO;
2300                         goto out;
2301                 }
2302         }
2303
2304         leaf = path->nodes[0];
2305         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2306
2307         if (unlikely(item_size < sizeof(*ei))) {
2308                 err = -EINVAL;
2309                 btrfs_print_v0_err(fs_info);
2310                 btrfs_abort_transaction(trans, err);
2311                 goto out;
2312         }
2313
2314         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2315         __run_delayed_extent_op(extent_op, leaf, ei);
2316
2317         btrfs_mark_buffer_dirty(leaf);
2318 out:
2319         btrfs_free_path(path);
2320         return err;
2321 }
2322
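/*
 * Apply a single delayed tree block ref.  Tree blocks carry exactly one
 * reference per delayed node, so any other ref_mod is flagged as an error.
 */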
2323 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2324                                 struct btrfs_delayed_ref_node *node,
2325                                 struct btrfs_delayed_extent_op *extent_op,
2326                                 int insert_reserved)
2327 {
2328         int ret = 0;
2329         struct btrfs_delayed_tree_ref *ref;
2330         u64 parent = 0;
2331         u64 ref_root = 0;
2332
2333         ref = btrfs_delayed_node_to_tree_ref(node);
2334         trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);
2335
2336         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2337                 parent = ref->parent;
2338         ref_root = ref->root;
2339
2340         if (node->ref_mod != 1) {
2341                 btrfs_err(trans->fs_info,
2342         "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2343                           node->bytenr, node->ref_mod, node->action, ref_root,
2344                           parent);
2345                 return -EIO;
2346         }
2347         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2348                 BUG_ON(!extent_op || !extent_op->update_flags);
2349                 ret = alloc_reserved_tree_block(trans, node, extent_op);
2350         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2351                 ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
2352                                              ref->level, 0, 1, extent_op);
2353         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2354                 ret = __btrfs_free_extent(trans, node, parent, ref_root,
2355                                           ref->level, 0, 1, extent_op);
2356         } else {
2357                 BUG();
2358         }
2359         return ret;
2360 }
2361
2362 /* helper function to actually process a single delayed ref entry */
2363 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2364                                struct btrfs_delayed_ref_node *node,
2365                                struct btrfs_delayed_extent_op *extent_op,
2366                                int insert_reserved)
2367 {
2368         int ret = 0;
2369
2370         if (trans->aborted) {
2371                 if (insert_reserved)
2372                         btrfs_pin_extent(trans->fs_info, node->bytenr,
2373                                          node->num_bytes, 1);
2374                 return 0;
2375         }
2376
2377         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2378             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2379                 ret = run_delayed_tree_ref(trans, node, extent_op,
2380                                            insert_reserved);
2381         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2382                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2383                 ret = run_delayed_data_ref(trans, node, extent_op,
2384                                            insert_reserved);
2385         else
2386                 BUG();
2387         if (ret && insert_reserved)
2388                 btrfs_pin_extent(trans->fs_info, node->bytenr,
2389                                  node->num_bytes, 1);
2390         return ret;
2391 }
2392
2393 static inline struct btrfs_delayed_ref_node *
2394 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2395 {
2396         struct btrfs_delayed_ref_node *ref;
2397
2398         if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
2399                 return NULL;
2400
2401         /*
2402          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2403          * This is to prevent a ref count from going down to zero, which deletes
2404          * the extent item from the extent tree, when there still are references
2405          * to add, which would fail because they would not find the extent item.
2406          */
2407         if (!list_empty(&head->ref_add_list))
2408                 return list_first_entry(&head->ref_add_list,
2409                                 struct btrfs_delayed_ref_node, add_list);
2410
2411         ref = rb_entry(rb_first_cached(&head->ref_tree),
2412                        struct btrfs_delayed_ref_node, ref_node);
2413         ASSERT(list_empty(&ref->add_list));
2414         return ref;
2415 }
2416
2417 static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
2418                                       struct btrfs_delayed_ref_head *head)
2419 {
2420         spin_lock(&delayed_refs->lock);
2421         head->processing = 0;
2422         delayed_refs->num_heads_ready++;
2423         spin_unlock(&delayed_refs->lock);
2424         btrfs_delayed_ref_unlock(head);
2425 }
2426
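/*
 * Return the extent op that still needs to run for this head.  Heads that
 * must insert their reserved extent apply the flags when the new extent item
 * is created (see run_delayed_data_ref()), so their extent op is freed here
 * instead of being returned.
 */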
2427 static struct btrfs_delayed_extent_op *cleanup_extent_op(
2428                                 struct btrfs_delayed_ref_head *head)
2429 {
2430         struct btrfs_delayed_extent_op *extent_op = head->extent_op;
2431
2432         if (!extent_op)
2433                 return NULL;
2434
2435         if (head->must_insert_reserved) {
2436                 head->extent_op = NULL;
2437                 btrfs_free_delayed_extent_op(extent_op);
2438                 return NULL;
2439         }
2440         return extent_op;
2441 }
2442
2443 static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
2444                                      struct btrfs_delayed_ref_head *head)
2445 {
2446         struct btrfs_delayed_extent_op *extent_op;
2447         int ret;
2448
2449         extent_op = cleanup_extent_op(head);
2450         if (!extent_op)
2451                 return 0;
2452         head->extent_op = NULL;
2453         spin_unlock(&head->lock);
2454         ret = run_delayed_extent_op(trans, head, extent_op);
2455         btrfs_free_delayed_extent_op(extent_op);
2456         return ret ? ret : 1;
2457 }
2458
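/*
 * Release the accounting attached to a ref head that is going away: the
 * total_bytes_pinned counter for heads that netted out to a drop, the
 * pending csum bytes for data heads, and the delayed_refs_rsv reservation
 * held for the head update itself.
 */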
2459 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
2460                                   struct btrfs_delayed_ref_root *delayed_refs,
2461                                   struct btrfs_delayed_ref_head *head)
2462 {
2463         int nr_items = 1;       /* Dropping this ref head update. */
2464
2465         if (head->total_ref_mod < 0) {
2466                 struct btrfs_space_info *space_info;
2467                 u64 flags;
2468
2469                 if (head->is_data)
2470                         flags = BTRFS_BLOCK_GROUP_DATA;
2471                 else if (head->is_system)
2472                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
2473                 else
2474                         flags = BTRFS_BLOCK_GROUP_METADATA;
2475                 space_info = __find_space_info(fs_info, flags);
2476                 ASSERT(space_info);
2477                 percpu_counter_add_batch(&space_info->total_bytes_pinned,
2478                                    -head->num_bytes,
2479                                    BTRFS_TOTAL_BYTES_PINNED_BATCH);
2480
2481                 /*
2482                  * We had csum deletions accounted for in our delayed refs rsv,
2483                  * so we need to drop the csum leaves for this update from our
2484                  * delayed_refs_rsv.
2485                  */
2486                 if (head->is_data) {
2487                         spin_lock(&delayed_refs->lock);
2488                         delayed_refs->pending_csums -= head->num_bytes;
2489                         spin_unlock(&delayed_refs->lock);
2490                         nr_items += btrfs_csum_bytes_to_leaves(fs_info,
2491                                 head->num_bytes);
2492                 }
2493         }
2494
2495         btrfs_delayed_refs_rsv_release(fs_info, nr_items);
2496 }
2497
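/*
 * Final processing of a ref head once all of its delayed refs have run: run
 * any remaining extent op, re-check under the delayed_refs lock that no new
 * refs were attached, then delete the head and release its accounting.
 */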
2498 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
2499                             struct btrfs_delayed_ref_head *head)
2500 {
2501
2502         struct btrfs_fs_info *fs_info = trans->fs_info;
2503         struct btrfs_delayed_ref_root *delayed_refs;
2504         int ret;
2505
2506         delayed_refs = &trans->transaction->delayed_refs;
2507
2508         ret = run_and_cleanup_extent_op(trans, head);
2509         if (ret < 0) {
2510                 unselect_delayed_ref_head(delayed_refs, head);
2511                 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2512                 return ret;
2513         } else if (ret) {
2514                 return ret;
2515         }
2516
2517         /*
2518          * Need to drop our head ref lock and re-acquire the delayed ref lock
2519          * and then re-check to make sure nobody got added.
2520          */
2521         spin_unlock(&head->lock);
2522         spin_lock(&delayed_refs->lock);
2523         spin_lock(&head->lock);
2524         if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
2525                 spin_unlock(&head->lock);
2526                 spin_unlock(&delayed_refs->lock);
2527                 return 1;
2528         }
2529         btrfs_delete_ref_head(delayed_refs, head);
2530         spin_unlock(&head->lock);
2531         spin_unlock(&delayed_refs->lock);
2532
2533         if (head->must_insert_reserved) {
2534                 btrfs_pin_extent(fs_info, head->bytenr,
2535                                  head->num_bytes, 1);
2536                 if (head->is_data) {
2537                         ret = btrfs_del_csums(trans, fs_info, head->bytenr,
2538                                               head->num_bytes);
2539                 }
2540         }
2541
2542         btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
2543
2544         trace_run_delayed_ref_head(fs_info, head, 0);
2545         btrfs_delayed_ref_unlock(head);
2546         btrfs_put_delayed_ref_head(head);
2547         return 0;
2548 }
2549
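/*
 * Pick the next delayed ref head to process and lock it.  Returns NULL when
 * there is nothing to do, or ERR_PTR(-EAGAIN) if the head went away while we
 * waited for its mutex and another one should be selected.
 */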
2550 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
2551                                         struct btrfs_trans_handle *trans)
2552 {
2553         struct btrfs_delayed_ref_root *delayed_refs =
2554                 &trans->transaction->delayed_refs;
2555         struct btrfs_delayed_ref_head *head = NULL;
2556         int ret;
2557
2558         spin_lock(&delayed_refs->lock);
2559         head = btrfs_select_ref_head(delayed_refs);
2560         if (!head) {
2561                 spin_unlock(&delayed_refs->lock);
2562                 return head;
2563         }
2564
2565         /*
2566          * Grab the lock that says we are going to process all the refs for
2567          * this head
2568          */
2569         ret = btrfs_delayed_ref_lock(delayed_refs, head);
2570         spin_unlock(&delayed_refs->lock);
2571
2572         /*
2573          * We may have dropped the spin lock to get the head mutex lock, and
2574          * that might have given someone else time to free the head.  If that's
2575          * true, it has been removed from our list and we can move on.
2576          */
2577         if (ret == -EAGAIN)
2578                 head = ERR_PTR(-EAGAIN);
2579
2580         return head;
2581 }
2582
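/*
 * Run every ref currently queued on @locked_ref, bumping *run_refs for each
 * one processed.  Returns -EAGAIN if a ref cannot run yet because of a
 * pending tree mod log sequence number; in that case the head has already
 * been unselected.
 */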
2583 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
2584                                     struct btrfs_delayed_ref_head *locked_ref,
2585                                     unsigned long *run_refs)
2586 {
2587         struct btrfs_fs_info *fs_info = trans->fs_info;
2588         struct btrfs_delayed_ref_root *delayed_refs;
2589         struct btrfs_delayed_extent_op *extent_op;
2590         struct btrfs_delayed_ref_node *ref;
2591         int must_insert_reserved = 0;
2592         int ret;
2593
2594         delayed_refs = &trans->transaction->delayed_refs;
2595
2596         lockdep_assert_held(&locked_ref->mutex);
2597         lockdep_assert_held(&locked_ref->lock);
2598
2599         while ((ref = select_delayed_ref(locked_ref))) {
2600                 if (ref->seq &&
2601                     btrfs_check_delayed_seq(fs_info, ref->seq)) {
2602                         spin_unlock(&locked_ref->lock);
2603                         unselect_delayed_ref_head(delayed_refs, locked_ref);
2604                         return -EAGAIN;
2605                 }
2606
2607                 (*run_refs)++;
2608                 ref->in_tree = 0;
2609                 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
2610                 RB_CLEAR_NODE(&ref->ref_node);
2611                 if (!list_empty(&ref->add_list))
2612                         list_del(&ref->add_list);
2613                 /*
2614                  * When we play the delayed ref, also correct the ref_mod on
2615                  * head
2616                  */
2617                 switch (ref->action) {
2618                 case BTRFS_ADD_DELAYED_REF:
2619                 case BTRFS_ADD_DELAYED_EXTENT:
2620                         locked_ref->ref_mod -= ref->ref_mod;
2621                         break;
2622                 case BTRFS_DROP_DELAYED_REF:
2623                         locked_ref->ref_mod += ref->ref_mod;
2624                         break;
2625                 default:
2626                         WARN_ON(1);
2627                 }
2628                 atomic_dec(&delayed_refs->num_entries);
2629
2630                 /*
2631                  * Record the must_insert_reserved flag before we drop the
2632                  * spin lock.
2633                  */
2634                 must_insert_reserved = locked_ref->must_insert_reserved;
2635                 locked_ref->must_insert_reserved = 0;
2636
2637                 extent_op = locked_ref->extent_op;
2638                 locked_ref->extent_op = NULL;
2639                 spin_unlock(&locked_ref->lock);
2640
2641                 ret = run_one_delayed_ref(trans, ref, extent_op,
2642                                           must_insert_reserved);
2643
2644                 btrfs_free_delayed_extent_op(extent_op);
2645                 if (ret) {
2646                         unselect_delayed_ref_head(delayed_refs, locked_ref);
2647                         btrfs_put_delayed_ref(ref);
2648                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
2649                                     ret);
2650                         return ret;
2651                 }
2652
2653                 btrfs_put_delayed_ref(ref);
2654                 cond_resched();
2655
2656                 spin_lock(&locked_ref->lock);
2657                 btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
2658         }
2659
2660         return 0;
2661 }
2662
2663 /*
2664  * Returns 0 on success or if called with an already aborted transaction.
2665  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2666  */
2667 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2668                                              unsigned long nr)
2669 {
2670         struct btrfs_fs_info *fs_info = trans->fs_info;
2671         struct btrfs_delayed_ref_root *delayed_refs;
2672         struct btrfs_delayed_ref_head *locked_ref = NULL;
2673         ktime_t start = ktime_get();
2674         int ret;
2675         unsigned long count = 0;
2676         unsigned long actual_count = 0;
2677
2678         delayed_refs = &trans->transaction->delayed_refs;
2679         do {
2680                 if (!locked_ref) {
2681                         locked_ref = btrfs_obtain_ref_head(trans);
2682                         if (IS_ERR_OR_NULL(locked_ref)) {
2683                                 if (PTR_ERR(locked_ref) == -EAGAIN) {
2684                                         continue;
2685                                 } else {
2686                                         break;
2687                                 }
2688                         }
2689                         count++;
2690                 }
2691                 /*
2692                  * We need to try and merge add/drops of the same ref since we
2693                  * can run into issues with relocate dropping the implicit ref
2694                  * and then it being added back again before the drop can
2695                  * finish.  If we merged anything we need to re-loop so we can
2696                  * get a good ref.
2697                  * Or we can get node references of the same type that weren't
2698                  * merged when created due to bumps in the tree mod seq, and
2699                  * we need to merge them to prevent adding an inline extent
2700                  * backref before dropping it (triggering a BUG_ON at
2701                  * insert_inline_extent_backref()).
2702                  */
2703                 spin_lock(&locked_ref->lock);
2704                 btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
2705
2706                 ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
2707                                                       &actual_count);
2708                 if (ret < 0 && ret != -EAGAIN) {
2709                         /*
2710                          * Error, btrfs_run_delayed_refs_for_head already
2711                          * unlocked everything so just bail out
2712                          */
2713                         return ret;
2714                 } else if (!ret) {
2715                         /*
2716                          * Success, perform the usual cleanup of a processed
2717                          * head
2718                          */
2719                         ret = cleanup_ref_head(trans, locked_ref);
2720                         if (ret > 0) {
2721                                 /* We dropped our lock, we need to loop. */
2722                                 ret = 0;
2723                                 continue;
2724                         } else if (ret) {
2725                                 return ret;
2726                         }
2727                 }
2728
2729                 /*
2730                  * Either success case or btrfs_run_delayed_refs_for_head
2731                  * returned -EAGAIN, meaning we need to select another head
2732                  */
2733
2734                 locked_ref = NULL;
2735                 cond_resched();
2736         } while ((nr != -1 && count < nr) || locked_ref);
2737
2738         /*
2739          * We don't want to include ref heads since we can have empty ref heads
2740          * and those will drastically skew our runtime down since we just do
2741          * accounting, no actual extent tree updates.
2742          */
2743         if (actual_count > 0) {
2744                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2745                 u64 avg;
2746
2747                 /*
2748                  * We weigh the current average higher than our current runtime
2749                  * to avoid large swings in the average.
2750                  */
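                     /* i.e. new average = (3 * old average + runtime) / 4 */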
2751                 spin_lock(&delayed_refs->lock);
2752                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2753                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2754                 spin_unlock(&delayed_refs->lock);
2755         }
2756         return 0;
2757 }
2758
2759 #ifdef SCRAMBLE_DELAYED_REFS
2760 /*
2761  * Normally delayed refs get processed in ascending bytenr order. This
2762  * correlates in most cases to the order added. To expose dependencies on this
2763  * order, we start to process the tree in the middle instead of the beginning
2764  */
2765 static u64 find_middle(struct rb_root *root)
2766 {
2767         struct rb_node *n = root->rb_node;
2768         struct btrfs_delayed_ref_node *entry;
2769         int alt = 1;
2770         u64 middle;
2771         u64 first = 0, last = 0;
2772
2773         n = rb_first(root);
2774         if (n) {
2775                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2776                 first = entry->bytenr;
2777         }
2778         n = rb_last(root);
2779         if (n) {
2780                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2781                 last = entry->bytenr;
2782         }
2783         n = root->rb_node;
2784
2785         while (n) {
2786                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2787                 WARN_ON(!entry->in_tree);
2788
2789                 middle = entry->bytenr;
2790
2791                 if (alt)
2792                         n = n->rb_left;
2793                 else
2794                         n = n->rb_right;
2795
2796                 alt = 1 - alt;
2797         }
2798         return middle;
2799 }
2800 #endif
2801
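     /*
      * Estimate how many extent tree leaves would be needed to hold the
      * extent items (each with one inline ref, plus a tree block info when
      * skinny metadata is not enabled) for @heads delayed ref heads.
      */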
2802 static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
2803 {
2804         u64 num_bytes;
2805
2806         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2807                              sizeof(struct btrfs_extent_inline_ref));
2808         if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2809                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2810
2811         /*
2812          * We don't ever fill up leaves all the way, so this is only a rough
2813          * (low) estimate of the number of leaves we'll really want to use.
2814          */
2815         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
2816 }
2817
2818 /*
2819  * Takes the number of bytes to be csummed and figures out how many leaves it
2820  * would require to store the csums for that many bytes.
2821  */
2822 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
2823 {
2824         u64 csum_size;
2825         u64 num_csums_per_leaf;
2826         u64 num_csums;
2827
2828         csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
2829         num_csums_per_leaf = div64_u64(csum_size,
2830                         (u64)btrfs_super_csum_size(fs_info->super_copy));
2831         num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
2832         num_csums += num_csums_per_leaf - 1;
2833         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2834         return num_csums;
2835 }
2836
2837 bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
2838 {
2839         struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
2840         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
2841         bool ret = false;
2842         u64 reserved;
2843
2844         spin_lock(&global_rsv->lock);
2845         reserved = global_rsv->reserved;
2846         spin_unlock(&global_rsv->lock);
2847
2848         /*
2849          * Since the global reserve is just kind of magic we don't really want
2850          * to rely on it to save our bacon, so if our size is more than the
2851          * delayed_refs_rsv and the global rsv then it's time to think about
2852          * bailing.
2853          */
2854         spin_lock(&delayed_refs_rsv->lock);
2855         reserved += delayed_refs_rsv->reserved;
2856         if (delayed_refs_rsv->size >= reserved)
2857                 ret = true;
2858         spin_unlock(&delayed_refs_rsv->lock);
2859         return ret;
2860 }
2861
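     /*
      * Estimate the cost of running all currently queued delayed refs (number
      * of queued entries times fs_info->avg_delayed_ref_runtime) and report a
      * nonzero throttle hint once that estimate reaches half a second;
      * otherwise fall back to the reservation based check above.
      */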
2862 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
2863 {
2864         u64 num_entries =
2865                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2866         u64 avg_runtime;
2867         u64 val;
2868
2869         smp_mb();
2870         avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
2871         val = num_entries * avg_runtime;
2872         if (val >= NSEC_PER_SEC)
2873                 return 1;
2874         if (val >= NSEC_PER_SEC / 2)
2875                 return 2;
2876
2877         return btrfs_check_space_for_delayed_refs(trans->fs_info);
2878 }
2879
2880 struct async_delayed_refs {
2881         struct btrfs_root *root;
2882         u64 transid;
2883         int count;
2884         int error;
2885         int sync;
2886         struct completion wait;
2887         struct btrfs_work work;
2888 };
2889
2890 static inline struct async_delayed_refs *
2891 to_async_delayed_refs(struct btrfs_work *work)
2892 {
2893         return container_of(work, struct async_delayed_refs, work);
2894 }
2895
2896 static void delayed_ref_async_start(struct btrfs_work *work)
2897 {
2898         struct async_delayed_refs *async = to_async_delayed_refs(work);
2899         struct btrfs_trans_handle *trans;
2900         struct btrfs_fs_info *fs_info = async->root->fs_info;
2901         int ret;
2902
2903         /* if the commit is already started, we don't need to wait here */
2904         if (btrfs_transaction_blocked(fs_info))
2905                 goto done;
2906
2907         trans = btrfs_join_transaction(async->root);
2908         if (IS_ERR(trans)) {
2909                 async->error = PTR_ERR(trans);
2910                 goto done;
2911         }
2912
2913         /* Don't bother flushing if we got into a different transaction */
2914         if (trans->transid > async->transid)
2915                 goto end;
2916
2917         ret = btrfs_run_delayed_refs(trans, async->count);
2918         if (ret)
2919                 async->error = ret;
2920 end:
2921         ret = btrfs_end_transaction(trans);
2922         if (ret && !async->error)
2923                 async->error = ret;
2924 done:
2925         if (async->sync)
2926                 complete(&async->wait);
2927         else
2928                 kfree(async);
2929 }
2930
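     /*
      * Kick off delayed ref processing on the extent_workers workqueue.  When
      * @wait is set, block on the completion and return the worker's error;
      * otherwise the async_delayed_refs struct is freed by the worker itself.
      */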
2931 int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
2932                                  unsigned long count, u64 transid, int wait)
2933 {
2934         struct async_delayed_refs *async;
2935         int ret;
2936
2937         async = kmalloc(sizeof(*async), GFP_NOFS);
2938         if (!async)
2939                 return -ENOMEM;
2940
2941         async->root = fs_info->tree_root;
2942         async->count = count;
2943         async->error = 0;
2944         async->transid = transid;
2945         if (wait)
2946                 async->sync = 1;
2947         else
2948                 async->sync = 0;
2949         init_completion(&async->wait);
2950
2951         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2952                         delayed_ref_async_start, NULL, NULL);
2953
2954         btrfs_queue_work(fs_info->extent_workers, &async->work);
2955
2956         if (wait) {
2957                 wait_for_completion(&async->wait);
2958                 ret = async->error;
2959                 kfree(async);
2960                 return ret;
2961         }
2962         return 0;
2963 }
2964
2965 /*
2966  * this starts processing the delayed reference count updates and
2967  * extent insertions we have queued up so far.  count can be
2968  * 0, which means to process everything in the tree at the start
2969  * of the run (but not newly added entries), or it can be some target
2970  * number you'd like to process.
2971  *
2972  * Returns 0 on success or if called with an aborted transaction
2973  * Returns <0 on error and aborts the transaction
2974  */
2975 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2976                            unsigned long count)
2977 {
2978         struct btrfs_fs_info *fs_info = trans->fs_info;
2979         struct rb_node *node;
2980         struct btrfs_delayed_ref_root *delayed_refs;
2981         struct btrfs_delayed_ref_head *head;
2982         int ret;
2983         int run_all = count == (unsigned long)-1;
2984
2985         /* We'll clean this up in btrfs_cleanup_transaction */
2986         if (trans->aborted)
2987                 return 0;
2988
2989         if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2990                 return 0;
2991
2992         delayed_refs = &trans->transaction->delayed_refs;
2993         if (count == 0)
2994                 count = atomic_read(&delayed_refs->num_entries) * 2;
2995
2996 again:
2997 #ifdef SCRAMBLE_DELAYED_REFS
2998         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2999 #endif
3000         ret = __btrfs_run_delayed_refs(trans, count);
3001         if (ret < 0) {
3002                 btrfs_abort_transaction(trans, ret);
3003                 return ret;
3004         }
3005
3006         if (run_all) {
3007                 btrfs_create_pending_block_groups(trans);
3008
3009                 spin_lock(&delayed_refs->lock);
3010                 node = rb_first_cached(&delayed_refs->href_root);
3011                 if (!node) {
3012                         spin_unlock(&delayed_refs->lock);
3013                         goto out;
3014                 }
3015                 head = rb_entry(node, struct btrfs_delayed_ref_head,
3016                                 href_node);
3017                 refcount_inc(&head->refs);
3018                 spin_unlock(&delayed_refs->lock);
3019
3020                 /* Mutex was contended, block until it's released and retry. */
3021                 mutex_lock(&head->mutex);
3022                 mutex_unlock(&head->mutex);
3023
3024                 btrfs_put_delayed_ref_head(head);
3025                 cond_resched();
3026                 goto again;
3027         }
3028 out:
3029         return 0;
3030 }
3031
3032 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3033                                 struct btrfs_fs_info *fs_info,
3034                                 u64 bytenr, u64 num_bytes, u64 flags,
3035                                 int level, int is_data)
3036 {
3037         struct btrfs_delayed_extent_op *extent_op;
3038         int ret;
3039
3040         extent_op = btrfs_alloc_delayed_extent_op();
3041         if (!extent_op)
3042                 return -ENOMEM;
3043
3044         extent_op->flags_to_set = flags;
3045         extent_op->update_flags = true;
3046         extent_op->update_key = false;
3047         extent_op->is_data = is_data ? true : false;
3048         extent_op->level = level;
3049
3050         ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
3051                                           num_bytes, extent_op);
3052         if (ret)
3053                 btrfs_free_delayed_extent_op(extent_op);
3054         return ret;
3055 }
3056
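     /*
      * Look through the delayed refs queued against @bytenr in the running
      * transaction.  Returns 1 if any ref other than a data ref for exactly
      * this root/objectid/offset exists (i.e. a cross reference), 0 if none,
      * and -EAGAIN if the head mutex was contended and the caller must retry.
      */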
3057 static noinline int check_delayed_ref(struct btrfs_root *root,
3058                                       struct btrfs_path *path,
3059                                       u64 objectid, u64 offset, u64 bytenr)
3060 {
3061         struct btrfs_delayed_ref_head *head;
3062         struct btrfs_delayed_ref_node *ref;
3063         struct btrfs_delayed_data_ref *data_ref;
3064         struct btrfs_delayed_ref_root *delayed_refs;
3065         struct btrfs_transaction *cur_trans;
3066         struct rb_node *node;
3067         int ret = 0;
3068
3069         spin_lock(&root->fs_info->trans_lock);
3070         cur_trans = root->fs_info->running_transaction;
3071         if (cur_trans)
3072                 refcount_inc(&cur_trans->use_count);
3073         spin_unlock(&root->fs_info->trans_lock);
3074         if (!cur_trans)
3075                 return 0;
3076
3077         delayed_refs = &cur_trans->delayed_refs;
3078         spin_lock(&delayed_refs->lock);
3079         head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
3080         if (!head) {
3081                 spin_unlock(&delayed_refs->lock);
3082                 btrfs_put_transaction(cur_trans);
3083                 return 0;
3084         }
3085
3086         if (!mutex_trylock(&head->mutex)) {
3087                 refcount_inc(&head->refs);
3088                 spin_unlock(&delayed_refs->lock);
3089
3090                 btrfs_release_path(path);
3091
3092                 /*
3093                  * Mutex was contended, block until it's released and let
3094                  * caller try again
3095                  */
3096                 mutex_lock(&head->mutex);
3097                 mutex_unlock(&head->mutex);
3098                 btrfs_put_delayed_ref_head(head);
3099                 btrfs_put_transaction(cur_trans);
3100                 return -EAGAIN;
3101         }
3102         spin_unlock(&delayed_refs->lock);
3103
3104         spin_lock(&head->lock);
3105         /*
3106          * XXX: We should replace this with a proper search function in the
3107          * future.
3108          */
3109         for (node = rb_first_cached(&head->ref_tree); node;
3110              node = rb_next(node)) {
3111                 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
3112                 /* If it's a shared ref we know a cross reference exists */
3113                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3114                         ret = 1;
3115                         break;
3116                 }
3117
3118                 data_ref = btrfs_delayed_node_to_data_ref(ref);
3119
3120                 /*
3121                  * If our ref doesn't match the one we're currently looking at
3122                  * then we have a cross reference.
3123                  */
3124                 if (data_ref->root != root->root_key.objectid ||
3125                     data_ref->objectid != objectid ||
3126                     data_ref->offset != offset) {
3127                         ret = 1;
3128                         break;
3129                 }
3130         }
3131         spin_unlock(&head->lock);
3132         mutex_unlock(&head->mutex);
3133         btrfs_put_transaction(cur_trans);
3134         return ret;
3135 }
3136
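     /*
      * Check the committed extent tree.  Returns 0 only when the extent at
      * @bytenr carries a single inline data ref matching this
      * root/objectid/offset and its generation is newer than the root's last
      * snapshot; returns 1 (or -ENOENT) when a cross reference may exist.
      */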
3137 static noinline int check_committed_ref(struct btrfs_root *root,
3138                                         struct btrfs_path *path,
3139                                         u64 objectid, u64 offset, u64 bytenr)
3140 {
3141         struct btrfs_fs_info *fs_info = root->fs_info;
3142         struct btrfs_root *extent_root = fs_info->extent_root;
3143         struct extent_buffer *leaf;
3144         struct btrfs_extent_data_ref *ref;
3145         struct btrfs_extent_inline_ref *iref;
3146         struct btrfs_extent_item *ei;
3147         struct btrfs_key key;
3148         u32 item_size;
3149         int type;
3150         int ret;
3151
3152         key.objectid = bytenr;
3153         key.offset = (u64)-1;
3154         key.type = BTRFS_EXTENT_ITEM_KEY;
3155
3156         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3157         if (ret < 0)
3158                 goto out;
3159         BUG_ON(ret == 0); /* Corruption */
3160
3161         ret = -ENOENT;
3162         if (path->slots[0] == 0)
3163                 goto out;
3164
3165         path->slots[0]--;
3166         leaf = path->nodes[0];
3167         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3168
3169         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3170                 goto out;
3171
3172         ret = 1;
3173         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3174         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3175
3176         if (item_size != sizeof(*ei) +
3177             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3178                 goto out;
3179
3180         if (btrfs_extent_generation(leaf, ei) <=
3181             btrfs_root_last_snapshot(&root->root_item))
3182                 goto out;
3183
3184         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3185
3186         type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
3187         if (type != BTRFS_EXTENT_DATA_REF_KEY)
3188                 goto out;
3189
3190         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3191         if (btrfs_extent_refs(leaf, ei) !=
3192             btrfs_extent_data_ref_count(leaf, ref) ||
3193             btrfs_extent_data_ref_root(leaf, ref) !=
3194             root->root_key.objectid ||
3195             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3196             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3197                 goto out;
3198
3199         ret = 0;
3200 out:
3201         return ret;
3202 }
3203
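     /*
      * Determine whether the data extent at @bytenr may be referenced by
      * anything other than this root/objectid/offset.  The committed extent
      * tree is checked first, then the current transaction's delayed refs,
      * retrying while check_delayed_ref() reports contention.  Returns 0 when
      * no cross reference was found.
      */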
3204 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
3205                           u64 bytenr)
3206 {
3207         struct btrfs_path *path;
3208         int ret;
3209
3210         path = btrfs_alloc_path();
3211         if (!path)
3212                 return -ENOMEM;
3213
3214         do {
3215                 ret = check_committed_ref(root, path, objectid,
3216                                           offset, bytenr);
3217                 if (ret && ret != -ENOENT)
3218                         goto out;
3219
3220                 ret = check_delayed_ref(root, path, objectid, offset, bytenr);
3221         } while (ret == -EAGAIN);
3222
3223 out:
3224         btrfs_free_path(path);
3225         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3226                 WARN_ON(ret > 0);
3227         return ret;
3228 }
3229
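     /*
      * Walk every item in @buf and add (inc == 1) or drop (inc == 0) one
      * reference for everything it points to: regular file extents for a
      * leaf, child blocks for a node.  With @full_backref the references are
      * recorded against @buf itself as the parent rather than against the
      * owning root.
      */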
3230 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3231                            struct btrfs_root *root,
3232                            struct extent_buffer *buf,
3233                            int full_backref, int inc)
3234 {
3235         struct btrfs_fs_info *fs_info = root->fs_info;
3236         u64 bytenr;
3237         u64 num_bytes;
3238         u64 parent;
3239         u64 ref_root;
3240         u32 nritems;
3241         struct btrfs_key key;
3242         struct btrfs_file_extent_item *fi;
3243         int i;
3244         int level;
3245         int ret = 0;
3246         int (*process_func)(struct btrfs_trans_handle *,
3247                             struct btrfs_root *,
3248                             u64, u64, u64, u64, u64, u64);
3249
3250
3251         if (btrfs_is_testing(fs_info))
3252                 return 0;
3253
3254         ref_root = btrfs_header_owner(buf);
3255         nritems = btrfs_header_nritems(buf);
3256         level = btrfs_header_level(buf);
3257
3258         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3259                 return 0;
3260
3261         if (inc)
3262                 process_func = btrfs_inc_extent_ref;
3263         else
3264                 process_func = btrfs_free_extent;
3265
3266         if (full_backref)
3267                 parent = buf->start;
3268         else
3269                 parent = 0;
3270
3271         for (i = 0; i < nritems; i++) {
3272                 if (level == 0) {
3273                         btrfs_item_key_to_cpu(buf, &key, i);
3274                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3275                                 continue;
3276                         fi = btrfs_item_ptr(buf, i,
3277                                             struct btrfs_file_extent_item);
3278                         if (btrfs_file_extent_type(buf, fi) ==
3279                             BTRFS_FILE_EXTENT_INLINE)
3280                                 continue;
3281                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3282                         if (bytenr == 0)
3283                                 continue;
3284
3285                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3286                         key.offset -= btrfs_file_extent_offset(buf, fi);
3287                         ret = process_func(trans, root, bytenr, num_bytes,
3288                                            parent, ref_root, key.objectid,
3289                                            key.offset);
3290                         if (ret)
3291                                 goto fail;
3292                 } else {
3293                         bytenr = btrfs_node_blockptr(buf, i);
3294                         num_bytes = fs_info->nodesize;
3295                         ret = process_func(trans, root, bytenr, num_bytes,
3296                                            parent, ref_root, level - 1, 0);
3297                         if (ret)
3298                                 goto fail;
3299                 }
3300         }
3301         return 0;
3302 fail:
3303         return ret;
3304 }
3305
3306 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3307                   struct extent_buffer *buf, int full_backref)
3308 {
3309         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3310 }
3311
3312 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3313                   struct extent_buffer *buf, int full_backref)
3314 {
3315         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3316 }
3317
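     /*
      * Copy the in-memory block group item into its slot in the extent tree
      * and mark the leaf dirty.  Returns -ENOENT if the item is not in the
      * tree yet.
      */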
3318 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3319                                  struct btrfs_fs_info *fs_info,
3320                                  struct btrfs_path *path,
3321                                  struct btrfs_block_group_cache *cache)
3322 {
3323         int ret;
3324         struct btrfs_root *extent_root = fs_info->extent_root;
3325         unsigned long bi;
3326         struct extent_buffer *leaf;
3327
3328         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3329         if (ret) {
3330                 if (ret > 0)
3331                         ret = -ENOENT;
3332                 goto fail;
3333         }
3334
3335         leaf = path->nodes[0];
3336         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3337         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3338         btrfs_mark_buffer_dirty(leaf);
3339 fail:
3340         btrfs_release_path(path);
3341         return ret;
3342
3343 }
3344
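     /*
      * Return the block group following @cache in bytenr order, dropping our
      * reference on @cache and taking one on the result (NULL at the end).
      * If @cache was removed in the meantime, fall back to a full lookup
      * starting right past it.
      */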
3345 static struct btrfs_block_group_cache *
3346 next_block_group(struct btrfs_fs_info *fs_info,
3347                  struct btrfs_block_group_cache *cache)
3348 {
3349         struct rb_node *node;
3350
3351         spin_lock(&fs_info->block_group_cache_lock);
3352
3353         /* If our block group was removed, we need a full search. */
3354         if (RB_EMPTY_NODE(&cache->cache_node)) {
3355                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3356
3357                 spin_unlock(&fs_info->block_group_cache_lock);
3358                 btrfs_put_block_group(cache);
3359                 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
                     return cache;
3360         }
3361         node = rb_next(&cache->cache_node);
3362         btrfs_put_block_group(cache);
3363         if (node) {
3364                 cache = rb_entry(node, struct btrfs_block_group_cache,
3365                                  cache_node);
3366                 btrfs_get_block_group(cache);
3367         } else
3368                 cache = NULL;
3369         spin_unlock(&fs_info->block_group_cache_lock);
3370         return cache;
3371 }
3372
3373 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3374                             struct btrfs_trans_handle *trans,
3375                             struct btrfs_path *path)
3376 {
3377         struct btrfs_fs_info *fs_info = block_group->fs_info;
3378         struct btrfs_root *root = fs_info->tree_root;
3379         struct inode *inode = NULL;
3380         struct extent_changeset *data_reserved = NULL;
3381         u64 alloc_hint = 0;
3382         int dcs = BTRFS_DC_ERROR;
3383         u64 num_pages = 0;
3384         int retries = 0;
3385         int ret = 0;
3386
3387         /*
3388          * If this block group is smaller than 100 megs don't bother caching the
3389          * block group.
3390          */
3391         if (block_group->key.offset < (100 * SZ_1M)) {
3392                 spin_lock(&block_group->lock);
3393                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3394                 spin_unlock(&block_group->lock);
3395                 return 0;
3396         }
3397
3398         if (trans->aborted)
3399                 return 0;
3400 again:
3401         inode = lookup_free_space_inode(fs_info, block_group, path);
3402         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3403                 ret = PTR_ERR(inode);
3404                 btrfs_release_path(path);
3405                 goto out;
3406         }
3407
3408         if (IS_ERR(inode)) {
3409                 BUG_ON(retries);
3410                 retries++;
3411
3412                 if (block_group->ro)
3413                         goto out_free;
3414
3415                 ret = create_free_space_inode(fs_info, trans, block_group,
3416                                               path);
3417                 if (ret)
3418                         goto out_free;
3419                 goto again;
3420         }
3421
3422         /*
3423          * We want to set the generation to 0, that way if anything goes wrong
3424          * from here on out we know not to trust this cache when we load up next
3425          * time.
3426          */
3427         BTRFS_I(inode)->generation = 0;
3428         ret = btrfs_update_inode(trans, root, inode);
3429         if (ret) {
3430                 /*
3431                  * So theoretically we could recover from this, simply set the
3432                  * super cache generation to 0 so we know to invalidate the
3433                  * cache, but then we'd have to keep track of the block groups
3434                  * that fail this way so we know we _have_ to reset this cache
3435                  * before the next commit or risk reading stale cache.  So to
3436                  * limit our exposure to horrible edge cases let's just abort the
3437                  * transaction, this only happens in really bad situations
3438                  * anyway.
3439                  */
3440                 btrfs_abort_transaction(trans, ret);
3441                 goto out_put;
3442         }
3443         WARN_ON(ret);
3444
3445         /* We've already setup this transaction, go ahead and exit */
3446         if (block_group->cache_generation == trans->transid &&
3447             i_size_read(inode)) {
3448                 dcs = BTRFS_DC_SETUP;
3449                 goto out_put;
3450         }
3451
3452         if (i_size_read(inode) > 0) {
3453                 ret = btrfs_check_trunc_cache_free_space(fs_info,
3454                                         &fs_info->global_block_rsv);
3455                 if (ret)
3456                         goto out_put;
3457
3458                 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
3459                 if (ret)
3460                         goto out_put;
3461         }
3462
3463         spin_lock(&block_group->lock);
3464         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3465             !btrfs_test_opt(fs_info, SPACE_CACHE)) {
3466                 /*
3467                  * don't bother trying to write stuff out _if_
3468                  * a) we're not cached,
3469                  * b) we're mounted with the nospace_cache option,
3470                  * c) we're using the v2 space cache (FREE_SPACE_TREE).
3471                  */
3472                 dcs = BTRFS_DC_WRITTEN;
3473                 spin_unlock(&block_group->lock);
3474                 goto out_put;
3475         }
3476         spin_unlock(&block_group->lock);
3477
3478         /*
3479          * We hit an ENOSPC when setting up the cache in this transaction, just
3480          * skip doing the setup, we've already cleared the cache so we're safe.
3481          */
3482         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3483                 ret = -ENOSPC;
3484                 goto out_put;
3485         }
3486
3487         /*
3488          * Try to preallocate enough space based on how big the block group is.
3489          * Keep in mind this has to include any pinned space which could end up
3490          * taking up quite a bit since it's not folded into the other space
3491          * cache.
3492          */
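             /*
              * Despite the name, num_pages ends up in bytes: 16 pages worth
              * of cache space for every 256MB of block group.
              */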
3493         num_pages = div_u64(block_group->key.offset, SZ_256M);
3494         if (!num_pages)
3495                 num_pages = 1;
3496
3497         num_pages *= 16;
3498         num_pages *= PAGE_SIZE;
3499
3500         ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
3501         if (ret)
3502                 goto out_put;
3503
3504         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3505                                               num_pages, num_pages,
3506                                               &alloc_hint);
3507         /*
3508          * Our cache requires contiguous chunks so that we don't modify a bunch
3509          * of metadata or split extents when writing the cache out, which means
3510          * we can enospc if we are heavily fragmented in addition to just normal
3511          * out of space conditions.  So if we hit this just skip setting up any
3512          * other block groups for this transaction, maybe we'll unpin enough
3513          * space the next time around.
3514          */
3515         if (!ret)
3516                 dcs = BTRFS_DC_SETUP;
3517         else if (ret == -ENOSPC)
3518                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3519
3520 out_put:
3521         iput(inode);
3522 out_free:
3523         btrfs_release_path(path);
3524 out:
3525         spin_lock(&block_group->lock);
3526         if (!ret && dcs == BTRFS_DC_SETUP)
3527                 block_group->cache_generation = trans->transid;
3528         block_group->disk_cache_state = dcs;
3529         spin_unlock(&block_group->lock);
3530
3531         extent_changeset_free(data_reserved);
3532         return ret;
3533 }
3534
3535 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3536                             struct btrfs_fs_info *fs_info)
3537 {
3538         struct btrfs_block_group_cache *cache, *tmp;
3539         struct btrfs_transaction *cur_trans = trans->transaction;
3540         struct btrfs_path *path;
3541
3542         if (list_empty(&cur_trans->dirty_bgs) ||
3543             !btrfs_test_opt(fs_info, SPACE_CACHE))
3544                 return 0;
3545
3546         path = btrfs_alloc_path();
3547         if (!path)
3548                 return -ENOMEM;
3549
3550         /* Could add new block groups, use _safe just in case */
3551         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3552                                  dirty_list) {
3553                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3554                         cache_save_setup(cache, trans, path);
3555         }
3556
3557         btrfs_free_path(path);
3558         return 0;
3559 }
3560
3561 /*
3562  * transaction commit does final block group cache writeback during a
3563  * critical section where nothing is allowed to change the FS.  This is
3564  * required in order for the cache to actually match the block group,
3565  * but can introduce a lot of latency into the commit.
3566  *
3567  * So, btrfs_start_dirty_block_groups is here to kick off block group
3568  * cache IO.  There's a chance we'll have to redo some of it if the
3569  * block group changes again during the commit, but it greatly reduces
3570  * the commit latency by getting rid of the easy block groups while
3571  * we're still allowing others to join the commit.
3572  */
3573 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
3574 {
3575         struct btrfs_fs_info *fs_info = trans->fs_info;
3576         struct btrfs_block_group_cache *cache;
3577         struct btrfs_transaction *cur_trans = trans->transaction;
3578         int ret = 0;
3579         int should_put;
3580         struct btrfs_path *path = NULL;
3581         LIST_HEAD(dirty);
3582         struct list_head *io = &cur_trans->io_bgs;
3583         int num_started = 0;
3584         int loops = 0;
3585
3586         spin_lock(&cur_trans->dirty_bgs_lock);
3587         if (list_empty(&cur_trans->dirty_bgs)) {
3588                 spin_unlock(&cur_trans->dirty_bgs_lock);
3589                 return 0;
3590         }
3591         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3592         spin_unlock(&cur_trans->dirty_bgs_lock);
3593
3594 again:
3595         /*
3596          * make sure all the block groups on our dirty list actually
3597          * exist
3598          */
3599         btrfs_create_pending_block_groups(trans);
3600
3601         if (!path) {
3602                 path = btrfs_alloc_path();
3603                 if (!path)
3604                         return -ENOMEM;
3605         }
3606
3607         /*
3608          * cache_write_mutex is here only to save us from balance or automatic
3609          * removal of empty block groups deleting this block group while we are
3610          * writing out the cache
3611          */
3612         mutex_lock(&trans->transaction->cache_write_mutex);
3613         while (!list_empty(&dirty)) {
3614                 bool drop_reserve = true;
3615
3616                 cache = list_first_entry(&dirty,
3617                                          struct btrfs_block_group_cache,
3618                                          dirty_list);
3619                 /*
3620                  * this can happen if something re-dirties a block
3621                  * group that is already under IO.  Just wait for it to
3622                  * finish and then do it all again
3623                  */
3624                 if (!list_empty(&cache->io_list)) {
3625                         list_del_init(&cache->io_list);
3626                         btrfs_wait_cache_io(trans, cache, path);
3627                         btrfs_put_block_group(cache);
3628                 }
3629
3630
3631                 /*
3632                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3633                  * if it should update the cache_state.  Don't delete
3634                  * until after we wait.
3635                  *
3636                  * Since we're not running in the commit critical section
3637                  * we need the dirty_bgs_lock to protect from update_block_group
3638                  */
3639                 spin_lock(&cur_trans->dirty_bgs_lock);
3640                 list_del_init(&cache->dirty_list);
3641                 spin_unlock(&cur_trans->dirty_bgs_lock);
3642
3643                 should_put = 1;
3644
3645                 cache_save_setup(cache, trans, path);
3646
3647                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3648                         cache->io_ctl.inode = NULL;
3649                         ret = btrfs_write_out_cache(fs_info, trans,
3650                                                     cache, path);
3651                         if (ret == 0 && cache->io_ctl.inode) {
3652                                 num_started++;
3653                                 should_put = 0;
3654
3655                                 /*
3656                                  * The cache_write_mutex is protecting the
3657                                  * io_list, also refer to the definition of
3658                                  * btrfs_transaction::io_bgs for more details
3659                                  */
3660                                 list_add_tail(&cache->io_list, io);
3661                         } else {
3662                                 /*
3663                                  * if we failed to write the cache, the
3664                                  * generation will be bad and life goes on
3665                                  */
3666                                 ret = 0;
3667                         }
3668                 }
3669                 if (!ret) {
3670                         ret = write_one_cache_group(trans, fs_info,
3671                                                     path, cache);
3672                         /*
3673                          * Our block group might still be attached to the list
3674                          * of new block groups in the transaction handle of some
3675                          * other task (struct btrfs_trans_handle->new_bgs). This
3676                          * means its block group item isn't yet in the extent
3677                          * tree. If this happens ignore the error, as we will
3678                          * try again later in the critical section of the
3679                          * transaction commit.
3680                          */
3681                         if (ret == -ENOENT) {
3682                                 ret = 0;
3683                                 spin_lock(&cur_trans->dirty_bgs_lock);
3684                                 if (list_empty(&cache->dirty_list)) {
3685                                         list_add_tail(&cache->dirty_list,
3686                                                       &cur_trans->dirty_bgs);
3687                                         btrfs_get_block_group(cache);
3688                                         drop_reserve = false;
3689                                 }
3690                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3691                         } else if (ret) {
3692                                 btrfs_abort_transaction(trans, ret);
3693                         }
3694                 }
3695
3696                 /* if it's not on the io list, we need to put the block group */
3697                 if (should_put)
3698                         btrfs_put_block_group(cache);
3699                 if (drop_reserve)
3700                         btrfs_delayed_refs_rsv_release(fs_info, 1);
3701
3702                 if (ret)
3703                         break;
3704
3705                 /*
3706                  * Avoid blocking other tasks for too long. It might even save
3707                  * us from writing caches for block groups that are going to be
3708                  * removed.
3709                  */
3710                 mutex_unlock(&trans->transaction->cache_write_mutex);
3711                 mutex_lock(&trans->transaction->cache_write_mutex);
3712         }
3713         mutex_unlock(&trans->transaction->cache_write_mutex);
3714
3715         /*
3716          * go through delayed refs for all the stuff we've just kicked off
3717          * and then loop back (just once)
3718          */
3719         ret = btrfs_run_delayed_refs(trans, 0);
3720         if (!ret && loops == 0) {
3721                 loops++;
3722                 spin_lock(&cur_trans->dirty_bgs_lock);
3723                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3724                 /*
3725                  * dirty_bgs_lock protects us from concurrent block group
3726                  * deletes too (not just cache_write_mutex).
3727                  */
3728                 if (!list_empty(&dirty)) {
3729                         spin_unlock(&cur_trans->dirty_bgs_lock);
3730                         goto again;
3731                 }
3732                 spin_unlock(&cur_trans->dirty_bgs_lock);
3733         } else if (ret < 0) {
3734                 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3735         }
3736
3737         btrfs_free_path(path);
3738         return ret;
3739 }
3740
3741 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3742                                    struct btrfs_fs_info *fs_info)
3743 {
3744         struct btrfs_block_group_cache *cache;
3745         struct btrfs_transaction *cur_trans = trans->transaction;
3746         int ret = 0;
3747         int should_put;
3748         struct btrfs_path *path;
3749         struct list_head *io = &cur_trans->io_bgs;
3750         int num_started = 0;
3751
3752         path = btrfs_alloc_path();
3753         if (!path)
3754                 return -ENOMEM;
3755
3756         /*
3757          * Even though we are in the critical section of the transaction commit,
3758          * we can still have concurrent tasks adding elements to this
3759          * transaction's list of dirty block groups. These tasks correspond to
3760          * endio free space workers started when writeback finishes for a
3761          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3762          * allocate new block groups as a result of COWing nodes of the root
3763          * tree when updating the free space inode. The writeback for the space
3764          * caches is triggered by an earlier call to
3765          * btrfs_start_dirty_block_groups() and iterations of the following
3766          * loop.
3767          * Also we want to do the cache_save_setup first and then run the
3768          * delayed refs to make sure we have the best chance at doing this all
3769          * in one shot.
3770          */
3771         spin_lock(&cur_trans->dirty_bgs_lock);
3772         while (!list_empty(&cur_trans->dirty_bgs)) {
3773                 cache = list_first_entry(&cur_trans->dirty_bgs,
3774                                          struct btrfs_block_group_cache,
3775                                          dirty_list);
3776
3777                 /*
3778                  * this can happen if cache_save_setup re-dirties a block
3779                  * group that is already under IO.  Just wait for it to
3780                  * finish and then do it all again
3781                  */
3782                 if (!list_empty(&cache->io_list)) {
3783                         spin_unlock(&cur_trans->dirty_bgs_lock);
3784                         list_del_init(&cache->io_list);
3785                         btrfs_wait_cache_io(trans, cache, path);
3786                         btrfs_put_block_group(cache);
3787                         spin_lock(&cur_trans->dirty_bgs_lock);
3788                 }
3789
3790                 /*
3791                  * don't remove from the dirty list until after we've waited
3792                  * on any pending IO
3793                  */
3794                 list_del_init(&cache->dirty_list);
3795                 spin_unlock(&cur_trans->dirty_bgs_lock);
3796                 should_put = 1;
3797
3798                 cache_save_setup(cache, trans, path);
3799
3800                 if (!ret)
3801                         ret = btrfs_run_delayed_refs(trans,
3802                                                      (unsigned long) -1);
3803
3804                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3805                         cache->io_ctl.inode = NULL;
3806                         ret = btrfs_write_out_cache(fs_info, trans,
3807                                                     cache, path);
3808                         if (ret == 0 && cache->io_ctl.inode) {
3809                                 num_started++;
3810                                 should_put = 0;
3811                                 list_add_tail(&cache->io_list, io);
3812                         } else {
3813                                 /*
3814                                  * if we failed to write the cache, the
3815                                  * generation will be bad and life goes on
3816                                  */
3817                                 ret = 0;
3818                         }
3819                 }
3820                 if (!ret) {
3821                         ret = write_one_cache_group(trans, fs_info,
3822                                                     path, cache);
3823                         /*
3824                          * One of the free space endio workers might have
3825                          * created a new block group while updating a free space
3826                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
3827                          * and hasn't released its transaction handle yet, in
3828                          * which case the new block group is still attached to
3829                          * its transaction handle and its creation has not
3830                          * finished yet (no block group item in the extent tree
3831                          * yet, etc). If this is the case, wait for all free
3832                          * space endio workers to finish and retry. This is a
3833                          * very rare case so no need for a more efficient and
3834                          * complex approach.
3835                          */
3836                         if (ret == -ENOENT) {
3837                                 wait_event(cur_trans->writer_wait,
3838                                    atomic_read(&cur_trans->num_writers) == 1);
3839                                 ret = write_one_cache_group(trans, fs_info,
3840                                                             path, cache);
3841                         }
3842                         if (ret)
3843                                 btrfs_abort_transaction(trans, ret);
3844                 }
3845
3846                 /* if it's not on the io list, we need to put the block group */
3847                 if (should_put)
3848                         btrfs_put_block_group(cache);
3849                 btrfs_delayed_refs_rsv_release(fs_info, 1);
3850                 spin_lock(&cur_trans->dirty_bgs_lock);
3851         }
3852         spin_unlock(&cur_trans->dirty_bgs_lock);
3853
3854         /*
3855          * Refer to the definition of io_bgs member for details why it's safe
3856          * to use it without any locking
3857          */
3858         while (!list_empty(io)) {
3859                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3860                                          io_list);
3861                 list_del_init(&cache->io_list);
3862                 btrfs_wait_cache_io(trans, cache, path);
3863                 btrfs_put_block_group(cache);
3864         }
3865
3866         btrfs_free_path(path);
3867         return ret;
3868 }
3869
3870 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
3871 {
3872         struct btrfs_block_group_cache *block_group;
3873         int readonly = 0;
3874
3875         block_group = btrfs_lookup_block_group(fs_info, bytenr);
3876         if (!block_group || block_group->ro)
3877                 readonly = 1;
3878         if (block_group)
3879                 btrfs_put_block_group(block_group);
3880         return readonly;
3881 }
3882
3883 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3884 {
3885         struct btrfs_block_group_cache *bg;
3886         bool ret = true;
3887
3888         bg = btrfs_lookup_block_group(fs_info, bytenr);
3889         if (!bg)
3890                 return false;
3891
3892         spin_lock(&bg->lock);
3893         if (bg->ro)
3894                 ret = false;
3895         else
3896                 atomic_inc(&bg->nocow_writers);
3897         spin_unlock(&bg->lock);
3898
3899         /* no put on block group, done by btrfs_dec_nocow_writers */
3900         if (!ret)
3901                 btrfs_put_block_group(bg);
3902
3903         return ret;
3904
3905 }
3906
3907 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
3908 {
3909         struct btrfs_block_group_cache *bg;
3910
3911         bg = btrfs_lookup_block_group(fs_info, bytenr);
3912         ASSERT(bg);
3913         if (atomic_dec_and_test(&bg->nocow_writers))
3914                 wake_up_var(&bg->nocow_writers);
3915         /*
3916          * Once for our lookup and once for the lookup done by a previous call
3917          * to btrfs_inc_nocow_writers()
3918          */
3919         btrfs_put_block_group(bg);
3920         btrfs_put_block_group(bg);
3921 }
3922
3923 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
3924 {
3925         wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
3926 }
3927
3928 static const char *alloc_name(u64 flags)
3929 {
3930         switch (flags) {
3931         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3932                 return "mixed";
3933         case BTRFS_BLOCK_GROUP_METADATA:
3934                 return "metadata";
3935         case BTRFS_BLOCK_GROUP_DATA:
3936                 return "data";
3937         case BTRFS_BLOCK_GROUP_SYSTEM:
3938                 return "system";
3939         default:
3940                 WARN_ON(1);
3941                 return "invalid-combination";
3942         }
3943 }
3944
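     /*
      * Allocate and initialize the space_info for the given block group type:
      * set up the per-cpu total_bytes_pinned counter and the sysfs kobject,
      * then add it to fs_info->space_info (and data_sinfo for the data type).
      */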
3945 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
3946 {
3947
3948         struct btrfs_space_info *space_info;
3949         int i;
3950         int ret;
3951
3952         space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
3953         if (!space_info)
3954                 return -ENOMEM;
3955
3956         ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
3957                                  GFP_KERNEL);
3958         if (ret) {
3959                 kfree(space_info);
3960                 return ret;
3961         }
3962
3963         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3964                 INIT_LIST_HEAD(&space_info->block_groups[i]);
3965         init_rwsem(&space_info->groups_sem);
3966         spin_lock_init(&space_info->lock);
3967         space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3968         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3969         init_waitqueue_head(&space_info->wait);
3970         INIT_LIST_HEAD(&space_info->ro_bgs);
3971         INIT_LIST_HEAD(&space_info->tickets);
3972         INIT_LIST_HEAD(&space_info->priority_tickets);
3973
3974         ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
3975                                     info->space_info_kobj, "%s",
3976                                     alloc_name(space_info->flags));
3977         if (ret) {
3978                 percpu_counter_destroy(&space_info->total_bytes_pinned);
3979                 kfree(space_info);
3980                 return ret;
3981         }
3982
3983         list_add_rcu(&space_info->list, &info->space_info);
3984         if (flags & BTRFS_BLOCK_GROUP_DATA)
3985                 info->data_sinfo = space_info;
3986
3987         return ret;
3988 }
3989
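     /*
      * Account a block group's space into its (already existing) space_info,
      * scaling the on-disk totals by the RAID factor of @flags.
      */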
3990 static void update_space_info(struct btrfs_fs_info *info, u64 flags,
3991                              u64 total_bytes, u64 bytes_used,
3992                              u64 bytes_readonly,
3993                              struct btrfs_space_info **space_info)
3994 {
3995         struct btrfs_space_info *found;
3996         int factor;
3997
3998         factor = btrfs_bg_type_to_factor(flags);
3999
4000         found = __find_space_info(info, flags);
4001         ASSERT(found);
4002         spin_lock(&found->lock);
4003         found->total_bytes += total_bytes;
4004         found->disk_total += total_bytes * factor;
4005         found->bytes_used += bytes_used;
4006         found->disk_used += bytes_used * factor;
4007         found->bytes_readonly += bytes_readonly;
4008         if (total_bytes > 0)
4009                 found->full = 0;
4010         space_info_add_new_bytes(info, found, total_bytes -
4011                                  bytes_used - bytes_readonly);
4012         spin_unlock(&found->lock);
4013         *space_info = found;
4014 }
4015
4016 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
4017 {
4018         u64 extra_flags = chunk_to_extended(flags) &
4019                                 BTRFS_EXTENDED_PROFILE_MASK;
4020
4021         write_seqlock(&fs_info->profiles_lock);
4022         if (flags & BTRFS_BLOCK_GROUP_DATA)
4023                 fs_info->avail_data_alloc_bits |= extra_flags;
4024         if (flags & BTRFS_BLOCK_GROUP_METADATA)
4025                 fs_info->avail_metadata_alloc_bits |= extra_flags;
4026         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4027                 fs_info->avail_system_alloc_bits |= extra_flags;
4028         write_sequnlock(&fs_info->profiles_lock);
4029 }
4030
4031 /*
4032  * returns target flags in extended format or 0 if restripe for this
4033  * chunk_type is not in progress
4034  *
4035  * should be called with balance_lock held
4036  */
4037 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
4038 {
4039         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4040         u64 target = 0;
4041
4042         if (!bctl)
4043                 return 0;
4044
4045         if (flags & BTRFS_BLOCK_GROUP_DATA &&
4046             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4047                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
4048         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
4049                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4050                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
4051         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
4052                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
4053                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
4054         }
4055
4056         return target;
4057 }
4058
4059 /*
4060  * @flags: available profiles in extended format (see ctree.h)
4061  *
4062  * Returns reduced profile in chunk format.  If profile changing is in
4063  * progress (either running or paused) picks the target profile (if it's
4064  * already available), otherwise falls back to plain reducing.
4065  */
4066 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
4067 {
4068         u64 num_devices = fs_info->fs_devices->rw_devices;
4069         u64 target;
4070         u64 raid_type;
4071         u64 allowed = 0;
4072
4073         /*
4074          * see if restripe for this chunk_type is in progress, if so
4075          * try to reduce to the target profile
4076          */
4077         spin_lock(&fs_info->balance_lock);
4078         target = get_restripe_target(fs_info, flags);
4079         if (target) {
4080                 /* pick target profile only if it's already available */
4081                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
4082                         spin_unlock(&fs_info->balance_lock);
4083                         return extended_to_chunk(target);
4084                 }
4085         }
4086         spin_unlock(&fs_info->balance_lock);
4087
4088         /* First, mask out the RAID levels which aren't possible */
4089         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4090                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
4091                         allowed |= btrfs_raid_array[raid_type].bg_flag;
4092         }
4093         allowed &= flags;
4094
4095         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
4096                 allowed = BTRFS_BLOCK_GROUP_RAID6;
4097         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4098                 allowed = BTRFS_BLOCK_GROUP_RAID5;
4099         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4100                 allowed = BTRFS_BLOCK_GROUP_RAID10;
4101         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4102                 allowed = BTRFS_BLOCK_GROUP_RAID1;
4103         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4104                 allowed = BTRFS_BLOCK_GROUP_RAID0;
4105
4106         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4107
4108         return extended_to_chunk(flags | allowed);
4109 }
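
/*
 * Editor's note -- illustrative example, not part of the kernel source.
 * How the reduction above behaves on a 2-device filesystem where both
 * RAID0 and RAID1 data block groups already exist (a common state while
 * a balance conversion is in progress):
 *
 *     flags   = BTRFS_BLOCK_GROUP_DATA | RAID0 | RAID1   (extended format)
 *     allowed = RAID0 | RAID1    (both need >= 2 devices, both are in flags)
 *     pick    = RAID1            (RAID1 outranks RAID0 in the chain above)
 *     result  = extended_to_chunk(BTRFS_BLOCK_GROUP_DATA | RAID1)
 *
 * i.e. new chunks use the most redundant profile that is both possible with
 * the current device count and already present in the given flags.
 */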
4110
4111 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
4112 {
4113         unsigned seq;
4114         u64 flags;
4115
4116         do {
4117                 flags = orig_flags;
4118                 seq = read_seqbegin(&fs_info->profiles_lock);
4119
4120                 if (flags & BTRFS_BLOCK_GROUP_DATA)
4121                         flags |= fs_info->avail_data_alloc_bits;
4122                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4123                         flags |= fs_info->avail_system_alloc_bits;
4124                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4125                         flags |= fs_info->avail_metadata_alloc_bits;
4126         } while (read_seqretry(&fs_info->profiles_lock, seq));
4127
4128         return btrfs_reduce_alloc_profile(fs_info, flags);
4129 }
4130
4131 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
4132 {
4133         struct btrfs_fs_info *fs_info = root->fs_info;
4134         u64 flags;
4135         u64 ret;
4136
4137         if (data)
4138                 flags = BTRFS_BLOCK_GROUP_DATA;
4139         else if (root == fs_info->chunk_root)
4140                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4141         else
4142                 flags = BTRFS_BLOCK_GROUP_METADATA;
4143
4144         ret = get_alloc_profile(fs_info, flags);
4145         return ret;
4146 }
4147
4148 u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
4149 {
4150         return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
4151 }
4152
4153 u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
4154 {
4155         return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4156 }
4157
4158 u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
4159 {
4160         return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4161 }
4162
4163 static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
4164                                  bool may_use_included)
4165 {
4166         ASSERT(s_info);
4167         return s_info->bytes_used + s_info->bytes_reserved +
4168                 s_info->bytes_pinned + s_info->bytes_readonly +
4169                 (may_use_included ? s_info->bytes_may_use : 0);
4170 }
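
/*
 * Editor's note -- illustrative example, not part of the kernel source.
 * "Used" space is the sum of all committed and in-flight reservations.
 * For a metadata space_info with, say:
 *
 *     bytes_used = 6G, bytes_reserved = 512M, bytes_pinned = 256M,
 *     bytes_readonly = 0, bytes_may_use = 1G
 *
 *     btrfs_space_info_used(s_info, false) == 6.75G
 *     btrfs_space_info_used(s_info, true)  == 7.75G  (also counts speculative
 *                                                     delalloc reservations)
 */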
4171
4172 int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
4173 {
4174         struct btrfs_root *root = inode->root;
4175         struct btrfs_fs_info *fs_info = root->fs_info;
4176         struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
4177         u64 used;
4178         int ret = 0;
4179         int need_commit = 2;
4180         int have_pinned_space;
4181
4182         /* make sure bytes are sectorsize aligned */
4183         bytes = ALIGN(bytes, fs_info->sectorsize);
4184
4185         if (btrfs_is_free_space_inode(inode)) {
4186                 need_commit = 0;
4187                 ASSERT(current->journal_info);
4188         }
4189
4190 again:
4191         /* make sure we have enough space to handle the data first */
4192         spin_lock(&data_sinfo->lock);
4193         used = btrfs_space_info_used(data_sinfo, true);
4194
4195         if (used + bytes > data_sinfo->total_bytes) {
4196                 struct btrfs_trans_handle *trans;
4197
4198                 /*
4199                  * if we don't have enough free bytes in this space then we need
4200                  * to alloc a new chunk.
4201                  */
4202                 if (!data_sinfo->full) {
4203                         u64 alloc_target;
4204
4205                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4206                         spin_unlock(&data_sinfo->lock);
4207
4208                         alloc_target = btrfs_data_alloc_profile(fs_info);
4209                         /*
4210                          * It is ugly that we don't call a nolock join
4211                          * transaction for the free space inode case here,
4212                          * but it is safe because we only do the data space
4213                          * reservation for the free space cache from within
4214                          * a transaction context: the common join transaction
4215                          * just increases the counter of the current
4216                          * transaction handle and doesn't try to acquire the
4217                          * trans_lock of the fs.
4218                          */
4219                         trans = btrfs_join_transaction(root);
4220                         if (IS_ERR(trans))
4221                                 return PTR_ERR(trans);
4222
4223                         ret = do_chunk_alloc(trans, alloc_target,
4224                                              CHUNK_ALLOC_NO_FORCE);
4225                         btrfs_end_transaction(trans);
4226                         if (ret < 0) {
4227                                 if (ret != -ENOSPC)
4228                                         return ret;
4229                                 else {
4230                                         have_pinned_space = 1;
4231                                         goto commit_trans;
4232                                 }
4233                         }
4234
4235                         goto again;
4236                 }
4237
4238                 /*
4239                  * If we don't have enough pinned space to deal with this
4240                  * allocation, and no chunk was removed in the current
4241                  * transaction, don't bother committing the transaction.
4242                  */
4243                 have_pinned_space = __percpu_counter_compare(
4244                         &data_sinfo->total_bytes_pinned,
4245                         used + bytes - data_sinfo->total_bytes,
4246                         BTRFS_TOTAL_BYTES_PINNED_BATCH);
4247                 spin_unlock(&data_sinfo->lock);
4248
4249                 /* commit the current transaction and try again */
4250 commit_trans:
4251                 if (need_commit) {
4252                         need_commit--;
4253
4254                         if (need_commit > 0) {
4255                                 btrfs_start_delalloc_roots(fs_info, -1);
4256                                 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
4257                                                          (u64)-1);
4258                         }
4259
4260                         trans = btrfs_join_transaction(root);
4261                         if (IS_ERR(trans))
4262                                 return PTR_ERR(trans);
4263                         if (have_pinned_space >= 0 ||
4264                             test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4265                                      &trans->transaction->flags) ||
4266                             need_commit > 0) {
4267                                 ret = btrfs_commit_transaction(trans);
4268                                 if (ret)
4269                                         return ret;
4270                                 /*
4271                                  * The cleaner kthread might still be doing iput
4272                                  * operations. Wait for it to finish so that
4273                                  * more space is released.  We don't need to
4274                                  * explicitly run the delayed iputs here because
4275                                  * the commit_transaction would have woken up
4276                                  * the cleaner.
4277                                  */
4278                                 ret = btrfs_wait_on_delayed_iputs(fs_info);
4279                                 if (ret)
4280                                         return ret;
4281                                 goto again;
4282                         } else {
4283                                 btrfs_end_transaction(trans);
4284                         }
4285                 }
4286
4287                 trace_btrfs_space_reservation(fs_info,
4288                                               "space_info:enospc",
4289                                               data_sinfo->flags, bytes, 1);
4290                 return -ENOSPC;
4291         }
4292         update_bytes_may_use(data_sinfo, bytes);
4293         trace_btrfs_space_reservation(fs_info, "space_info",
4294                                       data_sinfo->flags, bytes, 1);
4295         spin_unlock(&data_sinfo->lock);
4296
4297         return 0;
4298 }
4299
4300 int btrfs_check_data_free_space(struct inode *inode,
4301                         struct extent_changeset **reserved, u64 start, u64 len)
4302 {
4303         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4304         int ret;
4305
4306         /* align the range */
4307         len = round_up(start + len, fs_info->sectorsize) -
4308               round_down(start, fs_info->sectorsize);
4309         start = round_down(start, fs_info->sectorsize);
4310
4311         ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
4312         if (ret < 0)
4313                 return ret;
4314
4315         /* Use btrfs_qgroup_reserve_data() to reserve the precise qgroup amount. */
4316         ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
4317         if (ret < 0)
4318                 btrfs_free_reserved_data_space_noquota(inode, start, len);
4319         else
4320                 ret = 0;
4321         return ret;
4322 }
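
/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 * Callers pair this with btrfs_free_reserved_data_space() (below) when the
 * range ends up not being written, roughly:
 *
 *     struct extent_changeset *reserved = NULL;
 *
 *     ret = btrfs_check_data_free_space(inode, &reserved, start, len);
 *     if (ret < 0)
 *             return ret;
 *     ret = do_write_step();     (hypothetical step that may fail)
 *     if (ret < 0)
 *             btrfs_free_reserved_data_space(inode, reserved, start, len);
 *
 * The extent_changeset records which ranges qgroup actually reserved, so
 * only those are returned on the free side.
 */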
4323
4324 /*
4325  * Called if we need to clear a data reservation for this inode,
4326  * normally in an error case.
4327  *
4328  * This one will *NOT* use the accurate qgroup reserved space API, and is only
4329  * for cases where we can't sleep and are sure it won't affect the qgroup
4330  * reserved space, like clear_bit_hook().
4331  */
4332 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4333                                             u64 len)
4334 {
4335         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4336         struct btrfs_space_info *data_sinfo;
4337
4338         /* Make sure the range is aligned to sectorsize */
4339         len = round_up(start + len, fs_info->sectorsize) -
4340               round_down(start, fs_info->sectorsize);
4341         start = round_down(start, fs_info->sectorsize);
4342
4343         data_sinfo = fs_info->data_sinfo;
4344         spin_lock(&data_sinfo->lock);
4345         update_bytes_may_use(data_sinfo, -len);
4346         trace_btrfs_space_reservation(fs_info, "space_info",
4347                                       data_sinfo->flags, len, 0);
4348         spin_unlock(&data_sinfo->lock);
4349 }
4350
4351 /*
4352  * Called if we need to clear a data reservation for this inode,
4353  * normally in an error case.
4354  *
4355  * This one will handle the per-inode data rsv map for the accurate reserved
4356  * space framework.
4357  */
4358 void btrfs_free_reserved_data_space(struct inode *inode,
4359                         struct extent_changeset *reserved, u64 start, u64 len)
4360 {
4361         struct btrfs_root *root = BTRFS_I(inode)->root;
4362
4363         /* Make sure the range is aligned to sectorsize */
4364         len = round_up(start + len, root->fs_info->sectorsize) -
4365               round_down(start, root->fs_info->sectorsize);
4366         start = round_down(start, root->fs_info->sectorsize);
4367
4368         btrfs_free_reserved_data_space_noquota(inode, start, len);
4369         btrfs_qgroup_free_data(inode, reserved, start, len);
4370 }
4371
4372 static void force_metadata_allocation(struct btrfs_fs_info *info)
4373 {
4374         struct list_head *head = &info->space_info;
4375         struct btrfs_space_info *found;
4376
4377         rcu_read_lock();
4378         list_for_each_entry_rcu(found, head, list) {
4379                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4380                         found->force_alloc = CHUNK_ALLOC_FORCE;
4381         }
4382         rcu_read_unlock();
4383 }
4384
4385 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4386 {
4387         return (global->size << 1);
4388 }
4389
4390 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
4391                               struct btrfs_space_info *sinfo, int force)
4392 {
4393         u64 bytes_used = btrfs_space_info_used(sinfo, false);
4394         u64 thresh;
4395
4396         if (force == CHUNK_ALLOC_FORCE)
4397                 return 1;
4398
4399         /*
4400          * in limited mode, we want to have some free space up to
4401          * about 1% of the FS size.
4402          */
4403         if (force == CHUNK_ALLOC_LIMITED) {
4404                 thresh = btrfs_super_total_bytes(fs_info->super_copy);
4405                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4406
4407                 if (sinfo->total_bytes - bytes_used < thresh)
4408                         return 1;
4409         }
4410
4411         if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
4412                 return 0;
4413         return 1;
4414 }
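
/*
 * Editor's note -- illustrative example, not part of the kernel source.
 * For a 1TiB filesystem and a data space_info with total_bytes = 10GiB and
 * bytes_used (excluding bytes_may_use) = 9.5GiB:
 *
 *     CHUNK_ALLOC_LIMITED:  thresh = max(64M, 1% of 1TiB) ~= 10.2GiB,
 *                           free space (0.5GiB) < thresh      -> allocate
 *     CHUNK_ALLOC_NO_FORCE: 9.5GiB + 2MiB >= 80% of 10GiB     -> allocate
 *
 * So a new chunk is allocated once existing chunks are about 80% full, or
 * earlier in limited mode, which wants roughly 1% of the FS size kept free.
 */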
4415
4416 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
4417 {
4418         u64 num_dev;
4419
4420         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4421                     BTRFS_BLOCK_GROUP_RAID0 |
4422                     BTRFS_BLOCK_GROUP_RAID5 |
4423                     BTRFS_BLOCK_GROUP_RAID6))
4424                 num_dev = fs_info->fs_devices->rw_devices;
4425         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4426                 num_dev = 2;
4427         else
4428                 num_dev = 1;    /* DUP or single */
4429
4430         return num_dev;
4431 }
4432
4433 /*
4434  * Reserve space in the system space info for allocating or removing a chunk:
4435  * enough to update the affected device items and to add or remove one chunk
4436  * item.
4437  */
4438 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
4439 {
4440         struct btrfs_fs_info *fs_info = trans->fs_info;
4441         struct btrfs_space_info *info;
4442         u64 left;
4443         u64 thresh;
4444         int ret = 0;
4445         u64 num_devs;
4446
4447         /*
4448          * Needed because we can end up allocating a system chunk here, and the
4449          * reservation in the chunk block reserve must be atomic and race free.
4450          */
4451         lockdep_assert_held(&fs_info->chunk_mutex);
4452
4453         info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4454         spin_lock(&info->lock);
4455         left = info->total_bytes - btrfs_space_info_used(info, true);
4456         spin_unlock(&info->lock);
4457
4458         num_devs = get_profile_num_devs(fs_info, type);
4459
4460         /* num_devs device items to update and 1 chunk item to add or remove */
4461         thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
4462                 btrfs_calc_trans_metadata_size(fs_info, 1);
4463
4464         if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4465                 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
4466                            left, thresh, type);
4467                 dump_space_info(fs_info, info, 0, 0);
4468         }
4469
4470         if (left < thresh) {
4471                 u64 flags = btrfs_system_alloc_profile(fs_info);
4472
4473                 /*
4474                  * Ignore failure to create system chunk. We might end up not
4475                  * needing it, as we might not need to COW all nodes/leafs from
4476                  * the paths we visit in the chunk tree (they were already COWed
4477                  * or created in the current transaction for example).
4478                  */
4479                 ret = btrfs_alloc_chunk(trans, flags);
4480         }
4481
4482         if (!ret) {
4483                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
4484                                           &fs_info->chunk_block_rsv,
4485                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4486                 if (!ret)
4487                         trans->chunk_bytes_reserved += thresh;
4488         }
4489 }
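
/*
 * Editor's note -- illustrative example, not part of the kernel source.
 * For a RAID1 chunk (get_profile_num_devs() == 2) the reservation made
 * above is:
 *
 *     thresh = btrfs_calc_trunc_metadata_size(fs_info, 2)   (2 device items)
 *            + btrfs_calc_trans_metadata_size(fs_info, 1)   (1 chunk item)
 *
 * i.e. a handful of tree blocks of headroom in the SYSTEM space_info; if it
 * cannot cover that, a new system chunk is allocated before reserving.
 */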
4490
4491 /*
4492  * If force is CHUNK_ALLOC_FORCE:
4493  *    - return 1 if it successfully allocates a chunk,
4494  *    - return errors including -ENOSPC otherwise.
4495  * If force is NOT CHUNK_ALLOC_FORCE:
4496  *    - return 0 if it doesn't need to allocate a new chunk,
4497  *    - return 1 if it successfully allocates a chunk,
4498  *    - return errors including -ENOSPC otherwise.
4499  */
4500 static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
4501                           int force)
4502 {
4503         struct btrfs_fs_info *fs_info = trans->fs_info;
4504         struct btrfs_space_info *space_info;
4505         bool wait_for_alloc = false;
4506         bool should_alloc = false;
4507         int ret = 0;
4508
4509         /* Don't re-enter if we're already allocating a chunk */
4510         if (trans->allocating_chunk)
4511                 return -ENOSPC;
4512
4513         space_info = __find_space_info(fs_info, flags);
4514         ASSERT(space_info);
4515
4516         do {
4517                 spin_lock(&space_info->lock);
4518                 if (force < space_info->force_alloc)
4519                         force = space_info->force_alloc;
4520                 should_alloc = should_alloc_chunk(fs_info, space_info, force);
4521                 if (space_info->full) {
4522                         /* No more free physical space */
4523                         if (should_alloc)
4524                                 ret = -ENOSPC;
4525                         else
4526                                 ret = 0;
4527                         spin_unlock(&space_info->lock);
4528                         return ret;
4529                 } else if (!should_alloc) {
4530                         spin_unlock(&space_info->lock);
4531                         return 0;
4532                 } else if (space_info->chunk_alloc) {
4533                         /*
4534                          * Someone is already allocating, so we need to block
4535                          * until this someone is finished and then loop to
4536                          * recheck if we should continue with our allocation
4537                          * attempt.
4538                          */
4539                         wait_for_alloc = true;
4540                         spin_unlock(&space_info->lock);
4541                         mutex_lock(&fs_info->chunk_mutex);
4542                         mutex_unlock(&fs_info->chunk_mutex);
4543                 } else {
4544                         /* Proceed with allocation */
4545                         space_info->chunk_alloc = 1;
4546                         wait_for_alloc = false;
4547                         spin_unlock(&space_info->lock);
4548                 }
4549
4550                 cond_resched();
4551         } while (wait_for_alloc);
4552
4553         mutex_lock(&fs_info->chunk_mutex);
4554         trans->allocating_chunk = true;
4555
4556         /*
4557          * If we have mixed data/metadata chunks we want to make sure we keep
4558          * allocating mixed chunks instead of individual chunks.
4559          */
4560         if (btrfs_mixed_space_info(space_info))
4561                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4562
4563         /*
4564          * if we're doing a data chunk, go ahead and make sure that
4565          * we keep a reasonable number of metadata chunks allocated in the
4566          * FS as well.
4567          */
4568         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4569                 fs_info->data_chunk_allocations++;
4570                 if (!(fs_info->data_chunk_allocations %
4571                       fs_info->metadata_ratio))
4572                         force_metadata_allocation(fs_info);
4573         }
4574
4575         /*
4576          * Check if we have enough space in SYSTEM chunk because we may need
4577          * to update devices.
4578          */
4579         check_system_chunk(trans, flags);
4580
4581         ret = btrfs_alloc_chunk(trans, flags);
4582         trans->allocating_chunk = false;
4583
4584         spin_lock(&space_info->lock);
4585         if (ret < 0) {
4586                 if (ret == -ENOSPC)
4587                         space_info->full = 1;
4588                 else
4589                         goto out;
4590         } else {
4591                 ret = 1;
4592                 space_info->max_extent_size = 0;
4593         }
4594
4595         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4596 out:
4597         space_info->chunk_alloc = 0;
4598         spin_unlock(&space_info->lock);
4599         mutex_unlock(&fs_info->chunk_mutex);
4600         /*
4601          * When we allocate a new chunk we reserve space in the chunk block
4602          * reserve to make sure we can COW nodes/leafs in the chunk tree or
4603          * add new nodes/leafs to it if we end up needing to do it when
4604          * inserting the chunk item and updating device items as part of the
4605          * second phase of chunk allocation, performed by
4606          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4607          * large number of new block groups to create in our transaction
4608          * handle's new_bgs list to avoid exhausting the chunk block reserve
4609          * in extreme cases - like having a single transaction create many new
4610          * block groups when starting to write out the free space caches of all
4611          * the block groups that were made dirty during the lifetime of the
4612          * transaction.
4613          */
4614         if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
4615                 btrfs_create_pending_block_groups(trans);
4616
4617         return ret;
4618 }
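
/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 * Typical caller pattern (cf. btrfs_alloc_data_chunk_ondemand() above):
 *
 *     trans = btrfs_join_transaction(root);
 *     if (IS_ERR(trans))
 *             return PTR_ERR(trans);
 *     ret = do_chunk_alloc(trans, btrfs_data_alloc_profile(fs_info),
 *                          CHUNK_ALLOC_NO_FORCE);
 *     btrfs_end_transaction(trans);
 *     (ret == 1: chunk allocated, ret == 0: not needed, ret < 0: error)
 *
 * With CHUNK_ALLOC_NO_FORCE the call is cheap while the space_info still
 * has room, so callers can try it opportunistically before falling back to
 * flushing or committing.
 */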
4619
4620 static int can_overcommit(struct btrfs_fs_info *fs_info,
4621                           struct btrfs_space_info *space_info, u64 bytes,
4622                           enum btrfs_reserve_flush_enum flush,
4623                           bool system_chunk)
4624 {
4625         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4626         u64 profile;
4627         u64 space_size;
4628         u64 avail;
4629         u64 used;
4630         int factor;
4631
4632         /* Don't overcommit when in mixed mode. */
4633         if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
4634                 return 0;
4635
4636         if (system_chunk)
4637                 profile = btrfs_system_alloc_profile(fs_info);
4638         else
4639                 profile = btrfs_metadata_alloc_profile(fs_info);
4640
4641         used = btrfs_space_info_used(space_info, false);
4642
4643         /*
4644          * We only want to allow over committing if we have lots of actual space
4645          * free, but if we don't have enough space to handle the global reserve
4646          * space then we could end up having a real enospc problem when trying
4647          * to allocate a chunk or some other such important allocation.
4648          */
4649         spin_lock(&global_rsv->lock);
4650         space_size = calc_global_rsv_need_space(global_rsv);
4651         spin_unlock(&global_rsv->lock);
4652         if (used + space_size >= space_info->total_bytes)
4653                 return 0;
4654
4655         used += space_info->bytes_may_use;
4656
4657         avail = atomic64_read(&fs_info->free_chunk_space);
4658
4659         /*
4660          * If we have dup, raid1 or raid10 then only half of the free
4661          * space is actually usable.  For raid56, the space info used
4662          * doesn't include the parity drive, so we don't have to
4663          * change the math
4664          */
4665         factor = btrfs_bg_type_to_factor(profile);
4666         avail = div_u64(avail, factor);
4667
4668         /*
4669          * If we aren't flushing all things, let us overcommit up to
4670          * half of the space.  If we can flush, don't let us overcommit
4671          * too much; only let it overcommit up to 1/8 of the space.
4672          */
4673         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4674                 avail >>= 3;
4675         else
4676                 avail >>= 1;
4677
4678         if (used + bytes < space_info->total_bytes + avail)
4679                 return 1;
4680         return 0;
4681 }
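
/*
 * Editor's note -- illustrative example, not part of the kernel source.
 * Overcommit math for metadata on a RAID1 profile (factor == 2) with
 * 100GiB of unallocated device space left:
 *
 *     avail = 100GiB / 2 = 50GiB of usable future chunk space
 *     BTRFS_RESERVE_FLUSH_ALL:   avail >>= 3  ->  6.25GiB of overcommit
 *     other flush modes:         avail >>= 1  ->  25GiB of overcommit
 *
 * The reservation succeeds while used + bytes stays below total_bytes +
 * avail; callers that can flush get the smaller cushion because they still
 * have other ways to reclaim space.
 */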
4682
4683 static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
4684                                          unsigned long nr_pages, int nr_items)
4685 {
4686         struct super_block *sb = fs_info->sb;
4687
4688         if (down_read_trylock(&sb->s_umount)) {
4689                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4690                 up_read(&sb->s_umount);
4691         } else {
4692                 /*
4693                  * We needn't worry about the filesystem going from r/w to r/o
4694                  * even though we don't acquire the ->s_umount mutex, because
4695                  * the filesystem should guarantee that the delalloc inode list
4696                  * is empty once the filesystem is read-only (all dirty pages
4697                  * have been written to disk).
4698                  */
4699                 btrfs_start_delalloc_roots(fs_info, nr_items);
4700                 if (!current->journal_info)
4701                         btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
4702         }
4703 }
4704
4705 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
4706                                         u64 to_reclaim)
4707 {
4708         u64 bytes;
4709         u64 nr;
4710
4711         bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
4712         nr = div64_u64(to_reclaim, bytes);
4713         if (!nr)
4714                 nr = 1;
4715         return nr;
4716 }
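
/*
 * Editor's note -- illustrative example, not part of the kernel source.
 * btrfs_calc_trans_metadata_size(fs_info, 1) is roughly the worst-case cost
 * of modifying a single item (a full tree path with headroom for splits),
 * so reclaiming e.g. 4MiB of metadata reservation maps to only a few items:
 *
 *     nr = div64_u64(4MiB, btrfs_calc_trans_metadata_size(fs_info, 1));
 *     if (!nr)
 *             nr = 1;
 *
 * shrink_delalloc() below then scales this back up by EXTENT_SIZE_PER_ITEM
 * to decide how much delalloc to write out.
 */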
4717
4718 #define EXTENT_SIZE_PER_ITEM    SZ_256K
4719
4720 /*
4721  * shrink metadata reservation for delalloc
4722  */
4723 static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
4724                             u64 orig, bool wait_ordered)
4725 {
4726         struct btrfs_space_info *space_info;
4727         struct btrfs_trans_handle *trans;
4728         u64 delalloc_bytes;
4729         u64 async_pages;
4730         u64 items;
4731         long time_left;
4732         unsigned long nr_pages;
4733         int loops;
4734
4735         /* Calc the number of items we need to flush for this space reservation */
4736         items = calc_reclaim_items_nr(fs_info, to_reclaim);
4737         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4738
4739         trans = (struct btrfs_trans_handle *)current->journal_info;
4740         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4741
4742         delalloc_bytes = percpu_counter_sum_positive(
4743                                                 &fs_info->delalloc_bytes);
4744         if (delalloc_bytes == 0) {
4745                 if (trans)
4746                         return;
4747                 if (wait_ordered)
4748                         btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4749                 return;
4750         }
4751
4752         loops = 0;
4753         while (delalloc_bytes && loops < 3) {
4754                 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
4755
4756                 /*
4757                  * Triggers inode writeback for up to nr_pages. This will invoke
4758                  * ->writepages callback and trigger delalloc filling
4759                  *  (btrfs_run_delalloc_range()).
4760                  */
4761                 btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
4762
4763                 /*
4764                  * We need to wait for the compressed pages to start before
4765                  * we continue.
4766                  */
4767                 async_pages = atomic_read(&fs_info->async_delalloc_pages);
4768                 if (!async_pages)
4769                         goto skip_async;
4770
4771                 /*
4772                  * Calculate how many compressed pages we want to be written
4773                  * before we continue, i.e. if there are more async pages than
4774                  * we require, wait_event will wait until nr_pages are written.
4775                  */
4776                 if (async_pages <= nr_pages)
4777                         async_pages = 0;
4778                 else
4779                         async_pages -= nr_pages;
4780
4781                 wait_event(fs_info->async_submit_wait,
4782                            atomic_read(&fs_info->async_delalloc_pages) <=
4783                            (int)async_pages);
4784 skip_async:
4785                 spin_lock(&space_info->lock);
4786                 if (list_empty(&space_info->tickets) &&
4787                     list_empty(&space_info->priority_tickets)) {
4788                         spin_unlock(&space_info->lock);
4789                         break;
4790                 }
4791                 spin_unlock(&space_info->lock);
4792
4793                 loops++;
4794                 if (wait_ordered && !trans) {
4795                         btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
4796                 } else {
4797                         time_left = schedule_timeout_killable(1);
4798                         if (time_left)
4799                                 break;
4800                 }
4801                 delalloc_bytes = percpu_counter_sum_positive(
4802                                                 &fs_info->delalloc_bytes);
4803         }
4804 }
4805
4806 struct reserve_ticket {
4807         u64 orig_bytes;
4808         u64 bytes;
4809         int error;
4810         struct list_head list;
4811         wait_queue_head_t wait;
4812 };
4813
4814 /**
4815  * may_commit_transaction - possibly commit the transaction if it's ok to
4816  * @fs_info - the fs_info for our filesystem
4817  * @space_info - the space_info we are trying to reserve from
4818  *
4819  * This will check to make sure that committing the transaction will actually
4820  * get us somewhere (free enough pinned or otherwise reclaimable space) and
4821  * then commit the transaction if it does.  Otherwise it will return -ENOSPC,
4822  * or -EAGAIN if we are already running inside a transaction.
4823  */
4824 static int may_commit_transaction(struct btrfs_fs_info *fs_info,
4825                                   struct btrfs_space_info *space_info)
4826 {
4827         struct reserve_ticket *ticket = NULL;
4828         struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
4829         struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
4830         struct btrfs_trans_handle *trans;
4831         u64 bytes_needed;
4832         u64 reclaim_bytes = 0;
4833
4834         trans = (struct btrfs_trans_handle *)current->journal_info;
4835         if (trans)
4836                 return -EAGAIN;
4837
4838         spin_lock(&space_info->lock);
4839         if (!list_empty(&space_info->priority_tickets))
4840                 ticket = list_first_entry(&space_info->priority_tickets,
4841                                           struct reserve_ticket, list);
4842         else if (!list_empty(&space_info->tickets))
4843                 ticket = list_first_entry(&space_info->tickets,
4844                                           struct reserve_ticket, list);
4845         bytes_needed = (ticket) ? ticket->bytes : 0;
4846         spin_unlock(&space_info->lock);
4847
4848         if (!bytes_needed)
4849                 return 0;
4850
4851         trans = btrfs_join_transaction(fs_info->extent_root);
4852         if (IS_ERR(trans))
4853                 return PTR_ERR(trans);
4854
4855         /*
4856          * See if there is enough pinned space to make this reservation, or if
4857          * we have block groups that are going to be freed, allowing us to
4858          * possibly do a chunk allocation the next loop through.
4859          */
4860         if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
4861             __percpu_counter_compare(&space_info->total_bytes_pinned,
4862                                      bytes_needed,
4863                                      BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
4864                 goto commit;
4865
4866         /*
4867          * See if there is some space in the delayed insertion reservation for
4868          * this reservation.
4869          */
4870         if (space_info != delayed_rsv->space_info)
4871                 goto enospc;
4872
4873         spin_lock(&delayed_rsv->lock);
4874         reclaim_bytes += delayed_rsv->reserved;
4875         spin_unlock(&delayed_rsv->lock);
4876
4877         spin_lock(&delayed_refs_rsv->lock);
4878         reclaim_bytes += delayed_refs_rsv->reserved;
4879         spin_unlock(&delayed_refs_rsv->lock);
4880         if (reclaim_bytes >= bytes_needed)
4881                 goto commit;
4882         bytes_needed -= reclaim_bytes;
4883
4884         if (__percpu_counter_compare(&space_info->total_bytes_pinned,
4885                                    bytes_needed,
4886                                    BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
4887                 goto enospc;
4888
4889 commit:
4890         return btrfs_commit_transaction(trans);
4891 enospc:
4892         btrfs_end_transaction(trans);
4893         return -ENOSPC;
4894 }
4895
4896 /*
4897  * Try to flush some data based on policy set by @state. This is only advisory
4898  * and may fail for various reasons. The caller is supposed to examine the
4899  * state of @space_info to detect the outcome.
4900  */
4901 static void flush_space(struct btrfs_fs_info *fs_info,
4902                        struct btrfs_space_info *space_info, u64 num_bytes,
4903                        int state)
4904 {
4905         struct btrfs_root *root = fs_info->extent_root;
4906         struct btrfs_trans_handle *trans;
4907         int nr;
4908         int ret = 0;
4909
4910         switch (state) {
4911         case FLUSH_DELAYED_ITEMS_NR:
4912         case FLUSH_DELAYED_ITEMS:
4913                 if (state == FLUSH_DELAYED_ITEMS_NR)
4914                         nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
4915                 else
4916                         nr = -1;
4917
4918                 trans = btrfs_join_transaction(root);
4919                 if (IS_ERR(trans)) {
4920                         ret = PTR_ERR(trans);
4921                         break;
4922                 }
4923                 ret = btrfs_run_delayed_items_nr(trans, nr);
4924                 btrfs_end_transaction(trans);
4925                 break;
4926         case FLUSH_DELALLOC:
4927         case FLUSH_DELALLOC_WAIT:
4928                 shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
4929                                 state == FLUSH_DELALLOC_WAIT);
4930                 break;
4931         case FLUSH_DELAYED_REFS_NR:
4932         case FLUSH_DELAYED_REFS:
4933                 trans = btrfs_join_transaction(root);
4934                 if (IS_ERR(trans)) {
4935                         ret = PTR_ERR(trans);
4936                         break;
4937                 }
4938                 if (state == FLUSH_DELAYED_REFS_NR)
4939                         nr = calc_reclaim_items_nr(fs_info, num_bytes);
4940                 else
4941                         nr = 0;
4942                 btrfs_run_delayed_refs(trans, nr);
4943                 btrfs_end_transaction(trans);
4944                 break;
4945         case ALLOC_CHUNK:
4946         case ALLOC_CHUNK_FORCE:
4947                 trans = btrfs_join_transaction(root);
4948                 if (IS_ERR(trans)) {
4949                         ret = PTR_ERR(trans);
4950                         break;
4951                 }
4952                 ret = do_chunk_alloc(trans,
4953                                      btrfs_metadata_alloc_profile(fs_info),
4954                                      (state == ALLOC_CHUNK) ?
4955                                       CHUNK_ALLOC_NO_FORCE : CHUNK_ALLOC_FORCE);
4956                 btrfs_end_transaction(trans);
4957                 if (ret > 0 || ret == -ENOSPC)
4958                         ret = 0;
4959                 break;
4960         case COMMIT_TRANS:
4961                 /*
4962                  * If we have pending delayed iputs then we could free up a
4963                  * bunch of pinned space, so make sure we run the iputs before
4964                  * we do our pinned bytes check below.
4965                  */
4966                 btrfs_run_delayed_iputs(fs_info);
4967                 btrfs_wait_on_delayed_iputs(fs_info);
4968
4969                 ret = may_commit_transaction(fs_info, space_info);
4970                 break;
4971         default:
4972                 ret = -ENOSPC;
4973                 break;
4974         }
4975
4976         trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
4977                                 ret);
4978         return;
4979 }
4980
4981 static inline u64
4982 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
4983                                  struct btrfs_space_info *space_info,
4984                                  bool system_chunk)
4985 {
4986         struct reserve_ticket *ticket;
4987         u64 used;
4988         u64 expected;
4989         u64 to_reclaim = 0;
4990
4991         list_for_each_entry(ticket, &space_info->tickets, list)
4992                 to_reclaim += ticket->bytes;
4993         list_for_each_entry(ticket, &space_info->priority_tickets, list)
4994                 to_reclaim += ticket->bytes;
4995         if (to_reclaim)
4996                 return to_reclaim;
4997
4998         to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4999         if (can_overcommit(fs_info, space_info, to_reclaim,
5000                            BTRFS_RESERVE_FLUSH_ALL, system_chunk))
5001                 return 0;
5002
5003         used = btrfs_space_info_used(space_info, true);
5004
5005         if (can_overcommit(fs_info, space_info, SZ_1M,
5006                            BTRFS_RESERVE_FLUSH_ALL, system_chunk))
5007                 expected = div_factor_fine(space_info->total_bytes, 95);
5008         else
5009                 expected = div_factor_fine(space_info->total_bytes, 90);
5010
5011         if (used > expected)
5012                 to_reclaim = used - expected;
5013         else
5014                 to_reclaim = 0;
5015         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
5016                                      space_info->bytes_reserved);
5017         return to_reclaim;
5018 }
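
/*
 * Editor's note -- illustrative example, not part of the kernel source.
 * Outstanding tickets dominate the result; without tickets the target is
 * how far the space_info sits above its "expected" fill level.  E.g. a
 * 10GiB metadata space_info with 9.6GiB used (including bytes_may_use) on
 * a filesystem that can no longer overcommit:
 *
 *     expected   = 90% of 10GiB = 9GiB
 *     to_reclaim = min(9.6GiB - 9GiB, bytes_may_use + bytes_reserved)
 *
 * so the async reclaimer aims at roughly 0.6GiB rather than some arbitrary
 * amount.
 */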
5019
5020 static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
5021                                         struct btrfs_space_info *space_info,
5022                                         u64 used, bool system_chunk)
5023 {
5024         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
5025
5026         /* If we're just plain full then async reclaim just slows us down. */
5027         if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
5028                 return 0;
5029
5030         if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
5031                                               system_chunk))
5032                 return 0;
5033
5034         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
5035                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
5036 }
5037
5038 static bool wake_all_tickets(struct list_head *head)
5039 {
5040         struct reserve_ticket *ticket;
5041
5042         while (!list_empty(head)) {
5043                 ticket = list_first_entry(head, struct reserve_ticket, list);
5044                 list_del_init(&ticket->list);
5045                 ticket->error = -ENOSPC;
5046                 wake_up(&ticket->wait);
5047                 if (ticket->bytes != ticket->orig_bytes)
5048                         return true;
5049         }
5050         return false;
5051 }
5052
5053 /*
5054  * This is for normal flushers, we can wait all goddamned day if we want to.  We
5055  * will loop and continuously try to flush as long as we are making progress.
5056  * We count progress as clearing off tickets each time we have to loop.
5057  */
5058 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
5059 {
5060         struct btrfs_fs_info *fs_info;
5061         struct btrfs_space_info *space_info;
5062         u64 to_reclaim;
5063         int flush_state;
5064         int commit_cycles = 0;
5065         u64 last_tickets_id;
5066
5067         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
5068         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5069
5070         spin_lock(&space_info->lock);
5071         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
5072                                                       false);
5073         if (!to_reclaim) {
5074                 space_info->flush = 0;
5075                 spin_unlock(&space_info->lock);
5076                 return;
5077         }
5078         last_tickets_id = space_info->tickets_id;
5079         spin_unlock(&space_info->lock);
5080
5081         flush_state = FLUSH_DELAYED_ITEMS_NR;
5082         do {
5083                 flush_space(fs_info, space_info, to_reclaim, flush_state);
5084                 spin_lock(&space_info->lock);
5085                 if (list_empty(&space_info->tickets)) {
5086                         space_info->flush = 0;
5087                         spin_unlock(&space_info->lock);
5088                         return;
5089                 }
5090                 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
5091                                                               space_info,
5092                                                               false);
5093                 if (last_tickets_id == space_info->tickets_id) {
5094                         flush_state++;
5095                 } else {
5096                         last_tickets_id = space_info->tickets_id;
5097                         flush_state = FLUSH_DELAYED_ITEMS_NR;
5098                         if (commit_cycles)
5099                                 commit_cycles--;
5100                 }
5101
5102                 /*
5103                  * We don't want to force a chunk allocation until we've tried
5104                  * pretty hard to reclaim space.  Think of the case where we
5105                  * freed up a bunch of space and so have a lot of pinned space
5106  * to reclaim.  We would rather use that than possibly create an
5107                  * underutilized metadata chunk.  So if this is our first run
5108                  * through the flushing state machine skip ALLOC_CHUNK_FORCE and
5109                  * commit the transaction.  If nothing has changed the next go
5110                  * around then we can force a chunk allocation.
5111                  */
5112                 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
5113                         flush_state++;
5114
5115                 if (flush_state > COMMIT_TRANS) {
5116                         commit_cycles++;
5117                         if (commit_cycles > 2) {
5118                                 if (wake_all_tickets(&space_info->tickets)) {
5119                                         flush_state = FLUSH_DELAYED_ITEMS_NR;
5120                                         commit_cycles--;
5121                                 } else {
5122                                         space_info->flush = 0;
5123                                 }
5124                         } else {
5125                                 flush_state = FLUSH_DELAYED_ITEMS_NR;
5126                         }
5127                 }
5128                 spin_unlock(&space_info->lock);
5129         } while (flush_state <= COMMIT_TRANS);
5130 }
5131
5132 void btrfs_init_async_reclaim_work(struct work_struct *work)
5133 {
5134         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
5135 }
5136
5137 static const enum btrfs_flush_state priority_flush_states[] = {
5138         FLUSH_DELAYED_ITEMS_NR,
5139         FLUSH_DELAYED_ITEMS,
5140         ALLOC_CHUNK,
5141 };
5142
5143 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5144                                             struct btrfs_space_info *space_info,
5145                                             struct reserve_ticket *ticket)
5146 {
5147         u64 to_reclaim;
5148         int flush_state;
5149
5150         spin_lock(&space_info->lock);
5151         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
5152                                                       false);
5153         if (!to_reclaim) {
5154                 spin_unlock(&space_info->lock);
5155                 return;
5156         }
5157         spin_unlock(&space_info->lock);
5158
5159         flush_state = 0;
5160         do {
5161                 flush_space(fs_info, space_info, to_reclaim,
5162                             priority_flush_states[flush_state]);
5163                 flush_state++;
5164                 spin_lock(&space_info->lock);
5165                 if (ticket->bytes == 0) {
5166                         spin_unlock(&space_info->lock);
5167                         return;
5168                 }
5169                 spin_unlock(&space_info->lock);
5170         } while (flush_state < ARRAY_SIZE(priority_flush_states));
5171 }
5172
5173 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5174                                struct btrfs_space_info *space_info,
5175                                struct reserve_ticket *ticket)
5176
5177 {
5178         DEFINE_WAIT(wait);
5179         u64 reclaim_bytes = 0;
5180         int ret = 0;
5181
5182         spin_lock(&space_info->lock);
5183         while (ticket->bytes > 0 && ticket->error == 0) {
5184                 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
5185                 if (ret) {
5186                         ret = -EINTR;
5187                         break;
5188                 }
5189                 spin_unlock(&space_info->lock);
5190
5191                 schedule();
5192
5193                 finish_wait(&ticket->wait, &wait);
5194                 spin_lock(&space_info->lock);
5195         }
5196         if (!ret)
5197                 ret = ticket->error;
5198         if (!list_empty(&ticket->list))
5199                 list_del_init(&ticket->list);
5200         if (ticket->bytes && ticket->bytes < ticket->orig_bytes)
5201                 reclaim_bytes = ticket->orig_bytes - ticket->bytes;
5202         spin_unlock(&space_info->lock);
5203
5204         if (reclaim_bytes)
5205                 space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
5206         return ret;
5207 }
5208
5209 /**
5210  * __reserve_metadata_bytes - try to reserve bytes from the given space_info
5211  * @fs_info - the fs_info for our filesystem
5212  * @space_info - the space info we want to allocate from
5213  * @orig_bytes - the number of bytes we want
5214  * @flush - whether or not we can flush to make our reservation
5215  * @system_chunk - whether this reservation is for system chunk space
5216  *
5217  * This will reserve orig_bytes number of bytes from the given space info.  If
5218  * there is not enough space it will make an attempt to flush out space to
5219  * make room, by flushing delalloc if possible or committing the transaction.
5220  * If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to regain reservations
5221  * will be made and this will fail if there is not enough space already.
5222  */
5223 static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
5224                                     struct btrfs_space_info *space_info,
5225                                     u64 orig_bytes,
5226                                     enum btrfs_reserve_flush_enum flush,
5227                                     bool system_chunk)
5228 {
5229         struct reserve_ticket ticket;
5230         u64 used;
5231         u64 reclaim_bytes = 0;
5232         int ret = 0;
5233
5234         ASSERT(orig_bytes);
5235         ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
5236
5237         spin_lock(&space_info->lock);
5238         ret = -ENOSPC;
5239         used = btrfs_space_info_used(space_info, true);
5240
5241         /*
5242          * If we have enough space then hooray, make our reservation and carry
5243          * on.  If not see if we can overcommit, and if we can, hooray carry on.
5244          * If not things get more complicated.
5245          */
5246         if (used + orig_bytes <= space_info->total_bytes) {
5247                 update_bytes_may_use(space_info, orig_bytes);
5248                 trace_btrfs_space_reservation(fs_info, "space_info",
5249                                               space_info->flags, orig_bytes, 1);
5250                 ret = 0;
5251         } else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
5252                                   system_chunk)) {
5253                 update_bytes_may_use(space_info, orig_bytes);
5254                 trace_btrfs_space_reservation(fs_info, "space_info",
5255                                               space_info->flags, orig_bytes, 1);
5256                 ret = 0;
5257         }
5258
5259         /*
5260          * If we couldn't make a reservation then setup our reservation ticket
5261          * and kick the async worker if it's not already running.
5262          *
5263          * If we are a priority flusher then we just need to add our ticket to
5264          * the list and we will do our own flushing further down.
5265          */
5266         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5267                 ticket.orig_bytes = orig_bytes;
5268                 ticket.bytes = orig_bytes;
5269                 ticket.error = 0;
5270                 init_waitqueue_head(&ticket.wait);
5271                 if (flush == BTRFS_RESERVE_FLUSH_ALL) {
5272                         list_add_tail(&ticket.list, &space_info->tickets);
5273                         if (!space_info->flush) {
5274                                 space_info->flush = 1;
5275                                 trace_btrfs_trigger_flush(fs_info,
5276                                                           space_info->flags,
5277                                                           orig_bytes, flush,
5278                                                           "enospc");
5279                                 queue_work(system_unbound_wq,
5280                                            &fs_info->async_reclaim_work);
5281                         }
5282                 } else {
5283                         list_add_tail(&ticket.list,
5284                                       &space_info->priority_tickets);
5285                 }
5286         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5287                 used += orig_bytes;
5288                 /*
5289                  * We will do the space reservation dance during log replay,
5290                  * which means we won't have fs_info->fs_root set, so don't do
5291                  * the async reclaim as we will panic.
5292                  */
5293                 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
5294                     need_do_async_reclaim(fs_info, space_info,
5295                                           used, system_chunk) &&
5296                     !work_busy(&fs_info->async_reclaim_work)) {
5297                         trace_btrfs_trigger_flush(fs_info, space_info->flags,
5298                                                   orig_bytes, flush, "preempt");
5299                         queue_work(system_unbound_wq,
5300                                    &fs_info->async_reclaim_work);
5301                 }
5302         }
5303         spin_unlock(&space_info->lock);
5304         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5305                 return ret;
5306
5307         if (flush == BTRFS_RESERVE_FLUSH_ALL)
5308                 return wait_reserve_ticket(fs_info, space_info, &ticket);
5309
5310         ret = 0;
5311         priority_reclaim_metadata_space(fs_info, space_info, &ticket);
5312         spin_lock(&space_info->lock);
5313         if (ticket.bytes) {
5314                 if (ticket.bytes < orig_bytes)
5315                         reclaim_bytes = orig_bytes - ticket.bytes;
5316                 list_del_init(&ticket.list);
5317                 ret = -ENOSPC;
5318         }
5319         spin_unlock(&space_info->lock);
5320
5321         if (reclaim_bytes)
5322                 space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
5323         ASSERT(list_empty(&ticket.list));
5324         return ret;
5325 }
5326
5327 /**
5328  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
5329  * @root - the root we're allocating for
5330  * @block_rsv - the block_rsv we're allocating for
5331  * @orig_bytes - the number of bytes we want
5332  * @flush - whether or not we can flush to make our reservation
5333  *
5334  * This will reserve orig_bytes number of bytes from the space info associated
5335  * with the block_rsv.  If there is not enough space it will make an attempt to
5336  * flush out space to make room.  It will do this by flushing delalloc if
5337  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
5338  * then no attempts to regain reservations will be made and this will fail if
5339  * there is not enough space already.
5340  */
5341 static int reserve_metadata_bytes(struct btrfs_root *root,
5342                                   struct btrfs_block_rsv *block_rsv,
5343                                   u64 orig_bytes,
5344                                   enum btrfs_reserve_flush_enum flush)
5345 {
5346         struct btrfs_fs_info *fs_info = root->fs_info;
5347         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5348         int ret;
5349         bool system_chunk = (root == fs_info->chunk_root);
5350
5351         ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
5352                                        orig_bytes, flush, system_chunk);
5353         if (ret == -ENOSPC &&
5354             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5355                 if (block_rsv != global_rsv &&
5356                     !block_rsv_use_bytes(global_rsv, orig_bytes))
5357                         ret = 0;
5358         }
5359         if (ret == -ENOSPC) {
5360                 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
5361                                               block_rsv->space_info->flags,
5362                                               orig_bytes, 1);
5363
5364                 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
5365                         dump_space_info(fs_info, block_rsv->space_info,
5366                                         orig_bytes, 0);
5367         }
5368         return ret;
5369 }
5370
5371 static struct btrfs_block_rsv *get_block_rsv(
5372                                         const struct btrfs_trans_handle *trans,
5373                                         const struct btrfs_root *root)
5374 {
5375         struct btrfs_fs_info *fs_info = root->fs_info;
5376         struct btrfs_block_rsv *block_rsv = NULL;
5377
5378         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5379             (root == fs_info->csum_root && trans->adding_csums) ||
5380             (root == fs_info->uuid_root))
5381                 block_rsv = trans->block_rsv;
5382
5383         if (!block_rsv)
5384                 block_rsv = root->block_rsv;
5385
5386         if (!block_rsv)
5387                 block_rsv = &fs_info->empty_block_rsv;
5388
5389         return block_rsv;
5390 }
5391
5392 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5393                                u64 num_bytes)
5394 {
5395         int ret = -ENOSPC;
5396         spin_lock(&block_rsv->lock);
5397         if (block_rsv->reserved >= num_bytes) {
5398                 block_rsv->reserved -= num_bytes;
5399                 if (block_rsv->reserved < block_rsv->size)
5400                         block_rsv->full = 0;
5401                 ret = 0;
5402         }
5403         spin_unlock(&block_rsv->lock);
5404         return ret;
5405 }
5406
5407 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5408                                 u64 num_bytes, bool update_size)
5409 {
5410         spin_lock(&block_rsv->lock);
5411         block_rsv->reserved += num_bytes;
5412         if (update_size)
5413                 block_rsv->size += num_bytes;
5414         else if (block_rsv->reserved >= block_rsv->size)
5415                 block_rsv->full = 1;
5416         spin_unlock(&block_rsv->lock);
5417 }
5418
5419 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5420                              struct btrfs_block_rsv *dest, u64 num_bytes,
5421                              int min_factor)
5422 {
5423         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5424         u64 min_bytes;
5425
5426         if (global_rsv->space_info != dest->space_info)
5427                 return -ENOSPC;
5428
5429         spin_lock(&global_rsv->lock);
5430         min_bytes = div_factor(global_rsv->size, min_factor);
5431         if (global_rsv->reserved < min_bytes + num_bytes) {
5432                 spin_unlock(&global_rsv->lock);
5433                 return -ENOSPC;
5434         }
5435         global_rsv->reserved -= num_bytes;
5436         if (global_rsv->reserved < global_rsv->size)
5437                 global_rsv->full = 0;
5438         spin_unlock(&global_rsv->lock);
5439
5440         block_rsv_add_bytes(dest, num_bytes, true);
5441         return 0;
5442 }
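
/*
 * Worked example of the @min_factor check above (illustrative only, assuming
 * div_factor() from math.h scales by tenths): with min_factor = 5, min_bytes
 * is half of the global rsv's size.  For a 1 GiB global rsv and a 4 MiB
 * @num_bytes, the migration only proceeds if at least 516 MiB is currently
 * reserved, i.e. the global rsv still holds at least half of its target size
 * after giving up the bytes; otherwise -ENOSPC is returned and nothing moves.
 */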
5443
5444 /**
5445  * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
5446  * @fs_info: the fs_info for our fs.
5447  * @src: the source block rsv to transfer from.
5448  * @num_bytes: the number of bytes to transfer.
5449  *
5450  * This transfers up to the num_bytes amount from the src rsv to the
5451  * delayed_refs_rsv.  Any extra bytes are returned to the space info.
5452  */
5453 void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
5454                                        struct btrfs_block_rsv *src,
5455                                        u64 num_bytes)
5456 {
5457         struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
5458         u64 to_free = 0;
5459
5460         spin_lock(&src->lock);
5461         src->reserved -= num_bytes;
5462         src->size -= num_bytes;
5463         spin_unlock(&src->lock);
5464
5465         spin_lock(&delayed_refs_rsv->lock);
5466         if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
5467                 u64 delta = delayed_refs_rsv->size -
5468                         delayed_refs_rsv->reserved;
5469                 if (num_bytes > delta) {
5470                         to_free = num_bytes - delta;
5471                         num_bytes = delta;
5472                 }
5473         } else {
5474                 to_free = num_bytes;
5475                 num_bytes = 0;
5476         }
5477
5478         if (num_bytes)
5479                 delayed_refs_rsv->reserved += num_bytes;
5480         if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
5481                 delayed_refs_rsv->full = 1;
5482         spin_unlock(&delayed_refs_rsv->lock);
5483
5484         if (num_bytes)
5485                 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
5486                                               0, num_bytes, 1);
5487         if (to_free)
5488                 space_info_add_old_bytes(fs_info, delayed_refs_rsv->space_info,
5489                                          to_free);
5490 }
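
/*
 * Illustrative example of the transfer above: suppose @num_bytes is 1 MiB and
 * the delayed refs rsv is only 256 KiB short of its size.  Then delta is
 * 256 KiB, so 256 KiB is added to the delayed refs rsv (marking it full) and
 * the remaining 768 KiB is handed back to the space info via
 * space_info_add_old_bytes(), where it can satisfy waiting tickets.  The
 * source rsv's size and reserved counts are reduced by the full 1 MiB either
 * way.
 */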
5491
5492 /**
5493  * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
5494  * @fs_info: the fs_info for our fs.
5495  * @flush: control how we can flush for this reservation.
5496  *
5497  * This will refill the delayed refs rsv up to one item's worth of space and
5498  * will return -ENOSPC if we can't make the reservation.
5499  */
5500 int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
5501                                   enum btrfs_reserve_flush_enum flush)
5502 {
5503         struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
5504         u64 limit = btrfs_calc_trans_metadata_size(fs_info, 1);
5505         u64 num_bytes = 0;
5506         int ret = -ENOSPC;
5507
5508         spin_lock(&block_rsv->lock);
5509         if (block_rsv->reserved < block_rsv->size) {
5510                 num_bytes = block_rsv->size - block_rsv->reserved;
5511                 num_bytes = min(num_bytes, limit);
5512         }
5513         spin_unlock(&block_rsv->lock);
5514
5515         if (!num_bytes)
5516                 return 0;
5517
5518         ret = reserve_metadata_bytes(fs_info->extent_root, block_rsv,
5519                                      num_bytes, flush);
5520         if (ret)
5521                 return ret;
5522         block_rsv_add_bytes(block_rsv, num_bytes, false);
5523         trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
5524                                       0, num_bytes, 1);
5525         return 0;
5526 }
5527
5528 /*
5529  * This is for space we already have accounted in space_info->bytes_may_use, so
5530  * basically when we're returning space from block_rsv's.
5531  */
5532 static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
5533                                      struct btrfs_space_info *space_info,
5534                                      u64 num_bytes)
5535 {
5536         struct reserve_ticket *ticket;
5537         struct list_head *head;
5538         u64 used;
5539         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
5540         bool check_overcommit = false;
5541
5542         spin_lock(&space_info->lock);
5543         head = &space_info->priority_tickets;
5544
5545         /*
5546          * If we are over our limit then we need to check and see if we can
5547          * overcommit, and if we can't then we just need to free up our space
5548          * and not satisfy any requests.
5549          */
5550         used = btrfs_space_info_used(space_info, true);
5551         if (used - num_bytes >= space_info->total_bytes)
5552                 check_overcommit = true;
5553 again:
5554         while (!list_empty(head) && num_bytes) {
5555                 ticket = list_first_entry(head, struct reserve_ticket,
5556                                           list);
5557                 /*
5558                  * We use 0 bytes because this space is already reserved, so
5559                  * adding the ticket space would be a double count.
5560                  */
5561                 if (check_overcommit &&
5562                     !can_overcommit(fs_info, space_info, 0, flush, false))
5563                         break;
5564                 if (num_bytes >= ticket->bytes) {
5565                         list_del_init(&ticket->list);
5566                         num_bytes -= ticket->bytes;
5567                         ticket->bytes = 0;
5568                         space_info->tickets_id++;
5569                         wake_up(&ticket->wait);
5570                 } else {
5571                         ticket->bytes -= num_bytes;
5572                         num_bytes = 0;
5573                 }
5574         }
5575
5576         if (num_bytes && head == &space_info->priority_tickets) {
5577                 head = &space_info->tickets;
5578                 flush = BTRFS_RESERVE_FLUSH_ALL;
5579                 goto again;
5580         }
5581         update_bytes_may_use(space_info, -num_bytes);
5582         trace_btrfs_space_reservation(fs_info, "space_info",
5583                                       space_info->flags, num_bytes, 0);
5584         spin_unlock(&space_info->lock);
5585 }
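
/*
 * Example of the ticket handling above (illustrative only): if 6 MiB is
 * returned while a priority ticket is waiting for 4 MiB and a regular ticket
 * for 4 MiB, the priority ticket is satisfied and woken first, the regular
 * ticket is trimmed down to a remaining 2 MiB, and nothing is left over, so
 * bytes_may_use is not decreased (the returned space stays accounted on
 * behalf of the tickets).  Only bytes that no ticket wants are subtracted
 * from bytes_may_use at the end.
 */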
5586
5587 /*
5588  * This is for newly allocated space that isn't accounted in
5589  * space_info->bytes_may_use yet.  So if we allocate a chunk or unpin an extent
5590  * we use this helper.
5591  */
5592 static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
5593                                      struct btrfs_space_info *space_info,
5594                                      u64 num_bytes)
5595 {
5596         struct reserve_ticket *ticket;
5597         struct list_head *head = &space_info->priority_tickets;
5598
5599 again:
5600         while (!list_empty(head) && num_bytes) {
5601                 ticket = list_first_entry(head, struct reserve_ticket,
5602                                           list);
5603                 if (num_bytes >= ticket->bytes) {
5604                         trace_btrfs_space_reservation(fs_info, "space_info",
5605                                                       space_info->flags,
5606                                                       ticket->bytes, 1);
5607                         list_del_init(&ticket->list);
5608                         num_bytes -= ticket->bytes;
5609                         update_bytes_may_use(space_info, ticket->bytes);
5610                         ticket->bytes = 0;
5611                         space_info->tickets_id++;
5612                         wake_up(&ticket->wait);
5613                 } else {
5614                         trace_btrfs_space_reservation(fs_info, "space_info",
5615                                                       space_info->flags,
5616                                                       num_bytes, 1);
5617                         update_bytes_may_use(space_info, num_bytes);
5618                         ticket->bytes -= num_bytes;
5619                         num_bytes = 0;
5620                 }
5621         }
5622
5623         if (num_bytes && head == &space_info->priority_tickets) {
5624                 head = &space_info->tickets;
5625                 goto again;
5626         }
5627 }
5628
5629 static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5630                                     struct btrfs_block_rsv *block_rsv,
5631                                     struct btrfs_block_rsv *dest, u64 num_bytes,
5632                                     u64 *qgroup_to_release_ret)
5633 {
5634         struct btrfs_space_info *space_info = block_rsv->space_info;
5635         u64 qgroup_to_release = 0;
5636         u64 ret;
5637
5638         spin_lock(&block_rsv->lock);
5639         if (num_bytes == (u64)-1) {
5640                 num_bytes = block_rsv->size;
5641                 qgroup_to_release = block_rsv->qgroup_rsv_size;
5642         }
5643         block_rsv->size -= num_bytes;
5644         if (block_rsv->reserved >= block_rsv->size) {
5645                 num_bytes = block_rsv->reserved - block_rsv->size;
5646                 block_rsv->reserved = block_rsv->size;
5647                 block_rsv->full = 1;
5648         } else {
5649                 num_bytes = 0;
5650         }
5651         if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
5652                 qgroup_to_release = block_rsv->qgroup_rsv_reserved -
5653                                     block_rsv->qgroup_rsv_size;
5654                 block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
5655         } else {
5656                 qgroup_to_release = 0;
5657         }
5658         spin_unlock(&block_rsv->lock);
5659
5660         ret = num_bytes;
5661         if (num_bytes > 0) {
5662                 if (dest) {
5663                         spin_lock(&dest->lock);
5664                         if (!dest->full) {
5665                                 u64 bytes_to_add;
5666
5667                                 bytes_to_add = dest->size - dest->reserved;
5668                                 bytes_to_add = min(num_bytes, bytes_to_add);
5669                                 dest->reserved += bytes_to_add;
5670                                 if (dest->reserved >= dest->size)
5671                                         dest->full = 1;
5672                                 num_bytes -= bytes_to_add;
5673                         }
5674                         spin_unlock(&dest->lock);
5675                 }
5676                 if (num_bytes)
5677                         space_info_add_old_bytes(fs_info, space_info,
5678                                                  num_bytes);
5679         }
5680         if (qgroup_to_release_ret)
5681                 *qgroup_to_release_ret = qgroup_to_release;
5682         return ret;
5683 }
5684
5685 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
5686                             struct btrfs_block_rsv *dst, u64 num_bytes,
5687                             bool update_size)
5688 {
5689         int ret;
5690
5691         ret = block_rsv_use_bytes(src, num_bytes);
5692         if (ret)
5693                 return ret;
5694
5695         block_rsv_add_bytes(dst, num_bytes, update_size);
5696         return 0;
5697 }
5698
5699 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5700 {
5701         memset(rsv, 0, sizeof(*rsv));
5702         spin_lock_init(&rsv->lock);
5703         rsv->type = type;
5704 }
5705
5706 void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
5707                                    struct btrfs_block_rsv *rsv,
5708                                    unsigned short type)
5709 {
5710         btrfs_init_block_rsv(rsv, type);
5711         rsv->space_info = __find_space_info(fs_info,
5712                                             BTRFS_BLOCK_GROUP_METADATA);
5713 }
5714
5715 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
5716                                               unsigned short type)
5717 {
5718         struct btrfs_block_rsv *block_rsv;
5719
5720         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5721         if (!block_rsv)
5722                 return NULL;
5723
5724         btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
5725         return block_rsv;
5726 }
5727
5728 void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
5729                           struct btrfs_block_rsv *rsv)
5730 {
5731         if (!rsv)
5732                 return;
5733         btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
5734         kfree(rsv);
5735 }
5736
5737 int btrfs_block_rsv_add(struct btrfs_root *root,
5738                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5739                         enum btrfs_reserve_flush_enum flush)
5740 {
5741         int ret;
5742
5743         if (num_bytes == 0)
5744                 return 0;
5745
5746         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5747         if (!ret)
5748                 block_rsv_add_bytes(block_rsv, num_bytes, true);
5749
5750         return ret;
5751 }
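
/*
 * A sketch of typical usage (hypothetical caller, shown only for
 * illustration): carve out a temporary rsv, reserve room for a couple of tree
 * items and release whatever is left when done.  BTRFS_BLOCK_RSV_TEMP is
 * assumed to be the temporary rsv type from ctree.h.
 *
 *	struct btrfs_block_rsv rsv;
 *	u64 bytes = btrfs_calc_trans_metadata_size(fs_info, 2);
 *	int ret;
 *
 *	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
 *	ret = btrfs_block_rsv_add(root, &rsv, bytes, BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret) {
 *		... use the reserved space ...
 *		btrfs_block_rsv_release(fs_info, &rsv, (u64)-1);
 *	}
 */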
5752
5753 int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
5754 {
5755         u64 num_bytes = 0;
5756         int ret = -ENOSPC;
5757
5758         if (!block_rsv)
5759                 return 0;
5760
5761         spin_lock(&block_rsv->lock);
5762         num_bytes = div_factor(block_rsv->size, min_factor);
5763         if (block_rsv->reserved >= num_bytes)
5764                 ret = 0;
5765         spin_unlock(&block_rsv->lock);
5766
5767         return ret;
5768 }
5769
5770 int btrfs_block_rsv_refill(struct btrfs_root *root,
5771                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5772                            enum btrfs_reserve_flush_enum flush)
5773 {
5774         u64 num_bytes = 0;
5775         int ret = -ENOSPC;
5776
5777         if (!block_rsv)
5778                 return 0;
5779
5780         spin_lock(&block_rsv->lock);
5781         num_bytes = min_reserved;
5782         if (block_rsv->reserved >= num_bytes)
5783                 ret = 0;
5784         else
5785                 num_bytes -= block_rsv->reserved;
5786         spin_unlock(&block_rsv->lock);
5787
5788         if (!ret)
5789                 return 0;
5790
5791         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5792         if (!ret) {
5793                 block_rsv_add_bytes(block_rsv, num_bytes, false);
5794                 return 0;
5795         }
5796
5797         return ret;
5798 }
5799
5800 static void calc_refill_bytes(struct btrfs_block_rsv *block_rsv,
5801                                 u64 *metadata_bytes, u64 *qgroup_bytes)
5802 {
5803         *metadata_bytes = 0;
5804         *qgroup_bytes = 0;
5805
5806         spin_lock(&block_rsv->lock);
5807         if (block_rsv->reserved < block_rsv->size)
5808                 *metadata_bytes = block_rsv->size - block_rsv->reserved;
5809         if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
5810                 *qgroup_bytes = block_rsv->qgroup_rsv_size -
5811                         block_rsv->qgroup_rsv_reserved;
5812         spin_unlock(&block_rsv->lock);
5813 }
5814
5815 /**
5816  * btrfs_inode_rsv_refill - refill the inode block rsv.
5817  * @inode: the inode we are refilling.
5818  * @flush: the flushing restriction.
5819  *
5820  * Essentially the same as btrfs_block_rsv_refill, except it uses the
5821  * block_rsv->size as the minimum size.  We'll either refill the missing amount
5822  * or return if we already have enough space.  This will also handle the reserve
5823  * tracepoint for the reserved amount.
5824  */
5825 static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
5826                                   enum btrfs_reserve_flush_enum flush)
5827 {
5828         struct btrfs_root *root = inode->root;
5829         struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5830         u64 num_bytes, last = 0;
5831         u64 qgroup_num_bytes;
5832         int ret = -ENOSPC;
5833
5834         calc_refill_bytes(block_rsv, &num_bytes, &qgroup_num_bytes);
5835         if (num_bytes == 0)
5836                 return 0;
5837
5838         do {
5839                 ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes,
5840                                                          true);
5841                 if (ret)
5842                         return ret;
5843                 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5844                 if (ret) {
5845                         btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
5846                         last = num_bytes;
5847                         /*
5848                          * If we are fragmented we can end up with a lot of
5849                          * outstanding extents, which will make our size much
5850                          * larger than our reserved amount.
5851                          *
5852                          * The reservation made here may turn out to be much
5853                          * bigger than we actually need once delalloc flushing
5854                          * has run, so recalculate the missing amount and, if
5855                          * anything is still needed, try the reservation
5856                          * again.
5857                          */
5858                         if (flush == BTRFS_RESERVE_FLUSH_ALL)
5859                                 calc_refill_bytes(block_rsv, &num_bytes,
5860                                                    &qgroup_num_bytes);
5861                         if (num_bytes == 0)
5862                                 return 0;
5863                 }
5864         } while (ret && last != num_bytes);
5865
5866         if (!ret) {
5867                 block_rsv_add_bytes(block_rsv, num_bytes, false);
5868                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5869                                               btrfs_ino(inode), num_bytes, 1);
5870
5871                 /* Don't forget to increase qgroup_rsv_reserved */
5872                 spin_lock(&block_rsv->lock);
5873                 block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
5874                 spin_unlock(&block_rsv->lock);
5875         }
5876         return ret;
5877 }
5878
5879 static u64 __btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
5880                                      struct btrfs_block_rsv *block_rsv,
5881                                      u64 num_bytes, u64 *qgroup_to_release)
5882 {
5883         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5884         struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
5885         struct btrfs_block_rsv *target = delayed_rsv;
5886
5887         if (target->full || target == block_rsv)
5888                 target = global_rsv;
5889
5890         if (block_rsv->space_info != target->space_info)
5891                 target = NULL;
5892
5893         return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
5894                                        qgroup_to_release);
5895 }
5896
5897 void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
5898                              struct btrfs_block_rsv *block_rsv,
5899                              u64 num_bytes)
5900 {
5901         __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
5902 }
5903
5904 /**
5905  * btrfs_inode_rsv_release - release any excessive reservation.
5906  * @inode: the inode we need to release from.
5907  * @qgroup_free: free or convert qgroup meta.
5908  *   Unlike normal operation, qgroup meta reservation needs to know if we are
5909  *   freeing qgroup reservation or just converting it into per-trans.  Normally
5910  *   @qgroup_free is true for error handling, and false for normal release.
5911  *
5912  * This is the same as btrfs_block_rsv_release, except that it handles the
5913  * tracepoint for the reservation.
5914  */
5915 static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
5916 {
5917         struct btrfs_fs_info *fs_info = inode->root->fs_info;
5918         struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
5919         u64 released = 0;
5920         u64 qgroup_to_release = 0;
5921
5922         /*
5923          * Since we set block_rsv->size statically, just say we are releasing
5924          * 0 bytes here; block_rsv_release_bytes() will then free only whatever
5925          * is reserved in excess of that size.
5926          */
5927         released = __btrfs_block_rsv_release(fs_info, block_rsv, 0,
5928                                              &qgroup_to_release);
5929         if (released > 0)
5930                 trace_btrfs_space_reservation(fs_info, "delalloc",
5931                                               btrfs_ino(inode), released, 0);
5932         if (qgroup_free)
5933                 btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release);
5934         else
5935                 btrfs_qgroup_convert_reserved_meta(inode->root,
5936                                                    qgroup_to_release);
5937 }
5938
5939 /**
5940  * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
5941  * @fs_info: the fs_info for our fs.
5942  * @nr: the number of items to drop.
5943  *
5944  * This drops the delayed ref head's count from the delayed refs rsv and frees
5945  * any excess reservation we had.
5946  */
5947 void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
5948 {
5949         struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
5950         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5951         u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr);
5952         u64 released = 0;
5953
5954         released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv,
5955                                            num_bytes, NULL);
5956         if (released)
5957                 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
5958                                               0, released, 0);
5959 }
5960
5961 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5962 {
5963         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5964         struct btrfs_space_info *sinfo = block_rsv->space_info;
5965         u64 num_bytes;
5966
5967         /*
5968          * The global block rsv is based on the size of the extent tree, the
5969          * checksum tree and the root tree.  If the fs is empty we want to set
5970          * it to a minimal amount for safety.
5971          */
5972         num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
5973                 btrfs_root_used(&fs_info->csum_root->root_item) +
5974                 btrfs_root_used(&fs_info->tree_root->root_item);
5975         num_bytes = max_t(u64, num_bytes, SZ_16M);
5976
5977         spin_lock(&sinfo->lock);
5978         spin_lock(&block_rsv->lock);
5979
5980         block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5981
5982         if (block_rsv->reserved < block_rsv->size) {
5983                 num_bytes = btrfs_space_info_used(sinfo, true);
5984                 if (sinfo->total_bytes > num_bytes) {
5985                         num_bytes = sinfo->total_bytes - num_bytes;
5986                         num_bytes = min(num_bytes,
5987                                         block_rsv->size - block_rsv->reserved);
5988                         block_rsv->reserved += num_bytes;
5989                         update_bytes_may_use(sinfo, num_bytes);
5990                         trace_btrfs_space_reservation(fs_info, "space_info",
5991                                                       sinfo->flags, num_bytes,
5992                                                       1);
5993                 }
5994         } else if (block_rsv->reserved > block_rsv->size) {
5995                 num_bytes = block_rsv->reserved - block_rsv->size;
5996                 update_bytes_may_use(sinfo, -num_bytes);
5997                 trace_btrfs_space_reservation(fs_info, "space_info",
5998                                       sinfo->flags, num_bytes, 0);
5999                 block_rsv->reserved = block_rsv->size;
6000         }
6001
6002         if (block_rsv->reserved == block_rsv->size)
6003                 block_rsv->full = 1;
6004         else
6005                 block_rsv->full = 0;
6006
6007         spin_unlock(&block_rsv->lock);
6008         spin_unlock(&sinfo->lock);
6009 }
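
/*
 * Sizing example for the clamp above (illustrative only): if the extent,
 * csum and root trees together use 4 MiB of metadata, the global rsv is
 * sized at the 16 MiB floor; at 100 MiB of tree usage it is sized at
 * 100 MiB; anything beyond 512 MiB is capped at 512 MiB.  The reserved
 * amount is then topped up from whatever the metadata space info still has
 * free, and that top-up is accounted as bytes_may_use.
 */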
6010
6011 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
6012 {
6013         struct btrfs_space_info *space_info;
6014
6015         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
6016         fs_info->chunk_block_rsv.space_info = space_info;
6017
6018         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
6019         fs_info->global_block_rsv.space_info = space_info;
6020         fs_info->trans_block_rsv.space_info = space_info;
6021         fs_info->empty_block_rsv.space_info = space_info;
6022         fs_info->delayed_block_rsv.space_info = space_info;
6023         fs_info->delayed_refs_rsv.space_info = space_info;
6024
6025         fs_info->extent_root->block_rsv = &fs_info->delayed_refs_rsv;
6026         fs_info->csum_root->block_rsv = &fs_info->delayed_refs_rsv;
6027         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
6028         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
6029         if (fs_info->quota_root)
6030                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
6031         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
6032
6033         update_global_block_rsv(fs_info);
6034 }
6035
6036 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
6037 {
6038         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
6039                                 (u64)-1, NULL);
6040         WARN_ON(fs_info->trans_block_rsv.size > 0);
6041         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
6042         WARN_ON(fs_info->chunk_block_rsv.size > 0);
6043         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
6044         WARN_ON(fs_info->delayed_block_rsv.size > 0);
6045         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
6046         WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
6047         WARN_ON(fs_info->delayed_refs_rsv.size > 0);
6048 }
6049
6050 /*
6051  * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
6052  * @trans: the trans handle that may have generated delayed refs
6053  *
6054  * This is to be called anytime we may have adjusted trans->delayed_ref_updates;
6055  * it will calculate the additional size and add it to the delayed_refs_rsv.
6056  */
6057 void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
6058 {
6059         struct btrfs_fs_info *fs_info = trans->fs_info;
6060         struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
6061         u64 num_bytes;
6062
6063         if (!trans->delayed_ref_updates)
6064                 return;
6065
6066         num_bytes = btrfs_calc_trans_metadata_size(fs_info,
6067                                                    trans->delayed_ref_updates);
6068         spin_lock(&delayed_rsv->lock);
6069         delayed_rsv->size += num_bytes;
6070         delayed_rsv->full = 0;
6071         spin_unlock(&delayed_rsv->lock);
6072         trans->delayed_ref_updates = 0;
6073 }
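
/*
 * Example (illustrative, assuming btrfs_calc_trans_metadata_size() charges
 * nodesize * BTRFS_MAX_LEVEL * 2 per item): with three pending
 * delayed_ref_updates on a 16 KiB nodesize filesystem the rsv size grows by
 * 3 * 256 KiB = 768 KiB, and the counter is reset so the same updates are
 * not charged twice.
 */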
6074
6075 /*
6076  * To be called after all the new block groups attached to the transaction
6077  * handle have been created (btrfs_create_pending_block_groups()).
6078  */
6079 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
6080 {
6081         struct btrfs_fs_info *fs_info = trans->fs_info;
6082
6083         if (!trans->chunk_bytes_reserved)
6084                 return;
6085
6086         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
6087
6088         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
6089                                 trans->chunk_bytes_reserved, NULL);
6090         trans->chunk_bytes_reserved = 0;
6091 }
6092
6093 /*
6094  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
6095  * root: the root of the parent directory
6096  * rsv: block reservation
6097  * items: the number of items that we need to reserve space for
6098  * use_global_rsv: allow fallback to the global block reservation
6099  *
6100  * This function is used to reserve the space for snapshot/subvolume
6101  * creation and deletion.  Those operations differ from the common
6102  * file/directory operations: they change two fs/file trees and the
6103  * root tree, and the number of items that the qgroup reserves differs
6104  * from the free space reservation.  So we cannot use the space
6105  * reservation mechanism in start_transaction().
6106  */
6107 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
6108                                      struct btrfs_block_rsv *rsv, int items,
6109                                      bool use_global_rsv)
6110 {
6111         u64 qgroup_num_bytes = 0;
6112         u64 num_bytes;
6113         int ret;
6114         struct btrfs_fs_info *fs_info = root->fs_info;
6115         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6116
6117         if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
6118                 /* One for parent inode, two for dir entries */
6119                 qgroup_num_bytes = 3 * fs_info->nodesize;
6120                 ret = btrfs_qgroup_reserve_meta_prealloc(root,
6121                                 qgroup_num_bytes, true);
6122                 if (ret)
6123                         return ret;
6124         }
6125
6126         num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
6127         rsv->space_info = __find_space_info(fs_info,
6128                                             BTRFS_BLOCK_GROUP_METADATA);
6129         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
6130                                   BTRFS_RESERVE_FLUSH_ALL);
6131
6132         if (ret == -ENOSPC && use_global_rsv)
6133                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, true);
6134
6135         if (ret && qgroup_num_bytes)
6136                 btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
6137
6138         return ret;
6139 }
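
/*
 * Example (illustrative only): on a 16 KiB nodesize filesystem with quotas
 * enabled, the qgroup prealloc above is 3 * 16 KiB = 48 KiB.  The metadata
 * reservation for @items items is then attempted with FLUSH_ALL, and if that
 * hits -ENOSPC with @use_global_rsv set, the bytes are migrated out of the
 * global rsv instead; on failure the qgroup prealloc is given back.
 */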
6140
6141 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
6142                                       struct btrfs_block_rsv *rsv)
6143 {
6144         btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
6145 }
6146
6147 static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
6148                                                  struct btrfs_inode *inode)
6149 {
6150         struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
6151         u64 reserve_size = 0;
6152         u64 qgroup_rsv_size = 0;
6153         u64 csum_leaves;
6154         unsigned outstanding_extents;
6155
6156         lockdep_assert_held(&inode->lock);
6157         outstanding_extents = inode->outstanding_extents;
6158         if (outstanding_extents)
6159                 reserve_size = btrfs_calc_trans_metadata_size(fs_info,
6160                                                 outstanding_extents + 1);
6161         csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
6162                                                  inode->csum_bytes);
6163         reserve_size += btrfs_calc_trans_metadata_size(fs_info,
6164                                                        csum_leaves);
6165         /*
6166          * For qgroup rsv, the calculation is very simple:
6167          * account one nodesize for each outstanding extent
6168          *
6169          * This is an overestimate in most cases.
6170          */
6171         qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
6172
6173         spin_lock(&block_rsv->lock);
6174         block_rsv->size = reserve_size;
6175         block_rsv->qgroup_rsv_size = qgroup_rsv_size;
6176         spin_unlock(&block_rsv->lock);
6177 }
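
/*
 * Worked example for the calculation above (illustrative only): an inode
 * with 4 outstanding extents and 1 MiB of csum_bytes on a 16 KiB nodesize
 * filesystem gets a qgroup_rsv_size of 4 * 16 KiB = 64 KiB, while
 * reserve_size covers metadata for 4 + 1 items plus however many csum
 * leaves btrfs_csum_bytes_to_leaves() says are needed to hold the checksums
 * for 1 MiB of data.
 */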
6178
6179 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
6180 {
6181         struct btrfs_fs_info *fs_info = inode->root->fs_info;
6182         unsigned nr_extents;
6183         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
6184         int ret = 0;
6185         bool delalloc_lock = true;
6186
6187         /* If we are a free space inode we need to not flush since we will be in
6188          * the middle of a transaction commit.  We also don't need the delalloc
6189          * mutex since we won't race with anybody.  We need this mostly to make
6190          * lockdep shut its filthy mouth.
6191          *
6192          * If we have a transaction open (can happen if we call truncate_block
6193          * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
6194          */
6195         if (btrfs_is_free_space_inode(inode)) {
6196                 flush = BTRFS_RESERVE_NO_FLUSH;
6197                 delalloc_lock = false;
6198         } else {
6199                 if (current->journal_info)
6200                         flush = BTRFS_RESERVE_FLUSH_LIMIT;
6201
6202                 if (btrfs_transaction_in_commit(fs_info))
6203                         schedule_timeout(1);
6204         }
6205
6206         if (delalloc_lock)
6207                 mutex_lock(&inode->delalloc_mutex);
6208
6209         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
6210
6211         /* Add our new extents and calculate the new rsv size. */
6212         spin_lock(&inode->lock);
6213         nr_extents = count_max_extents(num_bytes);
6214         btrfs_mod_outstanding_extents(inode, nr_extents);
6215         inode->csum_bytes += num_bytes;
6216         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6217         spin_unlock(&inode->lock);
6218
6219         ret = btrfs_inode_rsv_refill(inode, flush);
6220         if (unlikely(ret))
6221                 goto out_fail;
6222
6223         if (delalloc_lock)
6224                 mutex_unlock(&inode->delalloc_mutex);
6225         return 0;
6226
6227 out_fail:
6228         spin_lock(&inode->lock);
6229         nr_extents = count_max_extents(num_bytes);
6230         btrfs_mod_outstanding_extents(inode, -nr_extents);
6231         inode->csum_bytes -= num_bytes;
6232         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6233         spin_unlock(&inode->lock);
6234
6235         btrfs_inode_rsv_release(inode, true);
6236         if (delalloc_lock)
6237                 mutex_unlock(&inode->delalloc_mutex);
6238         return ret;
6239 }
6240
6241 /**
6242  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
6243  * @inode: the inode to release the reservation for.
6244  * @num_bytes: the number of bytes we are releasing.
6245  * @qgroup_free: free qgroup reservation or convert it to per-trans reservation
6246  *
6247  * This will release the metadata reservation for an inode.  This can be called
6248  * once we complete IO for a given set of bytes to release their metadata
6249  * reservations, or on error for the same reason.
6250  */
6251 void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
6252                                      bool qgroup_free)
6253 {
6254         struct btrfs_fs_info *fs_info = inode->root->fs_info;
6255
6256         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
6257         spin_lock(&inode->lock);
6258         inode->csum_bytes -= num_bytes;
6259         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6260         spin_unlock(&inode->lock);
6261
6262         if (btrfs_is_testing(fs_info))
6263                 return;
6264
6265         btrfs_inode_rsv_release(inode, qgroup_free);
6266 }
6267
6268 /**
6269  * btrfs_delalloc_release_extents - release our outstanding_extents
6270  * @inode: the inode to balance the reservation for.
6271  * @num_bytes: the number of bytes the original reservation was made with
6272  * @qgroup_free: do we need to free qgroup meta reservation or convert them.
6273  *
6274  * When we reserve space we increase outstanding_extents for the extents we may
6275  * add.  Once we've set the range as delalloc or created our ordered extents we
6276  * have outstanding_extents to track the real usage, so we use this to free our
6277  * temporarily tracked outstanding_extents.  This _must_ be used in conjunction
6278  * with btrfs_delalloc_reserve_metadata.
6279  */
6280 void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
6281                                     bool qgroup_free)
6282 {
6283         struct btrfs_fs_info *fs_info = inode->root->fs_info;
6284         unsigned num_extents;
6285
6286         spin_lock(&inode->lock);
6287         num_extents = count_max_extents(num_bytes);
6288         btrfs_mod_outstanding_extents(inode, -num_extents);
6289         btrfs_calculate_inode_block_rsv_size(fs_info, inode);
6290         spin_unlock(&inode->lock);
6291
6292         if (btrfs_is_testing(fs_info))
6293                 return;
6294
6295         btrfs_inode_rsv_release(inode, qgroup_free);
6296 }
6297
6298 /**
6299  * btrfs_delalloc_reserve_space - reserve data and metadata space for
6300  * delalloc
6301  * @inode: inode we're writing to
6302  * @start: start offset of the range we are writing to
6303  * @len: length of the range we are writing to
6304  * @reserved: mandatory parameter, records the qgroup ranges actually reserved
6305  *            by the current reservation.
6306  *
6307  * This will do the following things
6308  *
6309  * o reserve space in data space info for num bytes
6310  *   and reserve the corresponding qgroup space
6311  *   (Done in check_data_free_space)
6312  *
6313  * o reserve space for metadata space, based on the number of outstanding
6314  *   extents and how much csums will be needed
6315  *   also reserve metadata space in a per root over-reserve method.
6316  * o add to the inodes->delalloc_bytes
6317  * o add it to the fs_info's delalloc inodes list.
6318  *   (Above 3 all done in delalloc_reserve_metadata)
6319  *
6320  * Return 0 for success
6321  * Return <0 for error (-ENOSPC or -EDQUOT)
6322  */
6323 int btrfs_delalloc_reserve_space(struct inode *inode,
6324                         struct extent_changeset **reserved, u64 start, u64 len)
6325 {
6326         int ret;
6327
6328         ret = btrfs_check_data_free_space(inode, reserved, start, len);
6329         if (ret < 0)
6330                 return ret;
6331         ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
6332         if (ret < 0)
6333                 btrfs_free_reserved_data_space(inode, *reserved, start, len);
6334         return ret;
6335 }
6336
6337 /**
6338  * btrfs_delalloc_release_space - release data and metadata space for delalloc
6339  * @inode: inode we're releasing space for
6340  * @start: start position of the space already reserved
6341  * @len: length of the space already reserved
6342  * @qgroup_free: free the qgroup meta reservation rather than convert it
6343  *
6344  * This function will release the metadata space that was not used and will
6345  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
6346  * list if there are no delalloc bytes left.
6347  * Also it will handle the qgroup reserved space.
6348  */
6349 void btrfs_delalloc_release_space(struct inode *inode,
6350                                   struct extent_changeset *reserved,
6351                                   u64 start, u64 len, bool qgroup_free)
6352 {
6353         btrfs_delalloc_release_metadata(BTRFS_I(inode), len, qgroup_free);
6354         btrfs_free_reserved_data_space(inode, reserved, start, len);
6355 }
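
/*
 * A sketch of how the delalloc helpers pair up on a write path (simplified
 * and hypothetical; real callers also handle partial copies and ordered
 * extent setup).  Following the rule above that @qgroup_free is true for
 * error handling and false for a normal release:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, &reserved, pos, len);
 *	if (ret)
 *		return ret;
 *	... dirty the pages and set the range delalloc ...
 *	btrfs_delalloc_release_extents(BTRFS_I(inode), len, false);
 *
 * and if an error occurs after the reservation succeeded:
 *
 *	btrfs_delalloc_release_extents(BTRFS_I(inode), len, true);
 *	btrfs_delalloc_release_space(inode, reserved, pos, len, true);
 */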
6356
6357 static int update_block_group(struct btrfs_trans_handle *trans,
6358                               struct btrfs_fs_info *info, u64 bytenr,
6359                               u64 num_bytes, int alloc)
6360 {
6361         struct btrfs_block_group_cache *cache = NULL;
6362         u64 total = num_bytes;
6363         u64 old_val;
6364         u64 byte_in_group;
6365         int factor;
6366         int ret = 0;
6367
6368         /* block accounting for super block */
6369         spin_lock(&info->delalloc_root_lock);
6370         old_val = btrfs_super_bytes_used(info->super_copy);
6371         if (alloc)
6372                 old_val += num_bytes;
6373         else
6374                 old_val -= num_bytes;
6375         btrfs_set_super_bytes_used(info->super_copy, old_val);
6376         spin_unlock(&info->delalloc_root_lock);
6377
6378         while (total) {
6379                 cache = btrfs_lookup_block_group(info, bytenr);
6380                 if (!cache) {
6381                         ret = -ENOENT;
6382                         break;
6383                 }
6384                 factor = btrfs_bg_type_to_factor(cache->flags);
6385
6386                 /*
6387                  * If this block group has free space cache written out, we
6388                  * need to make sure to load it if we are removing space.  This
6389                  * is because we need the unpinning stage to actually add the
6390                  * space back to the block group, otherwise we will leak space.
6391                  */
6392                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
6393                         cache_block_group(cache, 1);
6394
6395                 byte_in_group = bytenr - cache->key.objectid;
6396                 WARN_ON(byte_in_group > cache->key.offset);
6397
6398                 spin_lock(&cache->space_info->lock);
6399                 spin_lock(&cache->lock);
6400
6401                 if (btrfs_test_opt(info, SPACE_CACHE) &&
6402                     cache->disk_cache_state < BTRFS_DC_CLEAR)
6403                         cache->disk_cache_state = BTRFS_DC_CLEAR;
6404
6405                 old_val = btrfs_block_group_used(&cache->item);
6406                 num_bytes = min(total, cache->key.offset - byte_in_group);
6407                 if (alloc) {
6408                         old_val += num_bytes;
6409                         btrfs_set_block_group_used(&cache->item, old_val);
6410                         cache->reserved -= num_bytes;
6411                         cache->space_info->bytes_reserved -= num_bytes;
6412                         cache->space_info->bytes_used += num_bytes;
6413                         cache->space_info->disk_used += num_bytes * factor;
6414                         spin_unlock(&cache->lock);
6415                         spin_unlock(&cache->space_info->lock);
6416                 } else {
6417                         old_val -= num_bytes;
6418                         btrfs_set_block_group_used(&cache->item, old_val);
6419                         cache->pinned += num_bytes;
6420                         update_bytes_pinned(cache->space_info, num_bytes);
6421                         cache->space_info->bytes_used -= num_bytes;
6422                         cache->space_info->disk_used -= num_bytes * factor;
6423                         spin_unlock(&cache->lock);
6424                         spin_unlock(&cache->space_info->lock);
6425
6426                         trace_btrfs_space_reservation(info, "pinned",
6427                                                       cache->space_info->flags,
6428                                                       num_bytes, 1);
6429                         percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
6430                                            num_bytes,
6431                                            BTRFS_TOTAL_BYTES_PINNED_BATCH);
6432                         set_extent_dirty(info->pinned_extents,
6433                                          bytenr, bytenr + num_bytes - 1,
6434                                          GFP_NOFS | __GFP_NOFAIL);
6435                 }
6436
6437                 spin_lock(&trans->transaction->dirty_bgs_lock);
6438                 if (list_empty(&cache->dirty_list)) {
6439                         list_add_tail(&cache->dirty_list,
6440                                       &trans->transaction->dirty_bgs);
6441                         trans->transaction->num_dirty_bgs++;
6442                         trans->delayed_ref_updates++;
6443                         btrfs_get_block_group(cache);
6444                 }
6445                 spin_unlock(&trans->transaction->dirty_bgs_lock);
6446
6447                 /*
6448                  * No longer have used bytes in this block group, queue it for
6449                  * deletion. We do this after adding the block group to the
6450                  * dirty list to avoid races between cleaner kthread and space
6451                  * cache writeout.
6452                  */
6453                 if (!alloc && old_val == 0)
6454                         btrfs_mark_bg_unused(cache);
6455
6456                 btrfs_put_block_group(cache);
6457                 total -= num_bytes;
6458                 bytenr += num_bytes;
6459         }
6460
6461         /* Modified block groups are accounted for in the delayed_refs_rsv. */
6462         btrfs_update_delayed_refs_rsv(trans);
6463         return ret;
6464 }
6465
6466 static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
6467 {
6468         struct btrfs_block_group_cache *cache;
6469         u64 bytenr;
6470
6471         spin_lock(&fs_info->block_group_cache_lock);
6472         bytenr = fs_info->first_logical_byte;
6473         spin_unlock(&fs_info->block_group_cache_lock);
6474
6475         if (bytenr < (u64)-1)
6476                 return bytenr;
6477
6478         cache = btrfs_lookup_first_block_group(fs_info, search_start);
6479         if (!cache)
6480                 return 0;
6481
6482         bytenr = cache->key.objectid;
6483         btrfs_put_block_group(cache);
6484
6485         return bytenr;
6486 }
6487
6488 static int pin_down_extent(struct btrfs_fs_info *fs_info,
6489                            struct btrfs_block_group_cache *cache,
6490                            u64 bytenr, u64 num_bytes, int reserved)
6491 {
6492         spin_lock(&cache->space_info->lock);
6493         spin_lock(&cache->lock);
6494         cache->pinned += num_bytes;
6495         update_bytes_pinned(cache->space_info, num_bytes);
6496         if (reserved) {
6497                 cache->reserved -= num_bytes;
6498                 cache->space_info->bytes_reserved -= num_bytes;
6499         }
6500         spin_unlock(&cache->lock);
6501         spin_unlock(&cache->space_info->lock);
6502
6503         trace_btrfs_space_reservation(fs_info, "pinned",
6504                                       cache->space_info->flags, num_bytes, 1);
6505         percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
6506                     num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
6507         set_extent_dirty(fs_info->pinned_extents, bytenr,
6508                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6509         return 0;
6510 }
6511
6512 /*
6513  * this function must be called within transaction
6514  */
6515 int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
6516                      u64 bytenr, u64 num_bytes, int reserved)
6517 {
6518         struct btrfs_block_group_cache *cache;
6519
6520         cache = btrfs_lookup_block_group(fs_info, bytenr);
6521         BUG_ON(!cache); /* Logic error */
6522
6523         pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);
6524
6525         btrfs_put_block_group(cache);
6526         return 0;
6527 }
6528
6529 /*
6530  * this function must be called within transaction
6531  */
6532 int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
6533                                     u64 bytenr, u64 num_bytes)
6534 {
6535         struct btrfs_block_group_cache *cache;
6536         int ret;
6537
6538         cache = btrfs_lookup_block_group(fs_info, bytenr);
6539         if (!cache)
6540                 return -EINVAL;
6541
6542         /*
6543          * pull in the free space cache (if any) so that our pin
6544          * removes the free space from the cache.  We have load_only set
6545          * to one because the slow code to read in the free extents does check
6546          * the pinned extents.
6547          */
6548         cache_block_group(cache, 1);
6549
6550         pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);
6551
6552         /* remove us from the free space cache (if we're there at all) */
6553         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6554         btrfs_put_block_group(cache);
6555         return ret;
6556 }
6557
6558 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
6559                                    u64 start, u64 num_bytes)
6560 {
6561         int ret;
6562         struct btrfs_block_group_cache *block_group;
6563         struct btrfs_caching_control *caching_ctl;
6564
6565         block_group = btrfs_lookup_block_group(fs_info, start);
6566         if (!block_group)
6567                 return -EINVAL;
6568
6569         cache_block_group(block_group, 0);
6570         caching_ctl = get_caching_control(block_group);
6571
6572         if (!caching_ctl) {
6573                 /* Logic error */
6574                 BUG_ON(!block_group_cache_done(block_group));
6575                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6576         } else {
6577                 mutex_lock(&caching_ctl->mutex);
6578
6579                 if (start >= caching_ctl->progress) {
6580                         ret = add_excluded_extent(fs_info, start, num_bytes);
6581                 } else if (start + num_bytes <= caching_ctl->progress) {
6582                         ret = btrfs_remove_free_space(block_group,
6583                                                       start, num_bytes);
6584                 } else {
6585                         num_bytes = caching_ctl->progress - start;
6586                         ret = btrfs_remove_free_space(block_group,
6587                                                       start, num_bytes);
6588                         if (ret)
6589                                 goto out_lock;
6590
6591                         num_bytes = (start + num_bytes) -
6592                                 caching_ctl->progress;
6593                         start = caching_ctl->progress;
6594                         ret = add_excluded_extent(fs_info, start, num_bytes);
6595                 }
6596 out_lock:
6597                 mutex_unlock(&caching_ctl->mutex);
6598                 put_caching_control(caching_ctl);
6599         }
6600         btrfs_put_block_group(block_group);
6601         return ret;
6602 }
6603
6604 int btrfs_exclude_logged_extents(struct extent_buffer *eb)
6605 {
6606         struct btrfs_fs_info *fs_info = eb->fs_info;
6607         struct btrfs_file_extent_item *item;
6608         struct btrfs_key key;
6609         int found_type;
6610         int i;
6611         int ret = 0;
6612
6613         if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
6614                 return 0;
6615
6616         for (i = 0; i < btrfs_header_nritems(eb); i++) {
6617                 btrfs_item_key_to_cpu(eb, &key, i);
6618                 if (key.type != BTRFS_EXTENT_DATA_KEY)
6619                         continue;
6620                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6621                 found_type = btrfs_file_extent_type(eb, item);
6622                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6623                         continue;
6624                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6625                         continue;
6626                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6627                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6628                 ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
6629                 if (ret)
6630                         break;
6631         }
6632
6633         return ret;
6634 }
6635
6636 static void
6637 btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
6638 {
6639         atomic_inc(&bg->reservations);
6640 }
6641
6642 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
6643                                         const u64 start)
6644 {
6645         struct btrfs_block_group_cache *bg;
6646
6647         bg = btrfs_lookup_block_group(fs_info, start);
6648         ASSERT(bg);
6649         if (atomic_dec_and_test(&bg->reservations))
6650                 wake_up_var(&bg->reservations);
6651         btrfs_put_block_group(bg);
6652 }
6653
6654 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
6655 {
6656         struct btrfs_space_info *space_info = bg->space_info;
6657
6658         ASSERT(bg->ro);
6659
6660         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
6661                 return;
6662
6663         /*
6664          * Our block group is read only but before we set it to read only,
6665          * some task might have allocated an extent from it already, but it
6666          * has not yet created the respective ordered extent (and added it to a
6667          * root's list of ordered extents).
6668          * Therefore wait for any task currently allocating extents, since the
6669          * block group's reservations counter is incremented while a read lock
6670          * on the groups' semaphore is held and decremented after releasing
6671          * the read access on that semaphore and creating the ordered extent.
6672          */
6673         down_write(&space_info->groups_sem);
6674         up_write(&space_info->groups_sem);
6675
6676         wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
6677 }
6678
6679 /**
6680  * btrfs_add_reserved_bytes - update the block_group and space info counters
6681  * @cache:      The cache we are manipulating
6682  * @ram_bytes:  The number of bytes of file content, which will be the same
6683  *              as @num_bytes except on the compression path.
6684  * @num_bytes:  The number of bytes in question
6685  * @delalloc:   The blocks are allocated for the delalloc write
6686  *
6687  * This is called by the allocator when it reserves space. If this is a
6688  * reservation and the block group has become read only, we cannot make the
6689  * reservation and return -EAGAIN; otherwise this function always succeeds.
6690  */
6691 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
6692                                     u64 ram_bytes, u64 num_bytes, int delalloc)
6693 {
6694         struct btrfs_space_info *space_info = cache->space_info;
6695         int ret = 0;
6696
6697         spin_lock(&space_info->lock);
6698         spin_lock(&cache->lock);
6699         if (cache->ro) {
6700                 ret = -EAGAIN;
6701         } else {
6702                 cache->reserved += num_bytes;
6703                 space_info->bytes_reserved += num_bytes;
6704                 update_bytes_may_use(space_info, -ram_bytes);
6705                 if (delalloc)
6706                         cache->delalloc_bytes += num_bytes;
6707         }
6708         spin_unlock(&cache->lock);
6709         spin_unlock(&space_info->lock);
6710         return ret;
6711 }
6712
6713 /**
6714  * btrfs_free_reserved_bytes - update the block_group and space info counters
6715  * @cache:      The cache we are manipulating
6716  * @num_bytes:  The number of bytes in question
6717  * @delalloc:   The blocks are allocated for the delalloc write
6718  *
6719  * This is called by somebody who is freeing space that was never actually used
6720  * on disk.  For example, if you reserve some space for a new leaf in transaction
6721  * A and, before transaction A commits, you free that leaf, you call this with
6722  * reserve set to 0 in order to clear the reservation.
6723  */
6724
6725 static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
6726                                       u64 num_bytes, int delalloc)
6727 {
6728         struct btrfs_space_info *space_info = cache->space_info;
6729
6730         spin_lock(&space_info->lock);
6731         spin_lock(&cache->lock);
6732         if (cache->ro)
6733                 space_info->bytes_readonly += num_bytes;
6734         cache->reserved -= num_bytes;
6735         space_info->bytes_reserved -= num_bytes;
6736         space_info->max_extent_size = 0;
6737
6738         if (delalloc)
6739                 cache->delalloc_bytes -= num_bytes;
6740         spin_unlock(&cache->lock);
6741         spin_unlock(&space_info->lock);
6742 }
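
/*
 * Prepare for unpinning extents at transaction commit time: for each block
 * group still being cached, record how far the caching thread has progressed
 * in last_byte_to_unpin (fully cached groups get (u64)-1), then switch
 * fs_info->pinned_extents to the other freed_extents tree so extents pinned
 * from now on are kept separate from the ones about to be unpinned.
 */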
6743 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
6744 {
6745         struct btrfs_caching_control *next;
6746         struct btrfs_caching_control *caching_ctl;
6747         struct btrfs_block_group_cache *cache;
6748
6749         down_write(&fs_info->commit_root_sem);
6750
6751         list_for_each_entry_safe(caching_ctl, next,
6752                                  &fs_info->caching_block_groups, list) {
6753                 cache = caching_ctl->block_group;
6754                 if (block_group_cache_done(cache)) {
6755                         cache->last_byte_to_unpin = (u64)-1;
6756                         list_del_init(&caching_ctl->list);
6757                         put_caching_control(caching_ctl);
6758                 } else {
6759                         cache->last_byte_to_unpin = caching_ctl->progress;
6760                 }
6761         }
6762
6763         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6764                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6765         else
6766                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6767
6768         up_write(&fs_info->commit_root_sem);
6769
6770         update_global_block_rsv(fs_info);
6771 }
6772
6773 /*
6774  * Returns the free cluster for the given space info and sets empty_cluster to
6775  * what it should be based on the mount options.
6776  */
6777 static struct btrfs_free_cluster *
6778 fetch_cluster_info(struct btrfs_fs_info *fs_info,
6779                    struct btrfs_space_info *space_info, u64 *empty_cluster)
6780 {
6781         struct btrfs_free_cluster *ret = NULL;
6782
6783         *empty_cluster = 0;
6784         if (btrfs_mixed_space_info(space_info))
6785                 return ret;
6786
6787         if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6788                 ret = &fs_info->meta_alloc_cluster;
6789                 if (btrfs_test_opt(fs_info, SSD))
6790                         *empty_cluster = SZ_2M;
6791                 else
6792                         *empty_cluster = SZ_64K;
6793         } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
6794                    btrfs_test_opt(fs_info, SSD_SPREAD)) {
6795                 *empty_cluster = SZ_2M;
6796                 ret = &fs_info->data_alloc_cluster;
6797         }
6798
6799         return ret;
6800 }
6801
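/*
 * Unpin the byte range [start, end], walking it block group by block group.
 * The pinned accounting is dropped for each group and, when
 * @return_free_space is set, space below the group's last_byte_to_unpin is
 * handed back to the free space cache.  Space that becomes usable again is
 * used to refill the global block reserve, with any remainder going to
 * pending space tickets.
 */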
6802 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
6803                               u64 start, u64 end,
6804                               const bool return_free_space)
6805 {
6806         struct btrfs_block_group_cache *cache = NULL;
6807         struct btrfs_space_info *space_info;
6808         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6809         struct btrfs_free_cluster *cluster = NULL;
6810         u64 len;
6811         u64 total_unpinned = 0;
6812         u64 empty_cluster = 0;
6813         bool readonly;
6814
6815         while (start <= end) {
6816                 readonly = false;
6817                 if (!cache ||
6818                     start >= cache->key.objectid + cache->key.offset) {
6819                         if (cache)
6820                                 btrfs_put_block_group(cache);
6821                         total_unpinned = 0;
6822                         cache = btrfs_lookup_block_group(fs_info, start);
6823                         BUG_ON(!cache); /* Logic error */
6824
6825                         cluster = fetch_cluster_info(fs_info,
6826                                                      cache->space_info,
6827                                                      &empty_cluster);
6828                         empty_cluster <<= 1;
6829                 }
6830
6831                 len = cache->key.objectid + cache->key.offset - start;
6832                 len = min(len, end + 1 - start);
6833
6834                 if (start < cache->last_byte_to_unpin) {
6835                         len = min(len, cache->last_byte_to_unpin - start);
6836                         if (return_free_space)
6837                                 btrfs_add_free_space(cache, start, len);
6838                 }
6839
6840                 start += len;
6841                 total_unpinned += len;
6842                 space_info = cache->space_info;
6843
6844                 /*
6845                  * If this space cluster has been marked as fragmented and we've
6846                  * unpinned enough in this block group to potentially allow a
6847                  * cluster to be created inside of it, go ahead and clear the
6848                  * fragmented flag.
6849                  */
6850                 if (cluster && cluster->fragmented &&
6851                     total_unpinned > empty_cluster) {
6852                         spin_lock(&cluster->lock);
6853                         cluster->fragmented = 0;
6854                         spin_unlock(&cluster->lock);
6855                 }
6856
6857                 spin_lock(&space_info->lock);
6858                 spin_lock(&cache->lock);
6859                 cache->pinned -= len;
6860                 update_bytes_pinned(space_info, -len);
6861
6862                 trace_btrfs_space_reservation(fs_info, "pinned",
6863                                               space_info->flags, len, 0);
6864                 space_info->max_extent_size = 0;
6865                 percpu_counter_add_batch(&space_info->total_bytes_pinned,
6866                             -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
6867                 if (cache->ro) {
6868                         space_info->bytes_readonly += len;
6869                         readonly = true;
6870                 }
6871                 spin_unlock(&cache->lock);
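                /*
                 * Space that becomes usable again refills the global block
                 * reserve first; anything the reserve can't absorb is handed
                 * to pending space tickets below.
                 */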
6872                 if (!readonly && return_free_space &&
6873                     global_rsv->space_info == space_info) {
6874                         u64 to_add = len;
6875
6876                         spin_lock(&global_rsv->lock);
6877                         if (!global_rsv->full) {
6878                                 to_add = min(len, global_rsv->size -
6879                                              global_rsv->reserved);
6880                                 global_rsv->reserved += to_add;
6881                                 update_bytes_may_use(space_info, to_add);
6882                                 if (global_rsv->reserved >= global_rsv->size)
6883                                         global_rsv->full = 1;
6884                                 trace_btrfs_space_reservation(fs_info,
6885                                                               "space_info",
6886                                                               space_info->flags,
6887                                                               to_add, 1);
6888                                 len -= to_add;
6889                         }
6890                         spin_unlock(&global_rsv->lock);
6891                         /* Add to any tickets we may have */
6892                         if (len)
6893                                 space_info_add_new_bytes(fs_info, space_info,
6894                                                          len);
6895                 }
6896                 spin_unlock(&space_info->lock);
6897         }
6898
6899         if (cache)
6900                 btrfs_put_block_group(cache);
6901         return 0;
6902 }
6903
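/*
 * Called towards the end of a transaction commit.  Walks the freed_extents
 * tree that is no longer collecting new pins (btrfs_prepare_extent_commit()
 * swapped them), issuing discards if the DISCARD mount option is set and
 * unpinning each range.  Afterwards any block groups deleted during this
 * transaction are discarded and their last references dropped.
 */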
6904 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
6905 {
6906         struct btrfs_fs_info *fs_info = trans->fs_info;
6907         struct btrfs_block_group_cache *block_group, *tmp;
6908         struct list_head *deleted_bgs;
6909         struct extent_io_tree *unpin;
6910         u64 start;
6911         u64 end;
6912         int ret;
6913
6914         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6915                 unpin = &fs_info->freed_extents[1];
6916         else
6917                 unpin = &fs_info->freed_extents[0];
6918
6919         while (!trans->aborted) {
6920                 struct extent_state *cached_state = NULL;
6921
6922                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6923                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6924                                             EXTENT_DIRTY, &cached_state);
6925                 if (ret) {
6926                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6927                         break;
6928                 }
6929
6930                 if (btrfs_test_opt(fs_info, DISCARD))
6931                         ret = btrfs_discard_extent(fs_info, start,
6932                                                    end + 1 - start, NULL);
6933
6934                 clear_extent_dirty(unpin, start, end, &cached_state);
6935                 unpin_extent_range(fs_info, start, end, true);
6936                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6937                 free_extent_state(cached_state);
6938                 cond_resched();
6939         }
6940
6941         /*
6942          * Transaction is finished.  We don't need the lock anymore.  We
6943          * do need to clean up the block groups in case of a transaction
6944          * abort.
6945          */
6946         deleted_bgs = &trans->transaction->deleted_bgs;
6947         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6948                 u64 trimmed = 0;
6949
6950                 ret = -EROFS;
6951                 if (!trans->aborted)
6952                         ret = btrfs_discard_extent(fs_info,
6953                                                    block_group->key.objectid,
6954                                                    block_group->key.offset,
6955                                                    &trimmed);
6956
6957                 list_del_init(&block_group->bg_list);
6958                 btrfs_put_block_group_trimming(block_group);
6959                 btrfs_put_block_group(block_group);
6960
6961                 if (ret) {
6962                         const char *errstr = btrfs_decode_error(ret);
6963                         btrfs_warn(fs_info,
6964                            "discard failed while removing blockgroup: errno=%d %s",
6965                                    ret, errstr);
6966                 }
6967         }
6968
6969         return 0;
6970 }
6971
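/*
 * Drop @refs_to_drop references from the extent described by @node.  The
 * matching backref (inline or keyed) is located and removed; if this was the
 * last reference, the extent item itself is deleted, checksums are dropped
 * for data extents, and the free space tree and block group counters are
 * updated accordingly.
 */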
6972 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6973                                struct btrfs_delayed_ref_node *node, u64 parent,
6974                                u64 root_objectid, u64 owner_objectid,
6975                                u64 owner_offset, int refs_to_drop,
6976                                struct btrfs_delayed_extent_op *extent_op)
6977 {
6978         struct btrfs_fs_info *info = trans->fs_info;
6979         struct btrfs_key key;
6980         struct btrfs_path *path;
6981         struct btrfs_root *extent_root = info->extent_root;
6982         struct extent_buffer *leaf;
6983         struct btrfs_extent_item *ei;
6984         struct btrfs_extent_inline_ref *iref;
6985         int ret;
6986         int is_data;
6987         int extent_slot = 0;
6988         int found_extent = 0;
6989         int num_to_del = 1;
6990         u32 item_size;
6991         u64 refs;
6992         u64 bytenr = node->bytenr;
6993         u64 num_bytes = node->num_bytes;
6994         int last_ref = 0;
6995         bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
6996
6997         path = btrfs_alloc_path();
6998         if (!path)
6999                 return -ENOMEM;
7000
7001         path->reada = READA_FORWARD;
7002         path->leave_spinning = 1;
7003
7004         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
7005         BUG_ON(!is_data && refs_to_drop != 1);
7006
7007         if (is_data)
7008                 skinny_metadata = false;
7009
7010         ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
7011                                     parent, root_objectid, owner_objectid,
7012                                     owner_offset);
7013         if (ret == 0) {
7014                 extent_slot = path->slots[0];
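                /*
                 * The extent item for @bytenr normally lives just before the
                 * backref we found, so scan backwards at most a few slots in
                 * this leaf looking for the matching EXTENT_ITEM or
                 * METADATA_ITEM key.
                 */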
7015                 while (extent_slot >= 0) {
7016                         btrfs_item_key_to_cpu(path->nodes[0], &key,
7017                                               extent_slot);
7018                         if (key.objectid != bytenr)
7019                                 break;
7020                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
7021                             key.offset == num_bytes) {
7022                                 found_extent = 1;
7023                                 break;
7024                         }
7025                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
7026                             key.offset == owner_objectid) {
7027                                 found_extent = 1;
7028                                 break;
7029                         }
7030                         if (path->slots[0] - extent_slot > 5)
7031                                 break;
7032                         extent_slot--;
7033                 }
7034
7035                 if (!found_extent) {
7036                         BUG_ON(iref);
7037                         ret = remove_extent_backref(trans, path, NULL,
7038                                                     refs_to_drop,
7039                                                     is_data, &last_ref);
7040                         if (ret) {
7041                                 btrfs_abort_transaction(trans, ret);
7042                                 goto out;
7043                         }
7044                         btrfs_release_path(path);
7045                         path->leave_spinning = 1;
7046
7047                         key.objectid = bytenr;
7048                         key.type = BTRFS_EXTENT_ITEM_KEY;
7049                         key.offset = num_bytes;
7050
7051                         if (!is_data && skinny_metadata) {
7052                                 key.type = BTRFS_METADATA_ITEM_KEY;
7053                                 key.offset = owner_objectid;
7054                         }
7055
7056                         ret = btrfs_search_slot(trans, extent_root,
7057                                                 &key, path, -1, 1);
7058                         if (ret > 0 && skinny_metadata && path->slots[0]) {
7059                                 /*
7060                                  * Couldn't find our skinny metadata item,
7061                                  * see if we have ye olde extent item.
7062                                  */
7063                                 path->slots[0]--;
7064                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
7065                                                       path->slots[0]);
7066                                 if (key.objectid == bytenr &&
7067                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
7068                                     key.offset == num_bytes)
7069                                         ret = 0;
7070                         }
7071
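                        /*
                         * Still nothing as a skinny metadata item; fall back
                         * to searching for a regular EXTENT_ITEM keyed by
                         * size.
                         */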
7072                         if (ret > 0 && skinny_metadata) {
7073                                 skinny_metadata = false;
7074                                 key.objectid = bytenr;
7075                                 key.type = BTRFS_EXTENT_ITEM_KEY;
7076                                 key.offset = num_bytes;
7077                                 btrfs_release_path(path);
7078                                 ret = btrfs_search_slot(trans, extent_root,
7079                                                         &key, path, -1, 1);
7080                         }
7081
7082                         if (ret) {
7083                                 btrfs_err(info,
7084                                           "umm, got %d back from search, was looking for %llu",
7085                                           ret, bytenr);
7086                                 if (ret > 0)
7087                                         btrfs_print_leaf(path->nodes[0]);
7088                         }
7089                         if (ret < 0) {
7090                                 btrfs_abort_transaction(trans, ret);
7091                                 goto out;
7092                         }
7093                         extent_slot = path->slots[0];
7094                 }
7095         } else if (WARN_ON(ret == -ENOENT)) {
7096                 btrfs_print_leaf(path->nodes[0]);
7097                 btrfs_err(info,
7098                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
7099                         bytenr, parent, root_objectid, owner_objectid,
7100                         owner_offset);
7101                 btrfs_abort_transaction(trans, ret);
7102                 goto out;
7103         } else {
7104                 btrfs_abort_transaction(trans, ret);
7105                 goto out;
7106         }
7107
7108         leaf = path->nodes[0];
7109         item_size = btrfs_item_size_nr(leaf, extent_slot);
7110         if (unlikely(item_size < sizeof(*ei))) {
7111                 ret = -EINVAL;
7112                 btrfs_print_v0_err(info);
7113                 btrfs_abort_transaction(trans, ret);
7114                 goto out;
7115         }
7116         ei = btrfs_item_ptr(leaf, extent_slot,
7117                             struct btrfs_extent_item);
7118         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
7119             key.type == BTRFS_EXTENT_ITEM_KEY) {
7120                 struct btrfs_tree_block_info *bi;
7121                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
7122                 bi = (struct btrfs_tree_block_info *)(ei + 1);
7123                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
7124         }
7125
7126         refs = btrfs_extent_refs(leaf, ei);
7127         if (refs < refs_to_drop) {
7128                 btrfs_err(info,
7129                           "trying to drop %d refs but we only have %Lu for bytenr %Lu",
7130                           refs_to_drop, refs, bytenr);
7131                 ret = -EINVAL;
7132                 btrfs_abort_transaction(trans, ret);
7133                 goto out;
7134         }
7135         refs -= refs_to_drop;
7136
7137         if (refs > 0) {
7138                 if (extent_op)
7139                         __run_delayed_extent_op(extent_op, leaf, ei);
7140                 /*
7141                  * In the case of inline back ref, reference count will
7142                  * In the case of an inline back ref, the reference count will
7143                  */
7144                 if (iref) {
7145                         BUG_ON(!found_extent);
7146                 } else {
7147                         btrfs_set_extent_refs(leaf, ei, refs);
7148                         btrfs_mark_buffer_dirty(leaf);
7149                 }
7150                 if (found_extent) {
7151                         ret = remove_extent_backref(trans, path, iref,
7152                                                     refs_to_drop, is_data,
7153                                                     &last_ref);
7154                         if (ret) {
7155                                 btrfs_abort_transaction(trans, ret);
7156                                 goto out;
7157                         }
7158                 }
7159         } else {
7160                 if (found_extent) {
7161                         BUG_ON(is_data && refs_to_drop !=
7162                                extent_data_ref_count(path, iref));
7163                         if (iref) {
7164                                 BUG_ON(path->slots[0] != extent_slot);
7165                         } else {
7166                                 BUG_ON(path->slots[0] != extent_slot + 1);
7167                                 path->slots[0] = extent_slot;
7168                                 num_to_del = 2;
7169                         }
7170                 }
7171
7172                 last_ref = 1;
7173                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
7174                                       num_to_del);
7175                 if (ret) {
7176                         btrfs_abort_transaction(trans, ret);
7177                         goto out;
7178                 }
7179                 btrfs_release_path(path);
7180
7181                 if (is_data) {
7182                         ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
7183                         if (ret) {
7184                                 btrfs_abort_transaction(trans, ret);
7185                                 goto out;
7186                         }
7187                 }
7188
7189                 ret = add_to_free_space_tree(trans, bytenr, num_bytes);
7190                 if (ret) {
7191                         btrfs_abort_transaction(trans, ret);
7192                         goto out;
7193                 }
7194
7195                 ret = update_block_group(trans, info, bytenr, num_bytes, 0);
7196                 if (ret) {
7197                         btrfs_abort_transaction(trans, ret);
7198                         goto out;
7199                 }
7200         }
7201         btrfs_release_path(path);
7202
7203 out:
7204         btrfs_free_path(path);
7205         return ret;
7206 }
7207
7208 /*
7209  * when we free a block, it is possible (and likely) that we free the last
7210  * delayed ref for that extent as well.  This searches the delayed ref tree for
7211  * a given extent, and if there are no other delayed refs to be processed, it
7212  * removes it from the tree.
7213  */
7214 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7215                                       u64 bytenr)
7216 {
7217         struct btrfs_delayed_ref_head *head;
7218         struct btrfs_delayed_ref_root *delayed_refs;
7219         int ret = 0;
7220
7221         delayed_refs = &trans->transaction->delayed_refs;
7222         spin_lock(&delayed_refs->lock);
7223         head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
7224         if (!head)
7225                 goto out_delayed_unlock;
7226
7227         spin_lock(&head->lock);
7228         if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
7229                 goto out;
7230
7231         if (cleanup_extent_op(head) != NULL)
7232                 goto out;
7233
7234         /*
7235          * waiting for the lock here would deadlock.  If someone else has it
7236          * locked, they are already in the process of dropping it anyway.
7237          */
7238         if (!mutex_trylock(&head->mutex))
7239                 goto out;
7240
7241         btrfs_delete_ref_head(delayed_refs, head);
7242         head->processing = 0;
7243
7244         spin_unlock(&head->lock);
7245         spin_unlock(&delayed_refs->lock);
7246
7247         BUG_ON(head->extent_op);
7248         if (head->must_insert_reserved)
7249                 ret = 1;
7250
7251         btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
7252         mutex_unlock(&head->mutex);
7253         btrfs_put_delayed_ref_head(head);
7254         return ret;
7255 out:
7256         spin_unlock(&head->lock);
7257
7258 out_delayed_unlock:
7259         spin_unlock(&delayed_refs->lock);
7260         return 0;
7261 }
7262
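/*
 * Free a tree block.  For blocks outside the tree log a DROP delayed ref is
 * queued.  If this was the last reference to a block allocated in the
 * current transaction that was never written out, the delayed ref can be
 * cancelled via check_ref_cleanup() and the space returned to the free space
 * cache immediately instead of being pinned until the transaction commits.
 */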
7263 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7264                            struct btrfs_root *root,
7265                            struct extent_buffer *buf,
7266                            u64 parent, int last_ref)
7267 {
7268         struct btrfs_fs_info *fs_info = root->fs_info;
7269         int pin = 1;
7270         int ret;
7271
7272         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7273                 int old_ref_mod, new_ref_mod;
7274
7275                 btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
7276                                    root->root_key.objectid,
7277                                    btrfs_header_level(buf), 0,
7278                                    BTRFS_DROP_DELAYED_REF);
7279                 ret = btrfs_add_delayed_tree_ref(trans, buf->start,
7280                                                  buf->len, parent,
7281                                                  root->root_key.objectid,
7282                                                  btrfs_header_level(buf),
7283                                                  BTRFS_DROP_DELAYED_REF, NULL,
7284                                                  &old_ref_mod, &new_ref_mod);
7285                 BUG_ON(ret); /* -ENOMEM */
7286                 pin = old_ref_mod >= 0 && new_ref_mod < 0;
7287         }
7288
7289         if (last_ref && btrfs_header_generation(buf) == trans->transid) {
7290                 struct btrfs_block_group_cache *cache;
7291
7292                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
7293                         ret = check_ref_cleanup(trans, buf->start);
7294                         if (!ret)
7295                                 goto out;
7296                 }
7297
7298                 pin = 0;
7299                 cache = btrfs_lookup_block_group(fs_info, buf->start);
7300
7301                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
7302                         pin_down_extent(fs_info, cache, buf->start,
7303                                         buf->len, 1);
7304                         btrfs_put_block_group(cache);
7305                         goto out;
7306                 }
7307
7308                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
7309
7310                 btrfs_add_free_space(cache, buf->start, buf->len);
7311                 btrfs_free_reserved_bytes(cache, buf->len, 0);
7312                 btrfs_put_block_group(cache);
7313                 trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
7314         }
7315 out:
7316         if (pin)
7317                 add_pinned_bytes(fs_info, buf->len, true,
7318                                  root->root_key.objectid);
7319
7320         if (last_ref) {
7321                 /*
7322                  * Deleting the buffer, clear the corrupt flag since it doesn't
7323                  * matter anymore.
7324                  */
7325                 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
7326         }
7327 }
7328
7329 /* Can return -ENOMEM */
7330 int btrfs_free_extent(struct btrfs_trans_handle *trans,
7331                       struct btrfs_root *root,
7332                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
7333                       u64 owner, u64 offset)
7334 {
7335         struct btrfs_fs_info *fs_info = root->fs_info;
7336         int old_ref_mod, new_ref_mod;
7337         int ret;
7338
7339         if (btrfs_is_testing(fs_info))
7340                 return 0;
7341
7342         if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
7343                 btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
7344                                    root_objectid, owner, offset,
7345                                    BTRFS_DROP_DELAYED_REF);
7346
7347         /*
7348          * tree log blocks never actually go into the extent allocation
7349          * tree, just update pinning info and exit early.
7350          */
7351         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
7352                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
7353                 /* unlocks the pinned mutex */
7354                 btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
7355                 old_ref_mod = new_ref_mod = 0;
7356                 ret = 0;
7357         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
7358                 ret = btrfs_add_delayed_tree_ref(trans, bytenr,
7359                                                  num_bytes, parent,
7360                                                  root_objectid, (int)owner,
7361                                                  BTRFS_DROP_DELAYED_REF, NULL,
7362                                                  &old_ref_mod, &new_ref_mod);
7363         } else {
7364                 ret = btrfs_add_delayed_data_ref(trans, bytenr,
7365                                                  num_bytes, parent,
7366                                                  root_objectid, owner, offset,
7367                                                  0, BTRFS_DROP_DELAYED_REF,
7368                                                  &old_ref_mod, &new_ref_mod);
7369         }
7370
7371         if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0) {
7372                 bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
7373
7374                 add_pinned_bytes(fs_info, num_bytes, metadata, root_objectid);
7375         }
7376
7377         return ret;
7378 }
7379
7380 /*
7381  * when we wait for progress in the block group caching, it's because
7382  * our allocation attempt failed at least once.  So, we must sleep
7383  * and let some progress happen before we try again.
7384  *
7385  * This function will sleep at least once waiting for new free space to
7386  * show up, and then it will check the block group free space numbers
7387  * for our min num_bytes.  Another option is to have it go ahead
7388  * and look in the rbtree for a free extent of a given size, but this
7389  * is a good start.
7390  *
7391  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
7392  * any of the information in this block group.
7393  */
7394 static noinline void
7395 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
7396                                 u64 num_bytes)
7397 {
7398         struct btrfs_caching_control *caching_ctl;
7399
7400         caching_ctl = get_caching_control(cache);
7401         if (!caching_ctl)
7402                 return;
7403
7404         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
7405                    (cache->free_space_ctl->free_space >= num_bytes));
7406
7407         put_caching_control(caching_ctl);
7408 }
7409
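/*
 * Wait until caching of this block group has fully finished.  Returns -EIO
 * if caching ended with an error, 0 otherwise.
 */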
7410 static noinline int
7411 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
7412 {
7413         struct btrfs_caching_control *caching_ctl;
7414         int ret = 0;
7415
7416         caching_ctl = get_caching_control(cache);
7417         if (!caching_ctl)
7418                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
7419
7420         wait_event(caching_ctl->wait, block_group_cache_done(cache));
7421         if (cache->cached == BTRFS_CACHE_ERROR)
7422                 ret = -EIO;
7423         put_caching_control(caching_ctl);
7424         return ret;
7425 }
7426
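/*
 * Stages find_free_extent() goes through when it cannot immediately satisfy
 * an allocation; see find_free_extent_update_loop() for how each stage
 * relaxes the search.
 */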
7427 enum btrfs_loop_type {
7428         LOOP_CACHING_NOWAIT = 0,
7429         LOOP_CACHING_WAIT = 1,
7430         LOOP_ALLOC_CHUNK = 2,
7431         LOOP_NO_EMPTY_SIZE = 3,
7432 };
7433
7434 static inline void
7435 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7436                        int delalloc)
7437 {
7438         if (delalloc)
7439                 down_read(&cache->data_rwsem);
7440 }
7441
7442 static inline void
7443 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7444                        int delalloc)
7445 {
7446         btrfs_get_block_group(cache);
7447         if (delalloc)
7448                 down_read(&cache->data_rwsem);
7449 }
7450
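/*
 * Return the block group that currently owns @cluster, or NULL if the
 * cluster is empty.  cluster->refill_lock is held on return.  If the owner
 * differs from @block_group, a reference is taken on it and, for delalloc
 * allocations, its data_rwsem is read-locked (which may require briefly
 * dropping and retaking refill_lock to avoid blocking under the spinlock).
 */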
7451 static struct btrfs_block_group_cache *
7452 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7453                    struct btrfs_free_cluster *cluster,
7454                    int delalloc)
7455 {
7456         struct btrfs_block_group_cache *used_bg = NULL;
7457
7458         spin_lock(&cluster->refill_lock);
7459         while (1) {
7460                 used_bg = cluster->block_group;
7461                 if (!used_bg)
7462                         return NULL;
7463
7464                 if (used_bg == block_group)
7465                         return used_bg;
7466
7467                 btrfs_get_block_group(used_bg);
7468
7469                 if (!delalloc)
7470                         return used_bg;
7471
7472                 if (down_read_trylock(&used_bg->data_rwsem))
7473                         return used_bg;
7474
7475                 spin_unlock(&cluster->refill_lock);
7476
7477                 /* We should only have one level of nesting. */
7478                 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
7479
7480                 spin_lock(&cluster->refill_lock);
7481                 if (used_bg == cluster->block_group)
7482                         return used_bg;
7483
7484                 up_read(&used_bg->data_rwsem);
7485                 btrfs_put_block_group(used_bg);
7486         }
7487 }
7488
7489 static inline void
7490 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7491                          int delalloc)
7492 {
7493         if (delalloc)
7494                 up_read(&cache->data_rwsem);
7495         btrfs_put_block_group(cache);
7496 }
7497
7498 /*
7499  * Structure used internally by the find_free_extent() function.  Wraps the
7500  * needed parameters.
7501  */
7502 struct find_free_extent_ctl {
7503         /* Basic allocation info */
7504         u64 ram_bytes;
7505         u64 num_bytes;
7506         u64 empty_size;
7507         u64 flags;
7508         int delalloc;
7509
7510         /* Where to start the search inside the bg */
7511         u64 search_start;
7512
7513         /* For clustered allocation */
7514         u64 empty_cluster;
7515
7516         bool have_caching_bg;
7517         bool orig_have_caching_bg;
7518
7519         /* RAID index, converted from flags */
7520         int index;
7521
7522         /*
7523          * Current loop number, check find_free_extent_update_loop() for details
7524          */
7525         int loop;
7526
7527         /*
7528          * Whether we're refilling a cluster; if true we need to re-search the
7529          * current block group but don't try to refill the cluster again.
7530          */
7531         bool retry_clustered;
7532
7533         /*
7534          * Whether we're updating the free space cache; if true we need to
7535          * re-search the current block group without trying to update it again.
7536          */
7537         bool retry_unclustered;
7538
7539         /* If current block group is cached */
7540         int cached;
7541
7542         /* Max contiguous hole found */
7543         u64 max_extent_size;
7544
7545         /* Total free space from free space cache, not always contiguous */
7546         u64 total_free_space;
7547
7548         /* Found result */
7549         u64 found_offset;
7550 };
7551
7552
7553 /*
7554  * Helper function for find_free_extent().
7555  *
7556  * Return -ENOENT to inform the caller to fall back to unclustered mode.
7557  * Return -EAGAIN to inform the caller that we need to re-search this block group.
7558  * Return >0 to inform the caller that we found nothing.
7559  * Return 0 when we have found a location and set ffe_ctl->found_offset.
7560  */
7561 static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
7562                 struct btrfs_free_cluster *last_ptr,
7563                 struct find_free_extent_ctl *ffe_ctl,
7564                 struct btrfs_block_group_cache **cluster_bg_ret)
7565 {
7566         struct btrfs_fs_info *fs_info = bg->fs_info;
7567         struct btrfs_block_group_cache *cluster_bg;
7568         u64 aligned_cluster;
7569         u64 offset;
7570         int ret;
7571
7572         cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
7573         if (!cluster_bg)
7574                 goto refill_cluster;
7575         if (cluster_bg != bg && (cluster_bg->ro ||
7576             !block_group_bits(cluster_bg, ffe_ctl->flags)))
7577                 goto release_cluster;
7578
7579         offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
7580                         ffe_ctl->num_bytes, cluster_bg->key.objectid,
7581                         &ffe_ctl->max_extent_size);
7582         if (offset) {
7583                 /* We have a block, we're done */
7584                 spin_unlock(&last_ptr->refill_lock);
7585                 trace_btrfs_reserve_extent_cluster(cluster_bg,
7586                                 ffe_ctl->search_start, ffe_ctl->num_bytes);
7587                 *cluster_bg_ret = cluster_bg;
7588                 ffe_ctl->found_offset = offset;
7589                 return 0;
7590         }
7591         WARN_ON(last_ptr->block_group != cluster_bg);
7592
7593 release_cluster:
7594         /*
7595          * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
7596          * let's just skip it and let the allocator find whatever block it can
7597          * find. If we reach this point, we will have tried the cluster
7598          * allocator plenty of times and not have found anything, so we are
7599          * likely way too fragmented for the clustering stuff to find anything.
7600          *
7601          * However, if the cluster is taken from the current block group,
7602          * release the cluster first, so that we stand a better chance of
7603          * succeeding in the unclustered allocation.
7604          */
7605         if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
7606                 spin_unlock(&last_ptr->refill_lock);
7607                 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
7608                 return -ENOENT;
7609         }
7610
7611         /* This cluster didn't work out, free it and start over */
7612         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7613
7614         if (cluster_bg != bg)
7615                 btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
7616
7617 refill_cluster:
7618         if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
7619                 spin_unlock(&last_ptr->refill_lock);
7620                 return -ENOENT;
7621         }
7622
7623         aligned_cluster = max_t(u64,
7624                         ffe_ctl->empty_cluster + ffe_ctl->empty_size,
7625                         bg->full_stripe_len);
7626         ret = btrfs_find_space_cluster(fs_info, bg, last_ptr,
7627                         ffe_ctl->search_start, ffe_ctl->num_bytes,
7628                         aligned_cluster);
7629         if (ret == 0) {
7630                 /* Now pull our allocation out of this cluster */
7631                 offset = btrfs_alloc_from_cluster(bg, last_ptr,
7632                                 ffe_ctl->num_bytes, ffe_ctl->search_start,
7633                                 &ffe_ctl->max_extent_size);
7634                 if (offset) {
7635                         /* We found one, proceed */
7636                         spin_unlock(&last_ptr->refill_lock);
7637                         trace_btrfs_reserve_extent_cluster(bg,
7638                                         ffe_ctl->search_start,
7639                                         ffe_ctl->num_bytes);
7640                         ffe_ctl->found_offset = offset;
7641                         return 0;
7642                 }
7643         } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
7644                    !ffe_ctl->retry_clustered) {
7645                 spin_unlock(&last_ptr->refill_lock);
7646
7647                 ffe_ctl->retry_clustered = true;
7648                 wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
7649                                 ffe_ctl->empty_cluster + ffe_ctl->empty_size);
7650                 return -EAGAIN;
7651         }
7652         /*
7653          * At this point we either didn't find a cluster or we weren't able to
7654          * allocate a block from our cluster.  Free the cluster we've been
7655          * trying to use, and go to the next block group.
7656          */
7657         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7658         spin_unlock(&last_ptr->refill_lock);
7659         return 1;
7660 }
7661
7662 /*
7663  * Return >0 to inform the caller that we found nothing.
7664  * Return 0 when we have found a free extent and set ffe_ctl->found_offset.
7665  * Return -EAGAIN to inform the caller that we need to re-search this block group.
7666  */
7667 static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
7668                 struct btrfs_free_cluster *last_ptr,
7669                 struct find_free_extent_ctl *ffe_ctl)
7670 {
7671         u64 offset;
7672
7673         /*
7674          * We are doing an unclustered allocation, set the fragmented flag so
7675          * we don't bother trying to set up a cluster again until we get more
7676          * space.
7677          */
7678         if (unlikely(last_ptr)) {
7679                 spin_lock(&last_ptr->lock);
7680                 last_ptr->fragmented = 1;
7681                 spin_unlock(&last_ptr->lock);
7682         }
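        /*
         * For a fully cached block group we can cheaply check whether it can
         * possibly satisfy this request (including the empty_size and
         * empty_cluster slack) before doing the real search.  If it can't,
         * remember the largest free space seen in total_free_space and skip
         * the group.
         */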
7683         if (ffe_ctl->cached) {
7684                 struct btrfs_free_space_ctl *free_space_ctl;
7685
7686                 free_space_ctl = bg->free_space_ctl;
7687                 spin_lock(&free_space_ctl->tree_lock);
7688                 if (free_space_ctl->free_space <
7689                     ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
7690                     ffe_ctl->empty_size) {
7691                         ffe_ctl->total_free_space = max_t(u64,
7692                                         ffe_ctl->total_free_space,
7693                                         free_space_ctl->free_space);
7694                         spin_unlock(&free_space_ctl->tree_lock);
7695                         return 1;
7696                 }
7697                 spin_unlock(&free_space_ctl->tree_lock);
7698         }
7699
7700         offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
7701                         ffe_ctl->num_bytes, ffe_ctl->empty_size,
7702                         &ffe_ctl->max_extent_size);
7703
7704         /*
7705          * If we didn't find a chunk, and we haven't failed on this block group
7706          * before, and this block group is in the middle of caching and we are
7707          * ok with waiting, then go ahead and wait for progress to be made, and
7708          * set @retry_unclustered to true.
7709          *
7710          * If @retry_unclustered is true then we've already waited on this
7711          * block group once and should move on to the next block group.
7712          */
7713         if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
7714             ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
7715                 wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
7716                                                 ffe_ctl->empty_size);
7717                 ffe_ctl->retry_unclustered = true;
7718                 return -EAGAIN;
7719         } else if (!offset) {
7720                 return 1;
7721         }
7722         ffe_ctl->found_offset = offset;
7723         return 0;
7724 }
7725
7726 /*
7727  * Return >0 means caller needs to re-search for free extent
7728  * Return 0 means we have the needed free extent.
7729  * Return <0 means we failed to locate any free extent.
7730  */
7731 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
7732                                         struct btrfs_free_cluster *last_ptr,
7733                                         struct btrfs_key *ins,
7734                                         struct find_free_extent_ctl *ffe_ctl,
7735                                         int full_search, bool use_cluster)
7736 {
7737         struct btrfs_root *root = fs_info->extent_root;
7738         int ret;
7739
7740         if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
7741             ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
7742                 ffe_ctl->orig_have_caching_bg = true;
7743
7744         if (!ins->objectid && ffe_ctl->loop >= LOOP_CACHING_WAIT &&
7745             ffe_ctl->have_caching_bg)
7746                 return 1;
7747
7748         if (!ins->objectid && ++(ffe_ctl->index) < BTRFS_NR_RAID_TYPES)
7749                 return 1;
7750
7751         if (ins->objectid) {
7752                 if (!use_cluster && last_ptr) {
7753                         spin_lock(&last_ptr->lock);
7754                         last_ptr->window_start = ins->objectid;
7755                         spin_unlock(&last_ptr->lock);
7756                 }
7757                 return 0;
7758         }
7759
7760         /*
7761          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7762          *                      caching kthreads as we move along
7763          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7764          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7765          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7766          *                     again
7767          */
7768         if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
7769                 ffe_ctl->index = 0;
7770                 if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) {
7771                         /*
7772                          * We want to skip the LOOP_CACHING_WAIT step if we
7773                          * don't have any uncached bgs and we've already done a
7774                          * full search through.
7775                          */
7776                         if (ffe_ctl->orig_have_caching_bg || !full_search)
7777                                 ffe_ctl->loop = LOOP_CACHING_WAIT;
7778                         else
7779                                 ffe_ctl->loop = LOOP_ALLOC_CHUNK;
7780                 } else {
7781                         ffe_ctl->loop++;
7782                 }
7783
7784                 if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
7785                         struct btrfs_trans_handle *trans;
7786                         int exist = 0;
7787
7788                         trans = current->journal_info;
7789                         if (trans)
7790                                 exist = 1;
7791                         else
7792                                 trans = btrfs_join_transaction(root);
7793
7794                         if (IS_ERR(trans)) {
7795                                 ret = PTR_ERR(trans);
7796                                 return ret;
7797                         }
7798
7799                         ret = do_chunk_alloc(trans, ffe_ctl->flags,
7800                                              CHUNK_ALLOC_FORCE);
7801
7802                         /*
7803                          * If we can't allocate a new chunk, we've already looped
7804                          * through at least once, so move on to the NO_EMPTY_SIZE
7805                          * case.
7806                          */
7807                         if (ret == -ENOSPC)
7808                                 ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
7809
7810                         /* Do not bail out on ENOSPC since we can do more. */
7811                         if (ret < 0 && ret != -ENOSPC)
7812                                 btrfs_abort_transaction(trans, ret);
7813                         else
7814                                 ret = 0;
7815                         if (!exist)
7816                                 btrfs_end_transaction(trans);
7817                         if (ret)
7818                                 return ret;
7819                 }
7820
7821                 if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
7822                         /*
7823                          * Don't loop again if we already have no empty_size and
7824                          * no empty_cluster.
7825                          */
7826                         if (ffe_ctl->empty_size == 0 &&
7827                             ffe_ctl->empty_cluster == 0)
7828                                 return -ENOSPC;
7829                         ffe_ctl->empty_size = 0;
7830                         ffe_ctl->empty_cluster = 0;
7831                 }
7832                 return 1;
7833         }
7834         return -ENOSPC;
7835 }
7836
7837 /*
7838  * walks the btree of allocated extents and finds a hole of a given size.
7839  * The key ins is changed to record the hole:
7840  * ins->objectid == start position
7841  * ins->flags = BTRFS_EXTENT_ITEM_KEY
7842  * ins->offset == the size of the hole.
7843  * Any available blocks before search_start are skipped.
7844  *
7845  * If there is no suitable free space, we will record the size of the largest
7846  * free space extent we have seen during the search.
7847  *
7848  * The overall logic and call chain:
7849  *
7850  * find_free_extent()
7851  * |- Iterate through all block groups
7852  * |  |- Get a valid block group
7853  * |  |- Try to do clustered allocation in that block group
7854  * |  |- Try to do unclustered allocation in that block group
7855  * |  |- Check if the result is valid
7856  * |  |  |- If valid, then exit
7857  * |  |- Jump to next block group
7858  * |
7859  * |- Push harder to find free extents
7860  *    |- If not found, re-iterate all block groups
7861  */
7862 static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
7863                                 u64 ram_bytes, u64 num_bytes, u64 empty_size,
7864                                 u64 hint_byte, struct btrfs_key *ins,
7865                                 u64 flags, int delalloc)
7866 {
7867         int ret = 0;
7868         struct btrfs_free_cluster *last_ptr = NULL;
7869         struct btrfs_block_group_cache *block_group = NULL;
7870         struct find_free_extent_ctl ffe_ctl = {0};
7871         struct btrfs_space_info *space_info;
7872         bool use_cluster = true;
7873         bool full_search = false;
7874
7875         WARN_ON(num_bytes < fs_info->sectorsize);
7876
7877         ffe_ctl.ram_bytes = ram_bytes;
7878         ffe_ctl.num_bytes = num_bytes;
7879         ffe_ctl.empty_size = empty_size;
7880         ffe_ctl.flags = flags;
7881         ffe_ctl.search_start = 0;
7882         ffe_ctl.retry_clustered = false;
7883         ffe_ctl.retry_unclustered = false;
7884         ffe_ctl.delalloc = delalloc;
7885         ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
7886         ffe_ctl.have_caching_bg = false;
7887         ffe_ctl.orig_have_caching_bg = false;
7888         ffe_ctl.found_offset = 0;
7889
7890         ins->type = BTRFS_EXTENT_ITEM_KEY;
7891         ins->objectid = 0;
7892         ins->offset = 0;
7893
7894         trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
7895
7896         space_info = __find_space_info(fs_info, flags);
7897         if (!space_info) {
7898                 btrfs_err(fs_info, "No space info for %llu", flags);
7899                 return -ENOSPC;
7900         }
7901
7902         /*
7903          * If our free space is heavily fragmented we may not be able to make
7904          * big contiguous allocations, so instead of doing the expensive search
7905          * for free space, simply return ENOSPC with our max_extent_size so we
7906          * can go ahead and search for a more manageable chunk.
7907          *
7908          * If our max_extent_size is large enough for our allocation simply
7909          * disable clustering since we will likely not be able to find enough
7910  * space to create a cluster and would only induce latency trying.
7911          */
7912         if (unlikely(space_info->max_extent_size)) {
7913                 spin_lock(&space_info->lock);
7914                 if (space_info->max_extent_size &&
7915                     num_bytes > space_info->max_extent_size) {
7916                         ins->offset = space_info->max_extent_size;
7917                         spin_unlock(&space_info->lock);
7918                         return -ENOSPC;
7919                 } else if (space_info->max_extent_size) {
7920                         use_cluster = false;
7921                 }
7922                 spin_unlock(&space_info->lock);
7923         }
7924
7925         last_ptr = fetch_cluster_info(fs_info, space_info,
7926                                       &ffe_ctl.empty_cluster);
7927         if (last_ptr) {
7928                 spin_lock(&last_ptr->lock);
7929                 if (last_ptr->block_group)
7930                         hint_byte = last_ptr->window_start;
7931                 if (last_ptr->fragmented) {
7932                         /*
7933                          * We still set window_start so we can keep track of the
7934                          * last place we found an allocation to try and save
7935                          * some time.
7936                          */
7937                         hint_byte = last_ptr->window_start;
7938                         use_cluster = false;
7939                 }
7940                 spin_unlock(&last_ptr->lock);
7941         }
7942
7943         ffe_ctl.search_start = max(ffe_ctl.search_start,
7944                                    first_logical_byte(fs_info, 0));
7945         ffe_ctl.search_start = max(ffe_ctl.search_start, hint_byte);
7946         if (ffe_ctl.search_start == hint_byte) {
7947                 block_group = btrfs_lookup_block_group(fs_info,
7948                                                        ffe_ctl.search_start);
7949                 /*
7950                  * we don't want to use the block group if it doesn't match our
7951                  * allocation bits, or if it's not cached.
7952                  *
7953                  * However, if we are re-searching with an ideal block group
7954                  * picked out then we don't care that the block group is cached.
7955                  */
7956                 if (block_group && block_group_bits(block_group, flags) &&
7957                     block_group->cached != BTRFS_CACHE_NO) {
7958                         down_read(&space_info->groups_sem);
7959                         if (list_empty(&block_group->list) ||
7960                             block_group->ro) {
7961                                 /*
7962                                  * someone is removing this block group, so
7963                                  * we can't jump into the have_block_group
7964                                  * target because our list pointers are not
7965                                  * valid
7966                                  */
7967                                 btrfs_put_block_group(block_group);
7968                                 up_read(&space_info->groups_sem);
7969                         } else {
7970                                 ffe_ctl.index = btrfs_bg_flags_to_raid_index(
7971                                                 block_group->flags);
7972                                 btrfs_lock_block_group(block_group, delalloc);
7973                                 goto have_block_group;
7974                         }
7975                 } else if (block_group) {
7976                         btrfs_put_block_group(block_group);
7977                 }
7978         }
7979 search:
7980         ffe_ctl.have_caching_bg = false;
7981         if (ffe_ctl.index == btrfs_bg_flags_to_raid_index(flags) ||
7982             ffe_ctl.index == 0)
7983                 full_search = true;
7984         down_read(&space_info->groups_sem);
7985         list_for_each_entry(block_group,
7986                             &space_info->block_groups[ffe_ctl.index], list) {
7987                 /* If the block group is read-only, we can skip it entirely. */
7988                 if (unlikely(block_group->ro))
7989                         continue;
7990
7991                 btrfs_grab_block_group(block_group, delalloc);
7992                 ffe_ctl.search_start = block_group->key.objectid;
7993
7994                 /*
7995                  * this can happen if we end up cycling through all the
7996                  * raid types, but we want to make sure we only allocate
7997                  * for the proper type.
7998                  */
7999                 if (!block_group_bits(block_group, flags)) {
8000                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
8001                                 BTRFS_BLOCK_GROUP_RAID1 |
8002                                 BTRFS_BLOCK_GROUP_RAID5 |
8003                                 BTRFS_BLOCK_GROUP_RAID6 |
8004                                 BTRFS_BLOCK_GROUP_RAID10;
8005
8006                         /*
8007                          * if they asked for extra copies and this block group
8008                          * doesn't provide them, bail.  This does allow us to
8009                          * fill raid0 from raid1.
8010                          */
8011                         if ((flags & extra) && !(block_group->flags & extra))
8012                                 goto loop;
8013                 }
8014
8015 have_block_group:
8016                 ffe_ctl.cached = block_group_cache_done(block_group);
8017                 if (unlikely(!ffe_ctl.cached)) {
8018                         ffe_ctl.have_caching_bg = true;
8019                         ret = cache_block_group(block_group, 0);
8020                         BUG_ON(ret < 0);
8021                         ret = 0;
8022                 }
8023
8024                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
8025                         goto loop;
8026
8027                 /*
8028                  * OK, we want to try and use the cluster allocator, so
8029                  * let's look there
8030                  */
8031                 if (last_ptr && use_cluster) {
8032                         struct btrfs_block_group_cache *cluster_bg = NULL;
8033
8034                         ret = find_free_extent_clustered(block_group, last_ptr,
8035                                                          &ffe_ctl, &cluster_bg);
8036
8037                         if (ret == 0) {
8038                                 if (cluster_bg && cluster_bg != block_group) {
8039                                         btrfs_release_block_group(block_group,
8040                                                                   delalloc);
8041                                         block_group = cluster_bg;
8042                                 }
8043                                 goto checks;
8044                         } else if (ret == -EAGAIN) {
8045                                 goto have_block_group;
8046                         } else if (ret > 0) {
8047                                 goto loop;
8048                         }
8049                         /* ret == -ENOENT case falls through */
8050                 }
8051
8052                 ret = find_free_extent_unclustered(block_group, last_ptr,
8053                                                    &ffe_ctl);
8054                 if (ret == -EAGAIN)
8055                         goto have_block_group;
8056                 else if (ret > 0)
8057                         goto loop;
8058                 /* ret == 0 case falls through */
8059 checks:
8060                 ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
8061                                              fs_info->stripesize);
8062
8063                 /* move on to the next group */
8064                 if (ffe_ctl.search_start + num_bytes >
8065                     block_group->key.objectid + block_group->key.offset) {
8066                         btrfs_add_free_space(block_group, ffe_ctl.found_offset,
8067                                              num_bytes);
8068                         goto loop;
8069                 }
8070
8071                 if (ffe_ctl.found_offset < ffe_ctl.search_start)
8072                         btrfs_add_free_space(block_group, ffe_ctl.found_offset,
8073                                 ffe_ctl.search_start - ffe_ctl.found_offset);
8074
8075                 ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
8076                                 num_bytes, delalloc);
8077                 if (ret == -EAGAIN) {
8078                         btrfs_add_free_space(block_group, ffe_ctl.found_offset,
8079                                              num_bytes);
8080                         goto loop;
8081                 }
8082                 btrfs_inc_block_group_reservations(block_group);
8083
8084         /* we are all good, let's return */
8085                 ins->objectid = ffe_ctl.search_start;
8086                 ins->offset = num_bytes;
8087
8088                 trace_btrfs_reserve_extent(block_group, ffe_ctl.search_start,
8089                                            num_bytes);
8090                 btrfs_release_block_group(block_group, delalloc);
8091                 break;
8092 loop:
8093                 ffe_ctl.retry_clustered = false;
8094                 ffe_ctl.retry_unclustered = false;
8095                 BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
8096                        ffe_ctl.index);
8097                 btrfs_release_block_group(block_group, delalloc);
8098                 cond_resched();
8099         }
8100         up_read(&space_info->groups_sem);
8101
8102         ret = find_free_extent_update_loop(fs_info, last_ptr, ins, &ffe_ctl,
8103                                            full_search, use_cluster);
8104         if (ret > 0)
8105                 goto search;
8106
8107         if (ret == -ENOSPC) {
8108                 /*
8109                  * Use ffe_ctl->total_free_space as fallback if we can't find
8110                  * any contiguous hole.
8111                  */
8112                 if (!ffe_ctl.max_extent_size)
8113                         ffe_ctl.max_extent_size = ffe_ctl.total_free_space;
8114                 spin_lock(&space_info->lock);
8115                 space_info->max_extent_size = ffe_ctl.max_extent_size;
8116                 spin_unlock(&space_info->lock);
8117                 ins->offset = ffe_ctl.max_extent_size;
8118         }
8119         return ret;
8120 }
8121
8122 #define DUMP_BLOCK_RSV(fs_info, rsv_name)                               \
8123 do {                                                                    \
8124         struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;           \
8125         spin_lock(&__rsv->lock);                                        \
8126         btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",      \
8127                    __rsv->size, __rsv->reserved);                       \
8128         spin_unlock(&__rsv->lock);                                      \
8129 } while (0)
8130
8131 static void dump_space_info(struct btrfs_fs_info *fs_info,
8132                             struct btrfs_space_info *info, u64 bytes,
8133                             int dump_block_groups)
8134 {
8135         struct btrfs_block_group_cache *cache;
8136         int index = 0;
8137
8138         spin_lock(&info->lock);
8139         btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
8140                    info->flags,
8141                    info->total_bytes - btrfs_space_info_used(info, true),
8142                    info->full ? "" : "not ");
8143         btrfs_info(fs_info,
8144                 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
8145                 info->total_bytes, info->bytes_used, info->bytes_pinned,
8146                 info->bytes_reserved, info->bytes_may_use,
8147                 info->bytes_readonly);
8148         spin_unlock(&info->lock);
8149
8150         DUMP_BLOCK_RSV(fs_info, global_block_rsv);
8151         DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
8152         DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
8153         DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
8154         DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
8155
8156         if (!dump_block_groups)
8157                 return;
8158
8159         down_read(&info->groups_sem);
8160 again:
8161         list_for_each_entry(cache, &info->block_groups[index], list) {
8162                 spin_lock(&cache->lock);
8163                 btrfs_info(fs_info,
8164                         "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
8165                         cache->key.objectid, cache->key.offset,
8166                         btrfs_block_group_used(&cache->item), cache->pinned,
8167                         cache->reserved, cache->ro ? "[readonly]" : "");
8168                 btrfs_dump_free_space(cache, bytes);
8169                 spin_unlock(&cache->lock);
8170         }
8171         if (++index < BTRFS_NR_RAID_TYPES)
8172                 goto again;
8173         up_read(&info->groups_sem);
8174 }
8175
8176 /*
8177  * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
8178  *                        hole that is at least as big as @num_bytes.
8179  *
8180  * @root           -    The root that will contain this extent
8181  *
8182  * @ram_bytes      -    The amount of space in ram that @num_bytes takes. This
8183  *                      is used for accounting purposes. This value differs
8184  *                      from @num_bytes only in the case of compressed extents.
8185  *
8186  * @num_bytes      -    Number of bytes to allocate on-disk.
8187  *
8188  * @min_alloc_size -    Indicates the minimum amount of space that the
8189  *                      allocator should try to satisfy. In some cases
8190  *                      @num_bytes may be larger than what is strictly
8191  *                      required and, on a fragmented filesystem, such a
8192  *                      large allocation would fail; @min_alloc_size lets
8193  *                      the allocator retry with smaller sizes down to this minimum.
8194  *
8195  * @empty_size     -    A hint that you plan on doing more COW. This is the
8196  *                      size in bytes the allocator should try to find free
8197  *                      next to the block it returns.  This is just a hint and
8198  *                      may be ignored by the allocator.
8199  *
8200  * @hint_byte      -    Hint to the allocator to start searching above the byte
8201  *                      address passed. It might be ignored.
8202  *
8203  * @ins            -    This key is modified to record the found hole. It will
8204  *                      have the following values:
8205  *                      ins->objectid == start position
8206  *                      ins->type == BTRFS_EXTENT_ITEM_KEY
8207  *                      ins->offset == the size of the hole.
8208  *
8209  * @is_data        -    Boolean flag indicating whether an extent is
8210  *                      allocated for data (true) or metadata (false)
8211  *
8212  * @delalloc       -    Boolean flag indicating whether this allocation is for
8213  *                      delalloc or not. If 'true', the data_rwsem of the
8214  *                      block groups is going to be acquired.
8215  *
8217  * Returns 0 when an allocation succeeded or < 0 when an error occurred. If
8218  * -ENOSPC is returned, @ins->offset will contain the size of the
8219  * largest available hole the allocator managed to find.
8220  */
8221 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
8222                          u64 num_bytes, u64 min_alloc_size,
8223                          u64 empty_size, u64 hint_byte,
8224                          struct btrfs_key *ins, int is_data, int delalloc)
8225 {
8226         struct btrfs_fs_info *fs_info = root->fs_info;
8227         bool final_tried = num_bytes == min_alloc_size;
8228         u64 flags;
8229         int ret;
8230
8231         flags = get_alloc_profile_by_root(root, is_data);
8232 again:
8233         WARN_ON(num_bytes < fs_info->sectorsize);
8234         ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
8235                                hint_byte, ins, flags, delalloc);
8236         if (!ret && !is_data) {
8237                 btrfs_dec_block_group_reservations(fs_info, ins->objectid);
8238         } else if (ret == -ENOSPC) {
8239                 if (!final_tried && ins->offset) {
8240                         num_bytes = min(num_bytes >> 1, ins->offset);
8241                         num_bytes = round_down(num_bytes,
8242                                                fs_info->sectorsize);
8243                         num_bytes = max(num_bytes, min_alloc_size);
8244                         ram_bytes = num_bytes;
8245                         if (num_bytes == min_alloc_size)
8246                                 final_tried = true;
8247                         goto again;
8248                 } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
8249                         struct btrfs_space_info *sinfo;
8250
8251                         sinfo = __find_space_info(fs_info, flags);
8252                         btrfs_err(fs_info,
8253                                   "allocation failed flags %llu, wanted %llu",
8254                                   flags, num_bytes);
8255                         if (sinfo)
8256                                 dump_space_info(fs_info, sinfo, num_bytes, 1);
8257                 }
8258         }
8259
8260         return ret;
8261 }
8262
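/*
 * Illustrative sketch, not part of the original file: how a data allocation
 * path might call btrfs_reserve_extent().  The local names and sizes below
 * are hypothetical; real callers live in the delalloc/COW paths.
 *
 *	struct btrfs_key ins = {};
 *	u64 alloc_size = round_up(len, fs_info->sectorsize);
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(root, alloc_size, alloc_size,
 *				   fs_info->sectorsize, 0, 0, &ins, 1, 1);
 *
 * On success ins.objectid/ins.offset describe the reserved extent; on -ENOSPC
 * ins.offset holds the largest hole the allocator could find, so the caller
 * may retry with a smaller request.
 */
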
8263 static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
8264                                         u64 start, u64 len,
8265                                         int pin, int delalloc)
8266 {
8267         struct btrfs_block_group_cache *cache;
8268         int ret = 0;
8269
8270         cache = btrfs_lookup_block_group(fs_info, start);
8271         if (!cache) {
8272                 btrfs_err(fs_info, "Unable to find block group for %llu",
8273                           start);
8274                 return -ENOSPC;
8275         }
8276
8277         if (pin)
8278                 pin_down_extent(fs_info, cache, start, len, 1);
8279         else {
8280                 if (btrfs_test_opt(fs_info, DISCARD))
8281                         ret = btrfs_discard_extent(fs_info, start, len, NULL);
8282                 btrfs_add_free_space(cache, start, len);
8283                 btrfs_free_reserved_bytes(cache, len, delalloc);
8284                 trace_btrfs_reserved_extent_free(fs_info, start, len);
8285         }
8286
8287         btrfs_put_block_group(cache);
8288         return ret;
8289 }
8290
8291 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
8292                                u64 start, u64 len, int delalloc)
8293 {
8294         return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
8295 }
8296
8297 int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
8298                                        u64 start, u64 len)
8299 {
8300         return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
8301 }
8302
8303 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8304                                       u64 parent, u64 root_objectid,
8305                                       u64 flags, u64 owner, u64 offset,
8306                                       struct btrfs_key *ins, int ref_mod)
8307 {
8308         struct btrfs_fs_info *fs_info = trans->fs_info;
8309         int ret;
8310         struct btrfs_extent_item *extent_item;
8311         struct btrfs_extent_inline_ref *iref;
8312         struct btrfs_path *path;
8313         struct extent_buffer *leaf;
8314         int type;
8315         u32 size;
8316
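        /*
         * A non-zero @parent means a full backref: the inline ref is keyed by
         * the bytenr of the referencing tree block (SHARED_DATA_REF).
         * Otherwise a normal keyed backref recording root/objectid/offset is
         * used (EXTENT_DATA_REF), matching the layouts filled in below.
         */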
8317         if (parent > 0)
8318                 type = BTRFS_SHARED_DATA_REF_KEY;
8319         else
8320                 type = BTRFS_EXTENT_DATA_REF_KEY;
8321
8322         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
8323
8324         path = btrfs_alloc_path();
8325         if (!path)
8326                 return -ENOMEM;
8327
8328         path->leave_spinning = 1;
8329         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8330                                       ins, size);
8331         if (ret) {
8332                 btrfs_free_path(path);
8333                 return ret;
8334         }
8335
8336         leaf = path->nodes[0];
8337         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8338                                      struct btrfs_extent_item);
8339         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
8340         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8341         btrfs_set_extent_flags(leaf, extent_item,
8342                                flags | BTRFS_EXTENT_FLAG_DATA);
8343
8344         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8345         btrfs_set_extent_inline_ref_type(leaf, iref, type);
8346         if (parent > 0) {
8347                 struct btrfs_shared_data_ref *ref;
8348                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
8349                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
8350                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
8351         } else {
8352                 struct btrfs_extent_data_ref *ref;
8353                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
8354                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
8355                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
8356                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
8357                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
8358         }
8359
8360         btrfs_mark_buffer_dirty(path->nodes[0]);
8361         btrfs_free_path(path);
8362
8363         ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset);
8364         if (ret)
8365                 return ret;
8366
8367         ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
8368         if (ret) { /* -ENOENT, logic error */
8369                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8370                         ins->objectid, ins->offset);
8371                 BUG();
8372         }
8373         trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
8374         return ret;
8375 }
8376
8377 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
8378                                      struct btrfs_delayed_ref_node *node,
8379                                      struct btrfs_delayed_extent_op *extent_op)
8380 {
8381         struct btrfs_fs_info *fs_info = trans->fs_info;
8382         int ret;
8383         struct btrfs_extent_item *extent_item;
8384         struct btrfs_key extent_key;
8385         struct btrfs_tree_block_info *block_info;
8386         struct btrfs_extent_inline_ref *iref;
8387         struct btrfs_path *path;
8388         struct extent_buffer *leaf;
8389         struct btrfs_delayed_tree_ref *ref;
8390         u32 size = sizeof(*extent_item) + sizeof(*iref);
8391         u64 num_bytes;
8392         u64 flags = extent_op->flags_to_set;
8393         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
8394
8395         ref = btrfs_delayed_node_to_tree_ref(node);
8396
8397         extent_key.objectid = node->bytenr;
8398         if (skinny_metadata) {
8399                 extent_key.offset = ref->level;
8400                 extent_key.type = BTRFS_METADATA_ITEM_KEY;
8401                 num_bytes = fs_info->nodesize;
8402         } else {
8403                 extent_key.offset = node->num_bytes;
8404                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
8405                 size += sizeof(*block_info);
8406                 num_bytes = node->num_bytes;
8407         }
8408
8409         path = btrfs_alloc_path();
8410         if (!path)
8411                 return -ENOMEM;
8412
8413         path->leave_spinning = 1;
8414         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
8415                                       &extent_key, size);
8416         if (ret) {
8417                 btrfs_free_path(path);
8418                 return ret;
8419         }
8420
8421         leaf = path->nodes[0];
8422         extent_item = btrfs_item_ptr(leaf, path->slots[0],
8423                                      struct btrfs_extent_item);
8424         btrfs_set_extent_refs(leaf, extent_item, 1);
8425         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
8426         btrfs_set_extent_flags(leaf, extent_item,
8427                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
8428
8429         if (skinny_metadata) {
8430                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
8431         } else {
8432                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
8433                 btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
8434                 btrfs_set_tree_block_level(leaf, block_info, ref->level);
8435                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
8436         }
8437
8438         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
8439                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
8440                 btrfs_set_extent_inline_ref_type(leaf, iref,
8441                                                  BTRFS_SHARED_BLOCK_REF_KEY);
8442                 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
8443         } else {
8444                 btrfs_set_extent_inline_ref_type(leaf, iref,
8445                                                  BTRFS_TREE_BLOCK_REF_KEY);
8446                 btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
8447         }
8448
8449         btrfs_mark_buffer_dirty(leaf);
8450         btrfs_free_path(path);
8451
8452         ret = remove_from_free_space_tree(trans, extent_key.objectid,
8453                                           num_bytes);
8454         if (ret)
8455                 return ret;
8456
8457         ret = update_block_group(trans, fs_info, extent_key.objectid,
8458                                  fs_info->nodesize, 1);
8459         if (ret) { /* -ENOENT, logic error */
8460                 btrfs_err(fs_info, "update block group failed for %llu %llu",
8461                         extent_key.objectid, extent_key.offset);
8462                 BUG();
8463         }
8464
8465         trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid,
8466                                           fs_info->nodesize);
8467         return ret;
8468 }
8469
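/*
 * Queue an ADD_DELAYED_EXTENT ref for a freshly reserved data extent.  The
 * extent item itself is inserted later, when the delayed ref is run and ends
 * up in alloc_reserved_file_extent() above.
 */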
8470 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
8471                                      struct btrfs_root *root, u64 owner,
8472                                      u64 offset, u64 ram_bytes,
8473                                      struct btrfs_key *ins)
8474 {
8475         int ret;
8476
8477         BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
8478
8479         btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
8480                            root->root_key.objectid, owner, offset,
8481                            BTRFS_ADD_DELAYED_EXTENT);
8482
8483         ret = btrfs_add_delayed_data_ref(trans, ins->objectid,
8484                                          ins->offset, 0,
8485                                          root->root_key.objectid, owner,
8486                                          offset, ram_bytes,
8487                                          BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
8488         return ret;
8489 }
8490
8491 /*
8492  * this is used by the tree logging recovery code.  It records that
8493  * an extent has been allocated and makes sure to clear the free
8494  * space cache bits as well
8495  */
8496 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
8497                                    u64 root_objectid, u64 owner, u64 offset,
8498                                    struct btrfs_key *ins)
8499 {
8500         struct btrfs_fs_info *fs_info = trans->fs_info;
8501         int ret;
8502         struct btrfs_block_group_cache *block_group;
8503         struct btrfs_space_info *space_info;
8504
8505         /*
8506          * Mixed block groups have their extents excluded before the log is
8507          * processed, so only do the exclude dance if this fs isn't mixed.
8508          */
8509         if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
8510                 ret = __exclude_logged_extent(fs_info, ins->objectid,
8511                                               ins->offset);
8512                 if (ret)
8513                         return ret;
8514         }
8515
8516         block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
8517         if (!block_group)
8518                 return -EINVAL;
8519
8520         space_info = block_group->space_info;
8521         spin_lock(&space_info->lock);
8522         spin_lock(&block_group->lock);
8523         space_info->bytes_reserved += ins->offset;
8524         block_group->reserved += ins->offset;
8525         spin_unlock(&block_group->lock);
8526         spin_unlock(&space_info->lock);
8527
8528         ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
8529                                          offset, ins, 1);
8530         btrfs_put_block_group(block_group);
8531         return ret;
8532 }
8533
8534 static struct extent_buffer *
8535 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
8536                       u64 bytenr, int level, u64 owner)
8537 {
8538         struct btrfs_fs_info *fs_info = root->fs_info;
8539         struct extent_buffer *buf;
8540
8541         buf = btrfs_find_create_tree_block(fs_info, bytenr);
8542         if (IS_ERR(buf))
8543                 return buf;
8544
8545         /*
8546          * Extra safety check in case the extent tree is corrupted and the
8547          * extent allocator chooses a tree block which is already used and
8548          * locked.
8549          */
8550         if (buf->lock_owner == current->pid) {
8551                 btrfs_err_rl(fs_info,
8552 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
8553                         buf->start, btrfs_header_owner(buf), current->pid);
8554                 free_extent_buffer(buf);
8555                 return ERR_PTR(-EUCLEAN);
8556         }
8557
8558         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
8559         btrfs_tree_lock(buf);
8560         btrfs_clean_tree_block(buf);
8561         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
8562
8563         btrfs_set_lock_blocking_write(buf);
8564         set_extent_buffer_uptodate(buf);
8565
8566         memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
8567         btrfs_set_header_level(buf, level);
8568         btrfs_set_header_bytenr(buf, buf->start);
8569         btrfs_set_header_generation(buf, trans->transid);
8570         btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
8571         btrfs_set_header_owner(buf, owner);
8572         write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
8573         write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
8574         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8575                 buf->log_index = root->log_transid % 2;
8576                 /*
8577                  * we allow two log transactions at a time, so use different
8578                  * EXTENT bits to differentiate dirty pages.
8579                  */
8580                 if (buf->log_index == 0)
8581                         set_extent_dirty(&root->dirty_log_pages, buf->start,
8582                                         buf->start + buf->len - 1, GFP_NOFS);
8583                 else
8584                         set_extent_new(&root->dirty_log_pages, buf->start,
8585                                         buf->start + buf->len - 1);
8586         } else {
8587                 buf->log_index = -1;
8588                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
8589                          buf->start + buf->len - 1, GFP_NOFS);
8590         }
8591         trans->dirty = true;
8592         /* this returns a buffer locked for blocking */
8593         return buf;
8594 }
8595
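/*
 * Pick the block rsv that a new tree block will be charged to.  The fallback
 * order below is: the root's own rsv, then (for the global rsv only) a
 * refresh-and-retry, then a direct NO_FLUSH metadata reservation, and finally
 * stealing from the global rsv when both share the same space info.  Failfast
 * rsvs bail out after the first miss.
 */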
8596 static struct btrfs_block_rsv *
8597 use_block_rsv(struct btrfs_trans_handle *trans,
8598               struct btrfs_root *root, u32 blocksize)
8599 {
8600         struct btrfs_fs_info *fs_info = root->fs_info;
8601         struct btrfs_block_rsv *block_rsv;
8602         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
8603         int ret;
8604         bool global_updated = false;
8605
8606         block_rsv = get_block_rsv(trans, root);
8607
8608         if (unlikely(block_rsv->size == 0))
8609                 goto try_reserve;
8610 again:
8611         ret = block_rsv_use_bytes(block_rsv, blocksize);
8612         if (!ret)
8613                 return block_rsv;
8614
8615         if (block_rsv->failfast)
8616                 return ERR_PTR(ret);
8617
8618         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
8619                 global_updated = true;
8620                 update_global_block_rsv(fs_info);
8621                 goto again;
8622         }
8623
8624         /*
8625          * The global reserve still exists to save us from ourselves, so don't
8626          * warn_on if we are short on our delayed refs reserve.
8627          */
8628         if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
8629             btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
8630                 static DEFINE_RATELIMIT_STATE(_rs,
8631                                 DEFAULT_RATELIMIT_INTERVAL * 10,
8632                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
8633                 if (__ratelimit(&_rs))
8634                         WARN(1, KERN_DEBUG
8635                                 "BTRFS: block rsv returned %d\n", ret);
8636         }
8637 try_reserve:
8638         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
8639                                      BTRFS_RESERVE_NO_FLUSH);
8640         if (!ret)
8641                 return block_rsv;
8642         /*
8643          * If we couldn't reserve metadata bytes, try to use some from
8644          * the global reserve, provided our block rsv belongs to the same
8645          * space info as the global reserve.
8646          */
8647         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
8648             block_rsv->space_info == global_rsv->space_info) {
8649                 ret = block_rsv_use_bytes(global_rsv, blocksize);
8650                 if (!ret)
8651                         return global_rsv;
8652         }
8653         return ERR_PTR(ret);
8654 }
8655
8656 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
8657                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
8658 {
8659         block_rsv_add_bytes(block_rsv, blocksize, false);
8660         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL);
8661 }
8662
8663 /*
8664  * finds a free extent and does all the dirty work required for allocation;
8665  * returns the tree buffer or an ERR_PTR on error.
8666  */
8667 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
8668                                              struct btrfs_root *root,
8669                                              u64 parent, u64 root_objectid,
8670                                              const struct btrfs_disk_key *key,
8671                                              int level, u64 hint,
8672                                              u64 empty_size)
8673 {
8674         struct btrfs_fs_info *fs_info = root->fs_info;
8675         struct btrfs_key ins;
8676         struct btrfs_block_rsv *block_rsv;
8677         struct extent_buffer *buf;
8678         struct btrfs_delayed_extent_op *extent_op;
8679         u64 flags = 0;
8680         int ret;
8681         u32 blocksize = fs_info->nodesize;
8682         bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
8683
8684 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8685         if (btrfs_is_testing(fs_info)) {
8686                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8687                                             level, root_objectid);
8688                 if (!IS_ERR(buf))
8689                         root->alloc_bytenr += blocksize;
8690                 return buf;
8691         }
8692 #endif
8693
8694         block_rsv = use_block_rsv(trans, root, blocksize);
8695         if (IS_ERR(block_rsv))
8696                 return ERR_CAST(block_rsv);
8697
8698         ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
8699                                    empty_size, hint, &ins, 0, 0);
8700         if (ret)
8701                 goto out_unuse;
8702
8703         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
8704                                     root_objectid);
8705         if (IS_ERR(buf)) {
8706                 ret = PTR_ERR(buf);
8707                 goto out_free_reserved;
8708         }
8709
8710         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8711                 if (parent == 0)
8712                         parent = ins.objectid;
8713                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8714         } else
8715                 BUG_ON(parent > 0);
8716
8717         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8718                 extent_op = btrfs_alloc_delayed_extent_op();
8719                 if (!extent_op) {
8720                         ret = -ENOMEM;
8721                         goto out_free_buf;
8722                 }
8723                 if (key)
8724                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
8725                 else
8726                         memset(&extent_op->key, 0, sizeof(extent_op->key));
8727                 extent_op->flags_to_set = flags;
8728                 extent_op->update_key = skinny_metadata ? false : true;
8729                 extent_op->update_flags = true;
8730                 extent_op->is_data = false;
8731                 extent_op->level = level;
8732
8733                 btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
8734                                    root_objectid, level, 0,
8735                                    BTRFS_ADD_DELAYED_EXTENT);
8736                 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
8737                                                  ins.offset, parent,
8738                                                  root_objectid, level,
8739                                                  BTRFS_ADD_DELAYED_EXTENT,
8740                                                  extent_op, NULL, NULL);
8741                 if (ret)
8742                         goto out_free_delayed;
8743         }
8744         return buf;
8745
8746 out_free_delayed:
8747         btrfs_free_delayed_extent_op(extent_op);
8748 out_free_buf:
8749         free_extent_buffer(buf);
8750 out_free_reserved:
8751         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
8752 out_unuse:
8753         unuse_block_rsv(fs_info, block_rsv, blocksize);
8754         return ERR_PTR(ret);
8755 }
8756
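/*
 * State shared by the tree walking helpers below (used by snapshot/subvolume
 * deletion).  wc->stage starts in DROP_REFERENCE, dropping references on
 * blocks as the walk descends; when do_walk_down() finds a shared block whose
 * back refs still need updating it switches the stage to UPDATE_BACKREF for
 * that subtree, and walk_up_proc() switches back once the shared level has
 * been processed.
 */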
8757 struct walk_control {
8758         u64 refs[BTRFS_MAX_LEVEL];
8759         u64 flags[BTRFS_MAX_LEVEL];
8760         struct btrfs_key update_progress;
8761         struct btrfs_key drop_progress;
8762         int drop_level;
8763         int stage;
8764         int level;
8765         int shared_level;
8766         int update_ref;
8767         int keep_locks;
8768         int reada_slot;
8769         int reada_count;
8770         int restarted;
8771 };
8772
8773 #define DROP_REFERENCE  1
8774 #define UPDATE_BACKREF  2
8775
8776 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8777                                      struct btrfs_root *root,
8778                                      struct walk_control *wc,
8779                                      struct btrfs_path *path)
8780 {
8781         struct btrfs_fs_info *fs_info = root->fs_info;
8782         u64 bytenr;
8783         u64 generation;
8784         u64 refs;
8785         u64 flags;
8786         u32 nritems;
8787         struct btrfs_key key;
8788         struct extent_buffer *eb;
8789         int ret;
8790         int slot;
8791         int nread = 0;
8792
8793         if (path->slots[wc->level] < wc->reada_slot) {
8794                 wc->reada_count = wc->reada_count * 2 / 3;
8795                 wc->reada_count = max(wc->reada_count, 2);
8796         } else {
8797                 wc->reada_count = wc->reada_count * 3 / 2;
8798                 wc->reada_count = min_t(int, wc->reada_count,
8799                                         BTRFS_NODEPTRS_PER_BLOCK(fs_info));
8800         }
8801
8802         eb = path->nodes[wc->level];
8803         nritems = btrfs_header_nritems(eb);
8804
8805         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8806                 if (nread >= wc->reada_count)
8807                         break;
8808
8809                 cond_resched();
8810                 bytenr = btrfs_node_blockptr(eb, slot);
8811                 generation = btrfs_node_ptr_generation(eb, slot);
8812
8813                 if (slot == path->slots[wc->level])
8814                         goto reada;
8815
8816                 if (wc->stage == UPDATE_BACKREF &&
8817                     generation <= root->root_key.offset)
8818                         continue;
8819
8820                 /* We don't lock the tree block, it's OK to be racy here */
8821                 ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
8822                                                wc->level - 1, 1, &refs,
8823                                                &flags);
8824                 /* We don't care about errors in readahead. */
8825                 if (ret < 0)
8826                         continue;
8827                 BUG_ON(refs == 0);
8828
8829                 if (wc->stage == DROP_REFERENCE) {
8830                         if (refs == 1)
8831                                 goto reada;
8832
8833                         if (wc->level == 1 &&
8834                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8835                                 continue;
8836                         if (!wc->update_ref ||
8837                             generation <= root->root_key.offset)
8838                                 continue;
8839                         btrfs_node_key_to_cpu(eb, &key, slot);
8840                         ret = btrfs_comp_cpu_keys(&key,
8841                                                   &wc->update_progress);
8842                         if (ret < 0)
8843                                 continue;
8844                 } else {
8845                         if (wc->level == 1 &&
8846                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8847                                 continue;
8848                 }
8849 reada:
8850                 readahead_tree_block(fs_info, bytenr);
8851                 nread++;
8852         }
8853         wc->reada_slot = slot;
8854 }
8855
8856 /*
8857  * helper to process tree block while walking down the tree.
8858  *
8859  * when wc->stage == UPDATE_BACKREF, this function updates
8860  * back refs for pointers in the block.
8861  *
8862  * NOTE: return value 1 means we should stop walking down.
8863  */
8864 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8865                                    struct btrfs_root *root,
8866                                    struct btrfs_path *path,
8867                                    struct walk_control *wc, int lookup_info)
8868 {
8869         struct btrfs_fs_info *fs_info = root->fs_info;
8870         int level = wc->level;
8871         struct extent_buffer *eb = path->nodes[level];
8872         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8873         int ret;
8874
8875         if (wc->stage == UPDATE_BACKREF &&
8876             btrfs_header_owner(eb) != root->root_key.objectid)
8877                 return 1;
8878
8879         /*
8880          * when the reference count of a tree block is 1, it won't increase
8881          * again. Once the full backref flag is set, we never clear it.
8882          */
8883         if (lookup_info &&
8884             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8885              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8886                 BUG_ON(!path->locks[level]);
8887                 ret = btrfs_lookup_extent_info(trans, fs_info,
8888                                                eb->start, level, 1,
8889                                                &wc->refs[level],
8890                                                &wc->flags[level]);
8891                 BUG_ON(ret == -ENOMEM);
8892                 if (ret)
8893                         return ret;
8894                 BUG_ON(wc->refs[level] == 0);
8895         }
8896
8897         if (wc->stage == DROP_REFERENCE) {
8898                 if (wc->refs[level] > 1)
8899                         return 1;
8900
8901                 if (path->locks[level] && !wc->keep_locks) {
8902                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8903                         path->locks[level] = 0;
8904                 }
8905                 return 0;
8906         }
8907
8908         /* wc->stage == UPDATE_BACKREF */
8909         if (!(wc->flags[level] & flag)) {
8910                 BUG_ON(!path->locks[level]);
8911                 ret = btrfs_inc_ref(trans, root, eb, 1);
8912                 BUG_ON(ret); /* -ENOMEM */
8913                 ret = btrfs_dec_ref(trans, root, eb, 0);
8914                 BUG_ON(ret); /* -ENOMEM */
8915                 ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
8916                                                   eb->len, flag,
8917                                                   btrfs_header_level(eb), 0);
8918                 BUG_ON(ret); /* -ENOMEM */
8919                 wc->flags[level] |= flag;
8920         }
8921
8922         /*
8923          * the block is shared by multiple trees, so it's not good to
8924          * keep the tree lock
8925          */
8926         if (path->locks[level] && level > 0) {
8927                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8928                 path->locks[level] = 0;
8929         }
8930         return 0;
8931 }
8932
8933 /*
8934  * This is used to verify a ref exists for this root to deal with a bug where we
8935  * would have a drop_progress key that hadn't been updated properly.
8936  */
8937 static int check_ref_exists(struct btrfs_trans_handle *trans,
8938                             struct btrfs_root *root, u64 bytenr, u64 parent,
8939                             int level)
8940 {
8941         struct btrfs_path *path;
8942         struct btrfs_extent_inline_ref *iref;
8943         int ret;
8944
8945         path = btrfs_alloc_path();
8946         if (!path)
8947                 return -ENOMEM;
8948
8949         ret = lookup_extent_backref(trans, path, &iref, bytenr,
8950                                     root->fs_info->nodesize, parent,
8951                                     root->root_key.objectid, level, 0);
8952         btrfs_free_path(path);
8953         if (ret == -ENOENT)
8954                 return 0;
8955         if (ret < 0)
8956                 return ret;
8957         return 1;
8958 }
8959
8960 /*
8961  * helper to process tree block pointer.
8962  *
8963  * when wc->stage == DROP_REFERENCE, this function checks the
8964  * reference count of the block pointed to. If the block
8965  * is shared and we need to update back refs for the subtree
8966  * rooted at the block, this function changes wc->stage to
8967  * UPDATE_BACKREF. If the block is shared and there is no
8968  * need to update back refs, this function drops the reference
8969  * to the block.
8970  *
8971  * NOTE: return value 1 means we should stop walking down.
8972  */
8973 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8974                                  struct btrfs_root *root,
8975                                  struct btrfs_path *path,
8976                                  struct walk_control *wc, int *lookup_info)
8977 {
8978         struct btrfs_fs_info *fs_info = root->fs_info;
8979         u64 bytenr;
8980         u64 generation;
8981         u64 parent;
8982         struct btrfs_key key;
8983         struct btrfs_key first_key;
8984         struct extent_buffer *next;
8985         int level = wc->level;
8986         int reada = 0;
8987         int ret = 0;
8988         bool need_account = false;
8989
8990         generation = btrfs_node_ptr_generation(path->nodes[level],
8991                                                path->slots[level]);
8992         /*
8993          * if the lower level block was created before the snapshot
8994          * was created, we know there is no need to update back refs
8995          * for the subtree
8996          */
8997         if (wc->stage == UPDATE_BACKREF &&
8998             generation <= root->root_key.offset) {
8999                 *lookup_info = 1;
9000                 return 1;
9001         }
9002
9003         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
9004         btrfs_node_key_to_cpu(path->nodes[level], &first_key,
9005                               path->slots[level]);
9006
9007         next = find_extent_buffer(fs_info, bytenr);
9008         if (!next) {
9009                 next = btrfs_find_create_tree_block(fs_info, bytenr);
9010                 if (IS_ERR(next))
9011                         return PTR_ERR(next);
9012
9013                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
9014                                                level - 1);
9015                 reada = 1;
9016         }
9017         btrfs_tree_lock(next);
9018         btrfs_set_lock_blocking_write(next);
9019
9020         ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
9021                                        &wc->refs[level - 1],
9022                                        &wc->flags[level - 1]);
9023         if (ret < 0)
9024                 goto out_unlock;
9025
9026         if (unlikely(wc->refs[level - 1] == 0)) {
9027                 btrfs_err(fs_info, "Missing references.");
9028                 ret = -EIO;
9029                 goto out_unlock;
9030         }
9031         *lookup_info = 0;
9032
9033         if (wc->stage == DROP_REFERENCE) {
9034                 if (wc->refs[level - 1] > 1) {
9035                         need_account = true;
9036                         if (level == 1 &&
9037                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
9038                                 goto skip;
9039
9040                         if (!wc->update_ref ||
9041                             generation <= root->root_key.offset)
9042                                 goto skip;
9043
9044                         btrfs_node_key_to_cpu(path->nodes[level], &key,
9045                                               path->slots[level]);
9046                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
9047                         if (ret < 0)
9048                                 goto skip;
9049
9050                         wc->stage = UPDATE_BACKREF;
9051                         wc->shared_level = level - 1;
9052                 }
9053         } else {
9054                 if (level == 1 &&
9055                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
9056                         goto skip;
9057         }
9058
9059         if (!btrfs_buffer_uptodate(next, generation, 0)) {
9060                 btrfs_tree_unlock(next);
9061                 free_extent_buffer(next);
9062                 next = NULL;
9063                 *lookup_info = 1;
9064         }
9065
9066         if (!next) {
9067                 if (reada && level == 1)
9068                         reada_walk_down(trans, root, wc, path);
9069                 next = read_tree_block(fs_info, bytenr, generation, level - 1,
9070                                        &first_key);
9071                 if (IS_ERR(next)) {
9072                         return PTR_ERR(next);
9073                 } else if (!extent_buffer_uptodate(next)) {
9074                         free_extent_buffer(next);
9075                         return -EIO;
9076                 }
9077                 btrfs_tree_lock(next);
9078                 btrfs_set_lock_blocking_write(next);
9079         }
9080
9081         level--;
9082         ASSERT(level == btrfs_header_level(next));
9083         if (level != btrfs_header_level(next)) {
9084                 btrfs_err(root->fs_info, "mismatched level");
9085                 ret = -EIO;
9086                 goto out_unlock;
9087         }
9088         path->nodes[level] = next;
9089         path->slots[level] = 0;
9090         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9091         wc->level = level;
9092         if (wc->level == 1)
9093                 wc->reada_slot = 0;
9094         return 0;
9095 skip:
9096         wc->refs[level - 1] = 0;
9097         wc->flags[level - 1] = 0;
9098         if (wc->stage == DROP_REFERENCE) {
9099                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
9100                         parent = path->nodes[level]->start;
9101                 } else {
9102                         ASSERT(root->root_key.objectid ==
9103                                btrfs_header_owner(path->nodes[level]));
9104                         if (root->root_key.objectid !=
9105                             btrfs_header_owner(path->nodes[level])) {
9106                                 btrfs_err(root->fs_info,
9107                                                 "mismatched block owner");
9108                                 ret = -EIO;
9109                                 goto out_unlock;
9110                         }
9111                         parent = 0;
9112                 }
9113
9114                 /*
9115                  * If we had a drop_progress we need to verify the refs are set
9116                  * as expected.  If we find our ref then we know that from here
9117                  * on out everything should be correct, and we can clear the
9118                  * ->restarted flag.
9119                  */
9120                 if (wc->restarted) {
9121                         ret = check_ref_exists(trans, root, bytenr, parent,
9122                                                level - 1);
9123                         if (ret < 0)
9124                                 goto out_unlock;
9125                         if (ret == 0)
9126                                 goto no_delete;
9127                         ret = 0;
9128                         wc->restarted = 0;
9129                 }
9130
9131                 /*
9132                  * The reloc tree doesn't contribute to qgroup numbers, and we have
9133                  * already accounted for them at merge time (replace_path),
9134                  * thus we can skip the expensive subtree trace here.
9135                  */
9136                 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
9137                     need_account) {
9138                         ret = btrfs_qgroup_trace_subtree(trans, next,
9139                                                          generation, level - 1);
9140                         if (ret) {
9141                                 btrfs_err_rl(fs_info,
9142                                              "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
9143                                              ret);
9144                         }
9145                 }
9146
9147                 /*
9148                  * We need to update the next key in our walk control so we can
9149                  * update the drop_progress key accordingly.  We don't care if
9150                  * find_next_key doesn't find a key because that means we're at
9151                  * the end and are going to clean up now.
9152                  */
9153                 wc->drop_level = level;
9154                 find_next_key(path, level, &wc->drop_progress);
9155
9156                 ret = btrfs_free_extent(trans, root, bytenr, fs_info->nodesize,
9157                                         parent, root->root_key.objectid,
9158                                         level - 1, 0);
9159                 if (ret)
9160                         goto out_unlock;
9161         }
9162 no_delete:
9163         *lookup_info = 1;
9164         ret = 1;
9165
9166 out_unlock:
9167         btrfs_tree_unlock(next);
9168         free_extent_buffer(next);
9169
9170         return ret;
9171 }
9172
9173 /*
9174  * helper to process tree block while walking up the tree.
9175  *
9176  * when wc->stage == DROP_REFERENCE, this function drops
9177  * reference count on the block.
9178  *
9179  * when wc->stage == UPDATE_BACKREF, this function changes
9180  * wc->stage back to DROP_REFERENCE if we changed wc->stage
9181  * to UPDATE_BACKREF previously while processing the block.
9182  *
9183  * NOTE: return value 1 means we should stop walking up.
9184  */
9185 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
9186                                  struct btrfs_root *root,
9187                                  struct btrfs_path *path,
9188                                  struct walk_control *wc)
9189 {
9190         struct btrfs_fs_info *fs_info = root->fs_info;
9191         int ret;
9192         int level = wc->level;
9193         struct extent_buffer *eb = path->nodes[level];
9194         u64 parent = 0;
9195
9196         if (wc->stage == UPDATE_BACKREF) {
9197                 BUG_ON(wc->shared_level < level);
9198                 if (level < wc->shared_level)
9199                         goto out;
9200
9201                 ret = find_next_key(path, level + 1, &wc->update_progress);
9202                 if (ret > 0)
9203                         wc->update_ref = 0;
9204
9205                 wc->stage = DROP_REFERENCE;
9206                 wc->shared_level = -1;
9207                 path->slots[level] = 0;
9208
9209                 /*
9210                  * check reference count again if the block isn't locked.
9211                  * we should start walking down the tree again if reference
9212                  * count is one.
9213                  */
9214                 if (!path->locks[level]) {
9215                         BUG_ON(level == 0);
9216                         btrfs_tree_lock(eb);
9217                         btrfs_set_lock_blocking_write(eb);
9218                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9219
9220                         ret = btrfs_lookup_extent_info(trans, fs_info,
9221                                                        eb->start, level, 1,
9222                                                        &wc->refs[level],
9223                                                        &wc->flags[level]);
9224                         if (ret < 0) {
9225                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9226                                 path->locks[level] = 0;
9227                                 return ret;
9228                         }
9229                         BUG_ON(wc->refs[level] == 0);
9230                         if (wc->refs[level] == 1) {
9231                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
9232                                 path->locks[level] = 0;
9233                                 return 1;
9234                         }
9235                 }
9236         }
9237
9238         /* wc->stage == DROP_REFERENCE */
9239         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
9240
9241         if (wc->refs[level] == 1) {
9242                 if (level == 0) {
9243                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9244                                 ret = btrfs_dec_ref(trans, root, eb, 1);
9245                         else
9246                                 ret = btrfs_dec_ref(trans, root, eb, 0);
9247                         BUG_ON(ret); /* -ENOMEM */
9248                         ret = btrfs_qgroup_trace_leaf_items(trans, eb);
9249                         if (ret) {
9250                                 btrfs_err_rl(fs_info,
9251                                              "error %d accounting leaf items. Quota is out of sync, rescan required.",
9252                                              ret);
9253                         }
9254                 }
9255                 /* make block locked assertion in btrfs_clean_tree_block happy */
9256                 if (!path->locks[level] &&
9257                     btrfs_header_generation(eb) == trans->transid) {
9258                         btrfs_tree_lock(eb);
9259                         btrfs_set_lock_blocking_write(eb);
9260                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9261                 }
9262                 btrfs_clean_tree_block(eb);
9263         }
9264
9265         if (eb == root->node) {
9266                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9267                         parent = eb->start;
9268                 else if (root->root_key.objectid != btrfs_header_owner(eb))
9269                         goto owner_mismatch;
9270         } else {
9271                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
9272                         parent = path->nodes[level + 1]->start;
9273                 else if (root->root_key.objectid !=
9274                          btrfs_header_owner(path->nodes[level + 1]))
9275                         goto owner_mismatch;
9276         }
9277
9278         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
9279 out:
9280         wc->refs[level] = 0;
9281         wc->flags[level] = 0;
9282         return 0;
9283
9284 owner_mismatch:
9285         btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
9286                      btrfs_header_owner(eb), root->root_key.objectid);
9287         return -EUCLEAN;
9288 }
9289
9290 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
9291                                    struct btrfs_root *root,
9292                                    struct btrfs_path *path,
9293                                    struct walk_control *wc)
9294 {
9295         int level = wc->level;
9296         int lookup_info = 1;
9297         int ret;
9298
9299         while (level >= 0) {
9300                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
9301                 if (ret > 0)
9302                         break;
9303
9304                 if (level == 0)
9305                         break;
9306
9307                 if (path->slots[level] >=
9308                     btrfs_header_nritems(path->nodes[level]))
9309                         break;
9310
9311                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
9312                 if (ret > 0) {
9313                         path->slots[level]++;
9314                         continue;
9315                 } else if (ret < 0)
9316                         return ret;
9317                 level = wc->level;
9318         }
9319         return 0;
9320 }
9321
9322 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
9323                                  struct btrfs_root *root,
9324                                  struct btrfs_path *path,
9325                                  struct walk_control *wc, int max_level)
9326 {
9327         int level = wc->level;
9328         int ret;
9329
9330         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
9331         while (level < max_level && path->nodes[level]) {
9332                 wc->level = level;
9333                 if (path->slots[level] + 1 <
9334                     btrfs_header_nritems(path->nodes[level])) {
9335                         path->slots[level]++;
9336                         return 0;
9337                 } else {
9338                         ret = walk_up_proc(trans, root, path, wc);
9339                         if (ret > 0)
9340                                 return 0;
9341                         if (ret < 0)
9342                                 return ret;
9343
9344                         if (path->locks[level]) {
9345                                 btrfs_tree_unlock_rw(path->nodes[level],
9346                                                      path->locks[level]);
9347                                 path->locks[level] = 0;
9348                         }
9349                         free_extent_buffer(path->nodes[level]);
9350                         path->nodes[level] = NULL;
9351                         level++;
9352                 }
9353         }
9354         return 1;
9355 }
9356
9357 /*
9358  * drop a subvolume tree.
9359  *
9360  * this function traverses the tree freeing any blocks that are only
9361  * referenced by the tree.
9362  *
9363  * when a shared tree block is found, this function decreases its
9364  * reference count by one. if update_ref is true, this function
9365  * also makes sure backrefs for the shared block and all lower level
9366  * blocks are properly updated.
9367  *
9368  * If called with for_reloc == 0, may exit early with -EAGAIN
9369  */
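/*
 * A minimal caller sketch (an illustrative assumption, not copied from the
 * cleaner code): the root stays on the dead roots list, so an -EAGAIN exit
 * is simply retried on a later pass:
 *
 *	err = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	if (err == -EAGAIN)
 *		return;		drop_progress was saved, we pick it up later
 *	if (err)
 *		btrfs_drop_snapshot() already called btrfs_handle_fs_error()
 */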
9370 int btrfs_drop_snapshot(struct btrfs_root *root,
9371                          struct btrfs_block_rsv *block_rsv, int update_ref,
9372                          int for_reloc)
9373 {
9374         struct btrfs_fs_info *fs_info = root->fs_info;
9375         struct btrfs_path *path;
9376         struct btrfs_trans_handle *trans;
9377         struct btrfs_root *tree_root = fs_info->tree_root;
9378         struct btrfs_root_item *root_item = &root->root_item;
9379         struct walk_control *wc;
9380         struct btrfs_key key;
9381         int err = 0;
9382         int ret;
9383         int level;
9384         bool root_dropped = false;
9385
9386         btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
9387
9388         path = btrfs_alloc_path();
9389         if (!path) {
9390                 err = -ENOMEM;
9391                 goto out;
9392         }
9393
9394         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9395         if (!wc) {
9396                 btrfs_free_path(path);
9397                 err = -ENOMEM;
9398                 goto out;
9399         }
9400
9401         trans = btrfs_start_transaction(tree_root, 0);
9402         if (IS_ERR(trans)) {
9403                 err = PTR_ERR(trans);
9404                 goto out_free;
9405         }
9406
9407         err = btrfs_run_delayed_items(trans);
9408         if (err)
9409                 goto out_end_trans;
9410
9411         if (block_rsv)
9412                 trans->block_rsv = block_rsv;
9413
9414         /*
9415          * This will help us catch people modifying the fs tree while we're
9416          * dropping it.  It is unsafe to mess with the fs tree while it's being
9417          * dropped as we unlock the root node and parent nodes as we walk down
9418          * the tree, assuming nothing will change.  If something does change
9419          * then we'll have stale information and drop references to blocks we've
9420          * already dropped.
9421          */
9422         set_bit(BTRFS_ROOT_DELETING, &root->state);
9423         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
9424                 level = btrfs_header_level(root->node);
9425                 path->nodes[level] = btrfs_lock_root_node(root);
9426                 btrfs_set_lock_blocking_write(path->nodes[level]);
9427                 path->slots[level] = 0;
9428                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9429                 memset(&wc->update_progress, 0,
9430                        sizeof(wc->update_progress));
9431         } else {
9432                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
9433                 memcpy(&wc->update_progress, &key,
9434                        sizeof(wc->update_progress));
9435
9436                 level = root_item->drop_level;
9437                 BUG_ON(level == 0);
9438                 path->lowest_level = level;
9439                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9440                 path->lowest_level = 0;
9441                 if (ret < 0) {
9442                         err = ret;
9443                         goto out_end_trans;
9444                 }
9445                 WARN_ON(ret > 0);
9446
9447                 /*
9448                  * unlock our path, this is safe because only this
9449                  * function is allowed to delete this snapshot
9450                  */
9451                 btrfs_unlock_up_safe(path, 0);
9452
9453                 level = btrfs_header_level(root->node);
9454                 while (1) {
9455                         btrfs_tree_lock(path->nodes[level]);
9456                         btrfs_set_lock_blocking_write(path->nodes[level]);
9457                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9458
9459                         ret = btrfs_lookup_extent_info(trans, fs_info,
9460                                                 path->nodes[level]->start,
9461                                                 level, 1, &wc->refs[level],
9462                                                 &wc->flags[level]);
9463                         if (ret < 0) {
9464                                 err = ret;
9465                                 goto out_end_trans;
9466                         }
9467                         BUG_ON(wc->refs[level] == 0);
9468
9469                         if (level == root_item->drop_level)
9470                                 break;
9471
9472                         btrfs_tree_unlock(path->nodes[level]);
9473                         path->locks[level] = 0;
9474                         WARN_ON(wc->refs[level] != 1);
9475                         level--;
9476                 }
9477         }
9478
9479         wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
9480         wc->level = level;
9481         wc->shared_level = -1;
9482         wc->stage = DROP_REFERENCE;
9483         wc->update_ref = update_ref;
9484         wc->keep_locks = 0;
9485         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
9486
9487         while (1) {
9488
9489                 ret = walk_down_tree(trans, root, path, wc);
9490                 if (ret < 0) {
9491                         err = ret;
9492                         break;
9493                 }
9494
9495                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
9496                 if (ret < 0) {
9497                         err = ret;
9498                         break;
9499                 }
9500
9501                 if (ret > 0) {
9502                         BUG_ON(wc->stage != DROP_REFERENCE);
9503                         break;
9504                 }
9505
9506                 if (wc->stage == DROP_REFERENCE) {
9507                         wc->drop_level = wc->level;
9508                         btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
9509                                               &wc->drop_progress,
9510                                               path->slots[wc->drop_level]);
9511                 }
9512                 btrfs_cpu_key_to_disk(&root_item->drop_progress,
9513                                       &wc->drop_progress);
9514                 root_item->drop_level = wc->drop_level;
9515
9516                 BUG_ON(wc->level == 0);
9517                 if (btrfs_should_end_transaction(trans) ||
9518                     (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
9519                         ret = btrfs_update_root(trans, tree_root,
9520                                                 &root->root_key,
9521                                                 root_item);
9522                         if (ret) {
9523                                 btrfs_abort_transaction(trans, ret);
9524                                 err = ret;
9525                                 goto out_end_trans;
9526                         }
9527
9528                         btrfs_end_transaction_throttle(trans);
9529                         if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
9530                                 btrfs_debug(fs_info,
9531                                             "drop snapshot early exit");
9532                                 err = -EAGAIN;
9533                                 goto out_free;
9534                         }
9535
9536                         trans = btrfs_start_transaction(tree_root, 0);
9537                         if (IS_ERR(trans)) {
9538                                 err = PTR_ERR(trans);
9539                                 goto out_free;
9540                         }
9541                         if (block_rsv)
9542                                 trans->block_rsv = block_rsv;
9543                 }
9544         }
9545         btrfs_release_path(path);
9546         if (err)
9547                 goto out_end_trans;
9548
9549         ret = btrfs_del_root(trans, &root->root_key);
9550         if (ret) {
9551                 btrfs_abort_transaction(trans, ret);
9552                 err = ret;
9553                 goto out_end_trans;
9554         }
9555
9556         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9557                 ret = btrfs_find_root(tree_root, &root->root_key, path,
9558                                       NULL, NULL);
9559                 if (ret < 0) {
9560                         btrfs_abort_transaction(trans, ret);
9561                         err = ret;
9562                         goto out_end_trans;
9563                 } else if (ret > 0) {
9564                         /* if we fail to delete the orphan item this time
9565                          * around, it'll get picked up the next time.
9566                          *
9567                          * The most common failure here is just -ENOENT.
9568                          */
9569                         btrfs_del_orphan_item(trans, tree_root,
9570                                               root->root_key.objectid);
9571                 }
9572         }
9573
9574         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9575                 btrfs_add_dropped_root(trans, root);
9576         } else {
9577                 free_extent_buffer(root->node);
9578                 free_extent_buffer(root->commit_root);
9579                 btrfs_put_fs_root(root);
9580         }
9581         root_dropped = true;
9582 out_end_trans:
9583         btrfs_end_transaction_throttle(trans);
9584 out_free:
9585         kfree(wc);
9586         btrfs_free_path(path);
9587 out:
9588         /*
9589          * So if we need to stop dropping the snapshot for whatever reason we
9590          * need to make sure to add it back to the dead root list so that we
9591          * keep trying to do the work later.  This also cleans up the root if
9592          * we don't have it in the radix (like when we recover after a power fail
9593          * or unmount) so we don't leak memory.
9594          */
9595         if (!for_reloc && !root_dropped)
9596                 btrfs_add_dead_root(root);
9597         if (err && err != -EAGAIN)
9598                 btrfs_handle_fs_error(fs_info, err, NULL);
9599         return err;
9600 }
9601
9602 /*
9603  * drop subtree rooted at tree block 'node'.
9604  *
9605  * NOTE: this function will unlock and release tree block 'node'.
9606  * Only used by the relocation code.
9607  */
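/*
 * Minimal caller sketch (an assumption for illustration, not taken from the
 * relocation code): both buffers must be write locked by the caller and only
 * 'node' is handed over:
 *
 *	btrfs_tree_lock(parent);
 *	btrfs_tree_lock(node);
 *	ret = btrfs_drop_subtree(trans, reloc_root, node, parent);
 *	'node' has been unlocked and released for us, 'parent' is still ours
 */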
9608 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9609                         struct btrfs_root *root,
9610                         struct extent_buffer *node,
9611                         struct extent_buffer *parent)
9612 {
9613         struct btrfs_fs_info *fs_info = root->fs_info;
9614         struct btrfs_path *path;
9615         struct walk_control *wc;
9616         int level;
9617         int parent_level;
9618         int ret = 0;
9619         int wret;
9620
9621         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9622
9623         path = btrfs_alloc_path();
9624         if (!path)
9625                 return -ENOMEM;
9626
9627         wc = kzalloc(sizeof(*wc), GFP_NOFS);
9628         if (!wc) {
9629                 btrfs_free_path(path);
9630                 return -ENOMEM;
9631         }
9632
9633         btrfs_assert_tree_locked(parent);
9634         parent_level = btrfs_header_level(parent);
9635         extent_buffer_get(parent);
9636         path->nodes[parent_level] = parent;
9637         path->slots[parent_level] = btrfs_header_nritems(parent);
9638
9639         btrfs_assert_tree_locked(node);
9640         level = btrfs_header_level(node);
9641         path->nodes[level] = node;
9642         path->slots[level] = 0;
9643         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9644
9645         wc->refs[parent_level] = 1;
9646         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9647         wc->level = level;
9648         wc->shared_level = -1;
9649         wc->stage = DROP_REFERENCE;
9650         wc->update_ref = 0;
9651         wc->keep_locks = 1;
9652         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
9653
9654         while (1) {
9655                 wret = walk_down_tree(trans, root, path, wc);
9656                 if (wret < 0) {
9657                         ret = wret;
9658                         break;
9659                 }
9660
9661                 wret = walk_up_tree(trans, root, path, wc, parent_level);
9662                 if (wret < 0)
9663                         ret = wret;
9664                 if (wret != 0)
9665                         break;
9666         }
9667
9668         kfree(wc);
9669         btrfs_free_path(path);
9670         return ret;
9671 }
9672
9673 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
9674 {
9675         u64 num_devices;
9676         u64 stripped;
9677
9678         /*
9679          * if restripe for this chunk_type is on, pick the target profile and
9680          * return it, otherwise do the usual balance
9681          */
9682         stripped = get_restripe_target(fs_info, flags);
9683         if (stripped)
9684                 return extended_to_chunk(stripped);
9685
9686         num_devices = fs_info->fs_devices->rw_devices;
9687
9688         stripped = BTRFS_BLOCK_GROUP_RAID0 |
9689                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9690                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9691
9692         if (num_devices == 1) {
9693                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9694                 stripped = flags & ~stripped;
9695
9696                 /* turn raid0 into single device chunks */
9697                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9698                         return stripped;
9699
9700                 /* turn mirroring into duplication */
9701                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9702                              BTRFS_BLOCK_GROUP_RAID10))
9703                         return stripped | BTRFS_BLOCK_GROUP_DUP;
9704         } else {
9705                 /* they already had raid on here, just return */
9706                 if (flags & stripped)
9707                         return flags;
9708
9709                 stripped |= BTRFS_BLOCK_GROUP_DUP;
9710                 stripped = flags & ~stripped;
9711
9712                 /* switch duplicated blocks with raid1 */
9713                 if (flags & BTRFS_BLOCK_GROUP_DUP)
9714                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
9715
9716                 /* this is drive concat, leave it alone */
9717         }
9718
9719         return flags;
9720 }
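/*
 * Worked examples of the conversion above (illustrative only, not an
 * exhaustive table):
 *
 *	num_devices == 1:  RAID1 -> DUP,  RAID10 -> DUP,  RAID0 -> single
 *	num_devices >= 2:  DUP -> RAID1,  an existing RAID level is kept as is
 */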
9721
9722 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9723 {
9724         struct btrfs_space_info *sinfo = cache->space_info;
9725         u64 num_bytes;
9726         u64 sinfo_used;
9727         u64 min_allocable_bytes;
9728         int ret = -ENOSPC;
9729
9730         /*
9731          * We need some metadata space and system metadata space for
9732          * allocating chunks in some corner cases, unless the caller is
9733          * forcing the block group to be set readonly.
9734          */
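        /*
         * Example with made up numbers: for a metadata space_info with
         * total_bytes == 10GiB and sinfo_used == 9GiB, a block group whose
         * unused unreserved portion (num_bytes below) is 512MiB can still be
         * set read-only, since 9GiB + 512MiB + 1MiB <= 10GiB holds.
         */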
9735         if ((sinfo->flags &
9736              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9737             !force)
9738                 min_allocable_bytes = SZ_1M;
9739         else
9740                 min_allocable_bytes = 0;
9741
9742         spin_lock(&sinfo->lock);
9743         spin_lock(&cache->lock);
9744
9745         if (cache->ro) {
9746                 cache->ro++;
9747                 ret = 0;
9748                 goto out;
9749         }
9750
9751         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9752                     cache->bytes_super - btrfs_block_group_used(&cache->item);
9753         sinfo_used = btrfs_space_info_used(sinfo, true);
9754
9755         if (sinfo_used + num_bytes + min_allocable_bytes <=
9756             sinfo->total_bytes) {
9757                 sinfo->bytes_readonly += num_bytes;
9758                 cache->ro++;
9759                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9760                 ret = 0;
9761         }
9762 out:
9763         spin_unlock(&cache->lock);
9764         spin_unlock(&sinfo->lock);
9765         if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
9766                 btrfs_info(cache->fs_info,
9767                         "unable to make block group %llu ro",
9768                         cache->key.objectid);
9769                 btrfs_info(cache->fs_info,
9770                         "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
9771                         sinfo_used, num_bytes, min_allocable_bytes);
9772                 dump_space_info(cache->fs_info, cache->space_info, 0, 0);
9773         }
9774         return ret;
9775 }
9776
9777 int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
9778
9779 {
9780         struct btrfs_fs_info *fs_info = cache->fs_info;
9781         struct btrfs_trans_handle *trans;
9782         u64 alloc_flags;
9783         int ret;
9784
9785 again:
9786         trans = btrfs_join_transaction(fs_info->extent_root);
9787         if (IS_ERR(trans))
9788                 return PTR_ERR(trans);
9789
9790         /*
9791          * we're not allowed to set block groups readonly after the dirty
9792          * block groups cache has started writing.  If it already started,
9793          * back off and let this transaction commit
9794          */
9795         mutex_lock(&fs_info->ro_block_group_mutex);
9796         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9797                 u64 transid = trans->transid;
9798
9799                 mutex_unlock(&fs_info->ro_block_group_mutex);
9800                 btrfs_end_transaction(trans);
9801
9802                 ret = btrfs_wait_for_commit(fs_info, transid);
9803                 if (ret)
9804                         return ret;
9805                 goto again;
9806         }
9807
9808         /*
9809          * if we are changing raid levels, try to allocate a corresponding
9810          * block group with the new raid level.
9811          */
9812         alloc_flags = update_block_group_flags(fs_info, cache->flags);
9813         if (alloc_flags != cache->flags) {
9814                 ret = do_chunk_alloc(trans, alloc_flags,
9815                                      CHUNK_ALLOC_FORCE);
9816                 /*
9817                  * ENOSPC is allowed here, we may have enough space
9818                  * already allocated at the new raid level to
9819                  * carry on
9820                  */
9821                 if (ret == -ENOSPC)
9822                         ret = 0;
9823                 if (ret < 0)
9824                         goto out;
9825         }
9826
9827         ret = inc_block_group_ro(cache, 0);
9828         if (!ret)
9829                 goto out;
9830         alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
9831         ret = do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
9832         if (ret < 0)
9833                 goto out;
9834         ret = inc_block_group_ro(cache, 0);
9835 out:
9836         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9837                 alloc_flags = update_block_group_flags(fs_info, cache->flags);
9838                 mutex_lock(&fs_info->chunk_mutex);
9839                 check_system_chunk(trans, alloc_flags);
9840                 mutex_unlock(&fs_info->chunk_mutex);
9841         }
9842         mutex_unlock(&fs_info->ro_block_group_mutex);
9843
9844         btrfs_end_transaction(trans);
9845         return ret;
9846 }
9847
9848 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
9849 {
9850         u64 alloc_flags = get_alloc_profile(trans->fs_info, type);
9851
9852         return do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
9853 }
9854
9855 /*
9856  * helper to account the unused space of all the readonly block groups in
9857  * the space_info. Takes mirrors into account.
9858  */
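/*
 * Worked example (numbers made up for illustration): a read-only RAID1 block
 * group with key.offset == 1GiB and 256MiB used contributes
 * (1GiB - 256MiB) * 2 == 1.5GiB of raw free space, because the factor
 * returned by btrfs_bg_type_to_factor() for RAID1 is 2.
 */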
9859 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9860 {
9861         struct btrfs_block_group_cache *block_group;
9862         u64 free_bytes = 0;
9863         int factor;
9864
9865         /* It's df, we don't care if it's racy */
9866         if (list_empty(&sinfo->ro_bgs))
9867                 return 0;
9868
9869         spin_lock(&sinfo->lock);
9870         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9871                 spin_lock(&block_group->lock);
9872
9873                 if (!block_group->ro) {
9874                         spin_unlock(&block_group->lock);
9875                         continue;
9876                 }
9877
9878                 factor = btrfs_bg_type_to_factor(block_group->flags);
9879                 free_bytes += (block_group->key.offset -
9880                                btrfs_block_group_used(&block_group->item)) *
9881                                factor;
9882
9883                 spin_unlock(&block_group->lock);
9884         }
9885         spin_unlock(&sinfo->lock);
9886
9887         return free_bytes;
9888 }
9889
9890 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
9891 {
9892         struct btrfs_space_info *sinfo = cache->space_info;
9893         u64 num_bytes;
9894
9895         BUG_ON(!cache->ro);
9896
9897         spin_lock(&sinfo->lock);
9898         spin_lock(&cache->lock);
9899         if (!--cache->ro) {
9900                 num_bytes = cache->key.offset - cache->reserved -
9901                             cache->pinned - cache->bytes_super -
9902                             btrfs_block_group_used(&cache->item);
9903                 sinfo->bytes_readonly -= num_bytes;
9904                 list_del_init(&cache->ro_list);
9905         }
9906         spin_unlock(&cache->lock);
9907         spin_unlock(&sinfo->lock);
9908 }
9909
9910 /*
9911  * Checks to see if it's even possible to relocate this block group.
9912  *
9913  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9914  * it's ok to go ahead and try.
9915  */
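/*
 * Caller sketch (illustrative assumption, not lifted from the relocation
 * code): the result is advisory, so a typical pattern is
 *
 *	ret = btrfs_can_relocate(fs_info, block_group->key.objectid);
 *	if (ret < 0)
 *		skip or defer relocating this block group
 */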
9916 int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
9917 {
9918         struct btrfs_block_group_cache *block_group;
9919         struct btrfs_space_info *space_info;
9920         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
9921         struct btrfs_device *device;
9922         u64 min_free;
9923         u64 dev_min = 1;
9924         u64 dev_nr = 0;
9925         u64 target;
9926         int debug;
9927         int index;
9928         int full = 0;
9929         int ret = 0;
9930
9931         debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
9932
9933         block_group = btrfs_lookup_block_group(fs_info, bytenr);
9934
9935         /* odd, couldn't find the block group, leave it alone */
9936         if (!block_group) {
9937                 if (debug)
9938                         btrfs_warn(fs_info,
9939                                    "can't find block group for bytenr %llu",
9940                                    bytenr);
9941                 return -1;
9942         }
9943
9944         min_free = btrfs_block_group_used(&block_group->item);
9945
9946         /* no bytes used, we're good */
9947         if (!min_free)
9948                 goto out;
9949
9950         space_info = block_group->space_info;
9951         spin_lock(&space_info->lock);
9952
9953         full = space_info->full;
9954
9955         /*
9956          * if this is the last block group we have in this space, we can't
9957          * relocate it unless we're able to allocate a new chunk below.
9958          *
9959          * Otherwise, we need to make sure we have room in the space to handle
9960          * all of the extents from this block group.  If we can, we're good.
9961          */
9962         if ((space_info->total_bytes != block_group->key.offset) &&
9963             (btrfs_space_info_used(space_info, false) + min_free <
9964              space_info->total_bytes)) {
9965                 spin_unlock(&space_info->lock);
9966                 goto out;
9967         }
9968         spin_unlock(&space_info->lock);
9969
9970         /*
9971          * ok we don't have enough space, but maybe we have free space on our
9972          * devices to allocate new chunks for relocation, so loop through our
9973          * alloc devices and guess if we have enough space.  if this block
9974          * group is going to be restriped, run checks against the target
9975          * profile instead of the current one.
9976          */
9977         ret = -1;
9978
9979         /*
9980          * index:
9981          *      0: raid10
9982          *      1: raid1
9983          *      2: dup
9984          *      3: raid0
9985          *      4: single
9986          */
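        /*
         * Example of how the index drives the checks below (illustrative):
         * RAID1 needs free space on dev_min == 2 devices with min_free
         * unchanged, DUP doubles min_free because both copies share one
         * device, RAID10 halves min_free but needs dev_min == 4 devices,
         * and RAID0 spreads min_free evenly over all rw devices.
         */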
9987         target = get_restripe_target(fs_info, block_group->flags);
9988         if (target) {
9989                 index = btrfs_bg_flags_to_raid_index(extended_to_chunk(target));
9990         } else {
9991                 /*
9992                  * this is just a balance, so if we were marked as full
9993                  * we know there is no space for a new chunk
9994                  */
9995                 if (full) {
9996                         if (debug)
9997                                 btrfs_warn(fs_info,
9998                                            "no space to alloc new chunk for block group %llu",
9999                                            block_group->key.objectid);
10000                         goto out;
10001                 }
10002
10003                 index = btrfs_bg_flags_to_raid_index(block_group->flags);
10004         }
10005
10006         if (index == BTRFS_RAID_RAID10) {
10007                 dev_min = 4;
10008                 /* Divide by 2 */
10009                 min_free >>= 1;
10010         } else if (index == BTRFS_RAID_RAID1) {
10011                 dev_min = 2;
10012         } else if (index == BTRFS_RAID_DUP) {
10013                 /* Multiply by 2 */
10014                 min_free <<= 1;
10015         } else if (index == BTRFS_RAID_RAID0) {
10016                 dev_min = fs_devices->rw_devices;
10017                 min_free = div64_u64(min_free, dev_min);
10018         }
10019
10020         mutex_lock(&fs_info->chunk_mutex);
10021         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
10022                 u64 dev_offset;
10023
10024                 /*
10025                  * check to make sure we can actually find a chunk with enough
10026                  * space to fit our block group in.
10027                  */
10028                 if (device->total_bytes > device->bytes_used + min_free &&
10029                     !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
10030                         ret = find_free_dev_extent(device, min_free,
10031                                                    &dev_offset, NULL);
10032                         if (!ret)
10033                                 dev_nr++;
10034
10035                         if (dev_nr >= dev_min)
10036                                 break;
10037
10038                         ret = -1;
10039                 }
10040         }
10041         if (debug && ret == -1)
10042                 btrfs_warn(fs_info,
10043                            "no space to allocate a new chunk for block group %llu",
10044                            block_group->key.objectid);
10045         mutex_unlock(&fs_info->chunk_mutex);
10046 out:
10047         btrfs_put_block_group(block_group);
10048         return ret;
10049 }
10050
10051 static int find_first_block_group(struct btrfs_fs_info *fs_info,
10052                                   struct btrfs_path *path,
10053                                   struct btrfs_key *key)
10054 {
10055         struct btrfs_root *root = fs_info->extent_root;
10056         int ret = 0;
10057         struct btrfs_key found_key;
10058         struct extent_buffer *leaf;
10059         struct btrfs_block_group_item bg;
10060         u64 flags;
10061         int slot;
10062
10063         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
10064         if (ret < 0)
10065                 goto out;
10066
10067         while (1) {
10068                 slot = path->slots[0];
10069                 leaf = path->nodes[0];
10070                 if (slot >= btrfs_header_nritems(leaf)) {
10071                         ret = btrfs_next_leaf(root, path);
10072                         if (ret == 0)
10073                                 continue;
10074                         if (ret < 0)
10075                                 goto out;
10076                         break;
10077                 }
10078                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
10079
10080                 if (found_key.objectid >= key->objectid &&
10081                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
10082                         struct extent_map_tree *em_tree;
10083                         struct extent_map *em;
10084
10085                         em_tree = &root->fs_info->mapping_tree.map_tree;
10086                         read_lock(&em_tree->lock);
10087                         em = lookup_extent_mapping(em_tree, found_key.objectid,
10088                                                    found_key.offset);
10089                         read_unlock(&em_tree->lock);
10090                         if (!em) {
10091                                 btrfs_err(fs_info,
10092                         "logical %llu len %llu found bg but no related chunk",
10093                                           found_key.objectid, found_key.offset);
10094                                 ret = -ENOENT;
10095                         } else if (em->start != found_key.objectid ||
10096                                    em->len != found_key.offset) {
10097                                 btrfs_err(fs_info,
10098                 "block group %llu len %llu mismatch with chunk %llu len %llu",
10099                                           found_key.objectid, found_key.offset,
10100                                           em->start, em->len);
10101                                 ret = -EUCLEAN;
10102                         } else {
10103                                 read_extent_buffer(leaf, &bg,
10104                                         btrfs_item_ptr_offset(leaf, slot),
10105                                         sizeof(bg));
10106                                 flags = btrfs_block_group_flags(&bg) &
10107                                         BTRFS_BLOCK_GROUP_TYPE_MASK;
10108
10109                                 if (flags != (em->map_lookup->type &
10110                                               BTRFS_BLOCK_GROUP_TYPE_MASK)) {
10111                                         btrfs_err(fs_info,
10112 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
10113                                                 found_key.objectid,
10114                                                 found_key.offset, flags,
10115                                                 (BTRFS_BLOCK_GROUP_TYPE_MASK &
10116                                                  em->map_lookup->type));
10117                                         ret = -EUCLEAN;
10118                                 } else {
10119                                         ret = 0;
10120                                 }
10121                         }
10122                         free_extent_map(em);
10123                         goto out;
10124                 }
10125                 path->slots[0]++;
10126         }
10127 out:
10128         return ret;
10129 }
10130
10131 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
10132 {
10133         struct btrfs_block_group_cache *block_group;
10134         u64 last = 0;
10135
10136         while (1) {
10137                 struct inode *inode;
10138
10139                 block_group = btrfs_lookup_first_block_group(info, last);
10140                 while (block_group) {
10141                         wait_block_group_cache_done(block_group);
10142                         spin_lock(&block_group->lock);
10143                         if (block_group->iref)
10144                                 break;
10145                         spin_unlock(&block_group->lock);
10146                         block_group = next_block_group(info, block_group);
10147                 }
10148                 if (!block_group) {
10149                         if (last == 0)
10150                                 break;
10151                         last = 0;
10152                         continue;
10153                 }
10154
10155                 inode = block_group->inode;
10156                 block_group->iref = 0;
10157                 block_group->inode = NULL;
10158                 spin_unlock(&block_group->lock);
10159                 ASSERT(block_group->io_ctl.inode == NULL);
10160                 iput(inode);
10161                 last = block_group->key.objectid + block_group->key.offset;
10162                 btrfs_put_block_group(block_group);
10163         }
10164 }
10165
10166 /*
10167  * Must be called only after stopping all workers, since we could have block
10168  * group caching kthreads running, and therefore they could race with us if we
10169  * freed the block groups before stopping them.
10170  */
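/*
 * Ordering sketch for the rule above (an assumption about the unmount path,
 * shown only as an example):
 *
 *	btrfs_stop_all_workers(fs_info);
 *	...
 *	btrfs_free_block_groups(fs_info);
 */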
10171 int btrfs_free_block_groups(struct btrfs_fs_info *info)
10172 {
10173         struct btrfs_block_group_cache *block_group;
10174         struct btrfs_space_info *space_info;
10175         struct btrfs_caching_control *caching_ctl;
10176         struct rb_node *n;
10177
10178         down_write(&info->commit_root_sem);
10179         while (!list_empty(&info->caching_block_groups)) {
10180                 caching_ctl = list_entry(info->caching_block_groups.next,
10181                                          struct btrfs_caching_control, list);
10182                 list_del(&caching_ctl->list);
10183                 put_caching_control(caching_ctl);
10184         }
10185         up_write(&info->commit_root_sem);
10186
10187         spin_lock(&info->unused_bgs_lock);
10188         while (!list_empty(&info->unused_bgs)) {
10189                 block_group = list_first_entry(&info->unused_bgs,
10190                                                struct btrfs_block_group_cache,
10191                                                bg_list);
10192                 list_del_init(&block_group->bg_list);
10193                 btrfs_put_block_group(block_group);
10194         }
10195         spin_unlock(&info->unused_bgs_lock);
10196
10197         spin_lock(&info->block_group_cache_lock);
10198         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
10199                 block_group = rb_entry(n, struct btrfs_block_group_cache,
10200                                        cache_node);
10201                 rb_erase(&block_group->cache_node,
10202                          &info->block_group_cache_tree);
10203                 RB_CLEAR_NODE(&block_group->cache_node);
10204                 spin_unlock(&info->block_group_cache_lock);
10205
10206                 down_write(&block_group->space_info->groups_sem);
10207                 list_del(&block_group->list);
10208                 up_write(&block_group->space_info->groups_sem);
10209
10210                 /*
10211                  * We haven't cached this block group, which means we could
10212                  * possibly have excluded extents on this block group.
10213                  */
10214                 if (block_group->cached == BTRFS_CACHE_NO ||
10215                     block_group->cached == BTRFS_CACHE_ERROR)
10216                         free_excluded_extents(block_group);
10217
10218                 btrfs_remove_free_space_cache(block_group);
10219                 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
10220                 ASSERT(list_empty(&block_group->dirty_list));
10221                 ASSERT(list_empty(&block_group->io_list));
10222                 ASSERT(list_empty(&block_group->bg_list));
10223                 ASSERT(atomic_read(&block_group->count) == 1);
10224                 btrfs_put_block_group(block_group);
10225
10226                 spin_lock(&info->block_group_cache_lock);
10227         }
10228         spin_unlock(&info->block_group_cache_lock);
10229
10230         /* now that all the block groups are freed, go through and
10231          * free all the space_info structs.  This is only called during
10232          * the final stages of unmount, and so we know nobody is
10233          * using them.  We call synchronize_rcu() once before we start,
10234          * just to be on the safe side.
10235          */
10236         synchronize_rcu();
10237
10238         release_global_block_rsv(info);
10239
10240         while (!list_empty(&info->space_info)) {
10241                 int i;
10242
10243                 space_info = list_entry(info->space_info.next,
10244                                         struct btrfs_space_info,
10245                                         list);
10246
10247                 /*
10248                  * Do not hide this behind enospc_debug, this is actually
10249                  * important and indicates a real bug if this happens.
10250                  */
10251                 if (WARN_ON(space_info->bytes_pinned > 0 ||
10252                             space_info->bytes_reserved > 0 ||
10253                             space_info->bytes_may_use > 0))
10254                         dump_space_info(info, space_info, 0, 0);
10255                 list_del(&space_info->list);
10256                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
10257                         struct kobject *kobj;
10258                         kobj = space_info->block_group_kobjs[i];
10259                         space_info->block_group_kobjs[i] = NULL;
10260                         if (kobj) {
10261                                 kobject_del(kobj);
10262                                 kobject_put(kobj);
10263                         }
10264                 }
10265                 kobject_del(&space_info->kobj);
10266                 kobject_put(&space_info->kobj);
10267         }
10268         return 0;
10269 }
10270
10271 /* link_block_group will queue up kobjects to add when we're reclaim-safe */
10272 void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
10273 {
10274         struct btrfs_space_info *space_info;
10275         struct raid_kobject *rkobj;
10276         LIST_HEAD(list);
10277         int index;
10278         int ret = 0;
10279
10280         spin_lock(&fs_info->pending_raid_kobjs_lock);
10281         list_splice_init(&fs_info->pending_raid_kobjs, &list);
10282         spin_unlock(&fs_info->pending_raid_kobjs_lock);
10283
10284         list_for_each_entry(rkobj, &list, list) {
10285                 space_info = __find_space_info(fs_info, rkobj->flags);
10286                 index = btrfs_bg_flags_to_raid_index(rkobj->flags);
10287
10288                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
10289                                   "%s", get_raid_name(index));
10290                 if (ret) {
10291                         kobject_put(&rkobj->kobj);
10292                         break;
10293                 }
10294         }
10295         if (ret)
10296                 btrfs_warn(fs_info,
10297                            "failed to add kobject for block cache, ignoring");
10298 }
10299
10300 static void link_block_group(struct btrfs_block_group_cache *cache)
10301 {
10302         struct btrfs_space_info *space_info = cache->space_info;
10303         struct btrfs_fs_info *fs_info = cache->fs_info;
10304         int index = btrfs_bg_flags_to_raid_index(cache->flags);
10305         bool first = false;
10306
10307         down_write(&space_info->groups_sem);
10308         if (list_empty(&space_info->block_groups[index]))
10309                 first = true;
10310         list_add_tail(&cache->list, &space_info->block_groups[index]);
10311         up_write(&space_info->groups_sem);
10312
10313         if (first) {
10314                 struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
10315                 if (!rkobj) {
10316                         btrfs_warn(cache->fs_info,
10317                                 "couldn't alloc memory for raid level kobject");
10318                         return;
10319                 }
10320                 rkobj->flags = cache->flags;
10321                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
10322
10323                 spin_lock(&fs_info->pending_raid_kobjs_lock);
10324                 list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
10325                 spin_unlock(&fs_info->pending_raid_kobjs_lock);
10326                 space_info->block_group_kobjs[index] = &rkobj->kobj;
10327         }
10328 }
10329
10330 static struct btrfs_block_group_cache *
10331 btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
10332                                u64 start, u64 size)
10333 {
10334         struct btrfs_block_group_cache *cache;
10335
10336         cache = kzalloc(sizeof(*cache), GFP_NOFS);
10337         if (!cache)
10338                 return NULL;
10339
10340         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
10341                                         GFP_NOFS);
10342         if (!cache->free_space_ctl) {
10343                 kfree(cache);
10344                 return NULL;
10345         }
10346
10347         cache->key.objectid = start;
10348         cache->key.offset = size;
10349         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10350
10351         cache->fs_info = fs_info;
10352         cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
10353         set_free_space_tree_thresholds(cache);
10354
10355         atomic_set(&cache->count, 1);
10356         spin_lock_init(&cache->lock);
10357         init_rwsem(&cache->data_rwsem);
10358         INIT_LIST_HEAD(&cache->list);
10359         INIT_LIST_HEAD(&cache->cluster_list);
10360         INIT_LIST_HEAD(&cache->bg_list);
10361         INIT_LIST_HEAD(&cache->ro_list);
10362         INIT_LIST_HEAD(&cache->dirty_list);
10363         INIT_LIST_HEAD(&cache->io_list);
10364         btrfs_init_free_space_ctl(cache);
10365         atomic_set(&cache->trimming, 0);
10366         mutex_init(&cache->free_space_lock);
10367         btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
10368
10369         return cache;
10370 }
10371
10372
10373 /*
10374  * Iterate all chunks and verify that each of them has the corresponding block
10375  * group
10376  */
10377 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
10378 {
10379         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
10380         struct extent_map *em;
10381         struct btrfs_block_group_cache *bg;
10382         u64 start = 0;
10383         int ret = 0;
10384
10385         while (1) {
10386                 read_lock(&map_tree->map_tree.lock);
10387                 /*
10388                  * lookup_extent_mapping will return the first extent map
10389                  * intersecting the range, so setting @len to 1 is enough to
10390                  * get the first chunk.
10391                  */
10392                 em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
10393                 read_unlock(&map_tree->map_tree.lock);
10394                 if (!em)
10395                         break;
10396
10397                 bg = btrfs_lookup_block_group(fs_info, em->start);
10398                 if (!bg) {
10399                         btrfs_err(fs_info,
10400         "chunk start=%llu len=%llu doesn't have corresponding block group",
10401                                      em->start, em->len);
10402                         ret = -EUCLEAN;
10403                         free_extent_map(em);
10404                         break;
10405                 }
10406                 if (bg->key.objectid != em->start ||
10407                     bg->key.offset != em->len ||
10408                     (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
10409                     (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
10410                         btrfs_err(fs_info,
10411 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
10412                                 em->start, em->len,
10413                                 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
10414                                 bg->key.objectid, bg->key.offset,
10415                                 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
10416                         ret = -EUCLEAN;
10417                         free_extent_map(em);
10418                         btrfs_put_block_group(bg);
10419                         break;
10420                 }
10421                 start = em->start + em->len;
10422                 free_extent_map(em);
10423                 btrfs_put_block_group(bg);
10424         }
10425         return ret;
10426 }
10427
10428 int btrfs_read_block_groups(struct btrfs_fs_info *info)
10429 {
10430         struct btrfs_path *path;
10431         int ret;
10432         struct btrfs_block_group_cache *cache;
10433         struct btrfs_space_info *space_info;
10434         struct btrfs_key key;
10435         struct btrfs_key found_key;
10436         struct extent_buffer *leaf;
10437         int need_clear = 0;
10438         u64 cache_gen;
10439         u64 feature;
10440         int mixed;
10441
10442         feature = btrfs_super_incompat_flags(info->super_copy);
10443         mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
10444
10445         key.objectid = 0;
10446         key.offset = 0;
10447         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
10448         path = btrfs_alloc_path();
10449         if (!path)
10450                 return -ENOMEM;
10451         path->reada = READA_FORWARD;
10452
10453         cache_gen = btrfs_super_cache_generation(info->super_copy);
10454         if (btrfs_test_opt(info, SPACE_CACHE) &&
10455             btrfs_super_generation(info->super_copy) != cache_gen)
10456                 need_clear = 1;
10457         if (btrfs_test_opt(info, CLEAR_CACHE))
10458                 need_clear = 1;
10459
10460         while (1) {
10461                 ret = find_first_block_group(info, path, &key);
10462                 if (ret > 0)
10463                         break;
10464                 if (ret != 0)
10465                         goto error;
10466
10467                 leaf = path->nodes[0];
10468                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
10469
10470                 cache = btrfs_create_block_group_cache(info, found_key.objectid,
10471                                                        found_key.offset);
10472                 if (!cache) {
10473                         ret = -ENOMEM;
10474                         goto error;
10475                 }
10476
10477                 if (need_clear) {
10478                         /*
10479                          * When we mount with old space cache, we need to
10480                          * set BTRFS_DC_CLEAR and set dirty flag.
10481                          *
10482                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
10483                          *    truncate the old free space cache inode and
10484                          *    setup a new one.
10485                          * b) Setting 'dirty flag' makes sure that we flush
10486                          *    the new space cache info onto disk.
10487                          */
10488                         if (btrfs_test_opt(info, SPACE_CACHE))
10489                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
10490                 }
10491
10492                 read_extent_buffer(leaf, &cache->item,
10493                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
10494                                    sizeof(cache->item));
10495                 cache->flags = btrfs_block_group_flags(&cache->item);
10496                 if (!mixed &&
10497                     ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
10498                     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
10499                         btrfs_err(info,
10500 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
10501                                   cache->key.objectid);
10502                         ret = -EINVAL;
10503                         goto error;
10504                 }
10505
10506                 key.objectid = found_key.objectid + found_key.offset;
10507                 btrfs_release_path(path);
10508
10509                 /*
10510                  * We need to exclude the super stripes now so that the space
10511                  * info has super bytes accounted for, otherwise we'll think
10512                  * we have more space than we actually do.
10513                  */
10514                 ret = exclude_super_stripes(cache);
10515                 if (ret) {
10516                         /*
10517                          * We may have excluded something, so call this just in
10518                          * case.
10519                          */
10520                         free_excluded_extents(cache);
10521                         btrfs_put_block_group(cache);
10522                         goto error;
10523                 }
10524
10525                 /*
10526                  * Check for two cases: either we are full, and therefore
10527                  * don't need to bother with the caching work since we won't
10528                  * find any space, or we are empty, and we can just add all
10529                  * the space in and be done with it.  This saves us _a_lot_ of
10530                  * time, particularly in the full case.
10531                  */
10532                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
10533                         cache->last_byte_to_unpin = (u64)-1;
10534                         cache->cached = BTRFS_CACHE_FINISHED;
10535                         free_excluded_extents(cache);
10536                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10537                         cache->last_byte_to_unpin = (u64)-1;
10538                         cache->cached = BTRFS_CACHE_FINISHED;
10539                         add_new_free_space(cache, found_key.objectid,
10540                                            found_key.objectid +
10541                                            found_key.offset);
10542                         free_excluded_extents(cache);
10543                 }
10544
10545                 ret = btrfs_add_block_group_cache(info, cache);
10546                 if (ret) {
10547                         btrfs_remove_free_space_cache(cache);
10548                         btrfs_put_block_group(cache);
10549                         goto error;
10550                 }
10551
10552                 trace_btrfs_add_block_group(info, cache, 0);
10553                 update_space_info(info, cache->flags, found_key.offset,
10554                                   btrfs_block_group_used(&cache->item),
10555                                   cache->bytes_super, &space_info);
10556
10557                 cache->space_info = space_info;
10558
10559                 link_block_group(cache);
10560
10561                 set_avail_alloc_bits(info, cache->flags);
10562                 if (btrfs_chunk_readonly(info, cache->key.objectid)) {
10563                         inc_block_group_ro(cache, 1);
10564                 } else if (btrfs_block_group_used(&cache->item) == 0) {
10565                         ASSERT(list_empty(&cache->bg_list));
10566                         btrfs_mark_bg_unused(cache);
10567                 }
10568         }
10569
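        /*
         * If a mirrored profile (RAID1/RAID10/RAID5/RAID6/DUP) is in use for a
         * space_info, mark its RAID0 and SINGLE block groups read-only so that
         * allocations come from the mirrored block groups instead.
         */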
10570         list_for_each_entry_rcu(space_info, &info->space_info, list) {
10571                 if (!(get_alloc_profile(info, space_info->flags) &
10572                       (BTRFS_BLOCK_GROUP_RAID10 |
10573                        BTRFS_BLOCK_GROUP_RAID1 |
10574                        BTRFS_BLOCK_GROUP_RAID5 |
10575                        BTRFS_BLOCK_GROUP_RAID6 |
10576                        BTRFS_BLOCK_GROUP_DUP)))
10577                         continue;
10578                 /*
10579                  * avoid allocating from un-mirrored block group if there are
10580                  * mirrored block groups.
10581                  */
10582                 list_for_each_entry(cache,
10583                                 &space_info->block_groups[BTRFS_RAID_RAID0],
10584                                 list)
10585                         inc_block_group_ro(cache, 1);
10586                 list_for_each_entry(cache,
10587                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
10588                                 list)
10589                         inc_block_group_ro(cache, 1);
10590         }
10591
10592         btrfs_add_raid_kobjects(info);
10593         init_global_block_rsv(info);
10594         ret = check_chunk_block_group_mappings(info);
10595 error:
10596         btrfs_free_path(path);
10597         return ret;
10598 }
10599
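/*
 * Insert the block group items for the block groups created in the current
 * transaction (queued on trans->new_bgs) into the extent tree, finish their
 * chunk allocation and add their free space tree entries.
 */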
10600 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
10601 {
10602         struct btrfs_fs_info *fs_info = trans->fs_info;
10603         struct btrfs_block_group_cache *block_group;
10604         struct btrfs_root *extent_root = fs_info->extent_root;
10605         struct btrfs_block_group_item item;
10606         struct btrfs_key key;
10607         int ret = 0;
10608
10609         if (!trans->can_flush_pending_bgs)
10610                 return;
10611
10612         while (!list_empty(&trans->new_bgs)) {
10613                 block_group = list_first_entry(&trans->new_bgs,
10614                                                struct btrfs_block_group_cache,
10615                                                bg_list);
10616                 if (ret)
10617                         goto next;
10618
10619                 spin_lock(&block_group->lock);
10620                 memcpy(&item, &block_group->item, sizeof(item));
10621                 memcpy(&key, &block_group->key, sizeof(key));
10622                 spin_unlock(&block_group->lock);
10623
10624                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
10625                                         sizeof(item));
10626                 if (ret)
10627                         btrfs_abort_transaction(trans, ret);
10628                 ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
10629                 if (ret)
10630                         btrfs_abort_transaction(trans, ret);
10631                 add_block_group_free_space(trans, block_group);
10632                 /* already aborted the transaction if it failed. */
10633 next:
10634                 btrfs_delayed_refs_rsv_release(fs_info, 1);
10635                 list_del_init(&block_group->bg_list);
10636         }
10637         btrfs_trans_release_chunk_metadata(trans);
10638 }
10639
10640 int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
10641                            u64 type, u64 chunk_offset, u64 size)
10642 {
10643         struct btrfs_fs_info *fs_info = trans->fs_info;
10644         struct btrfs_block_group_cache *cache;
10645         int ret;
10646
10647         btrfs_set_log_full_commit(fs_info, trans);
10648
10649         cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
10650         if (!cache)
10651                 return -ENOMEM;
10652
10653         btrfs_set_block_group_used(&cache->item, bytes_used);
10654         btrfs_set_block_group_chunk_objectid(&cache->item,
10655                                              BTRFS_FIRST_CHUNK_TREE_OBJECTID);
10656         btrfs_set_block_group_flags(&cache->item, type);
10657
10658         cache->flags = type;
10659         cache->last_byte_to_unpin = (u64)-1;
10660         cache->cached = BTRFS_CACHE_FINISHED;
10661         cache->needs_free_space = 1;
10662         ret = exclude_super_stripes(cache);
10663         if (ret) {
10664                 /*
10665                  * We may have excluded something, so call this just in
10666                  * case.
10667                  */
10668                 free_excluded_extents(cache);
10669                 btrfs_put_block_group(cache);
10670                 return ret;
10671         }
10672
10673         add_new_free_space(cache, chunk_offset, chunk_offset + size);
10674
10675         free_excluded_extents(cache);
10676
10677 #ifdef CONFIG_BTRFS_DEBUG
10678         if (btrfs_should_fragment_free_space(cache)) {
10679                 u64 new_bytes_used = size - bytes_used;
10680
10681                 bytes_used += new_bytes_used >> 1;
10682                 fragment_free_space(cache);
10683         }
10684 #endif
10685         /*
10686          * Ensure the corresponding space_info object is created and
10687          * assigned to our block group. We want our bg to be added to the rbtree
10688          * with its ->space_info set.
10689          */
10690         cache->space_info = __find_space_info(fs_info, cache->flags);
10691         ASSERT(cache->space_info);
10692
10693         ret = btrfs_add_block_group_cache(fs_info, cache);
10694         if (ret) {
10695                 btrfs_remove_free_space_cache(cache);
10696                 btrfs_put_block_group(cache);
10697                 return ret;
10698         }
10699
10700         /*
10701          * Now that our block group has its ->space_info set and is inserted in
10702          * the rbtree, update the space info's counters.
10703          */
10704         trace_btrfs_add_block_group(fs_info, cache, 1);
10705         update_space_info(fs_info, cache->flags, size, bytes_used,
10706                                 cache->bytes_super, &cache->space_info);
10707         update_global_block_rsv(fs_info);
10708
10709         link_block_group(cache);
10710
10711         list_add_tail(&cache->bg_list, &trans->new_bgs);
10712         trans->delayed_ref_updates++;
10713         btrfs_update_delayed_refs_rsv(trans);
10714
10715         set_avail_alloc_bits(fs_info, type);
10716         return 0;
10717 }
10718
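/*
 * Clear the extended profile bits of @flags from the avail_*_alloc_bits
 * masks; called when the last block group with a given profile is removed.
 */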
10719 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10720 {
10721         u64 extra_flags = chunk_to_extended(flags) &
10722                                 BTRFS_EXTENDED_PROFILE_MASK;
10723
10724         write_seqlock(&fs_info->profiles_lock);
10725         if (flags & BTRFS_BLOCK_GROUP_DATA)
10726                 fs_info->avail_data_alloc_bits &= ~extra_flags;
10727         if (flags & BTRFS_BLOCK_GROUP_METADATA)
10728                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10729         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10730                 fs_info->avail_system_alloc_bits &= ~extra_flags;
10731         write_sequnlock(&fs_info->profiles_lock);
10732 }
10733
10734 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10735                              u64 group_start, struct extent_map *em)
10736 {
10737         struct btrfs_fs_info *fs_info = trans->fs_info;
10738         struct btrfs_root *root = fs_info->extent_root;
10739         struct btrfs_path *path;
10740         struct btrfs_block_group_cache *block_group;
10741         struct btrfs_free_cluster *cluster;
10742         struct btrfs_root *tree_root = fs_info->tree_root;
10743         struct btrfs_key key;
10744         struct inode *inode;
10745         struct kobject *kobj = NULL;
10746         int ret;
10747         int index;
10748         int factor;
10749         struct btrfs_caching_control *caching_ctl = NULL;
10750         bool remove_em;
10751         bool remove_rsv = false;
10752
10753         block_group = btrfs_lookup_block_group(fs_info, group_start);
10754         BUG_ON(!block_group);
10755         BUG_ON(!block_group->ro);
10756
10757         trace_btrfs_remove_block_group(block_group);
10758         /*
10759          * Free the reserved super bytes from this block group before
10760          * removing it.
10761          */
10762         free_excluded_extents(block_group);
10763         btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
10764                                   block_group->key.offset);
10765
10766         memcpy(&key, &block_group->key, sizeof(key));
10767         index = btrfs_bg_flags_to_raid_index(block_group->flags);
10768         factor = btrfs_bg_type_to_factor(block_group->flags);
10769
10770         /* make sure this block group isn't part of an allocation cluster */
10771         cluster = &fs_info->data_alloc_cluster;
10772         spin_lock(&cluster->refill_lock);
10773         btrfs_return_cluster_to_free_space(block_group, cluster);
10774         spin_unlock(&cluster->refill_lock);
10775
10776         /*
10777          * make sure this block group isn't part of a metadata
10778          * allocation cluster
10779          */
10780         cluster = &fs_info->meta_alloc_cluster;
10781         spin_lock(&cluster->refill_lock);
10782         btrfs_return_cluster_to_free_space(block_group, cluster);
10783         spin_unlock(&cluster->refill_lock);
10784
10785         path = btrfs_alloc_path();
10786         if (!path) {
10787                 ret = -ENOMEM;
10788                 goto out;
10789         }
10790
10791         /*
10792          * get the inode first so any iput calls done for the io_list
10793          * aren't the final iput (no unlinks allowed now)
10794          */
10795         inode = lookup_free_space_inode(fs_info, block_group, path);
10796
10797         mutex_lock(&trans->transaction->cache_write_mutex);
10798         /*
10799          * Make sure our free space cache IO is done before removing the
10800          * free space inode
10801          */
10802         spin_lock(&trans->transaction->dirty_bgs_lock);
10803         if (!list_empty(&block_group->io_list)) {
10804                 list_del_init(&block_group->io_list);
10805
10806                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10807
10808                 spin_unlock(&trans->transaction->dirty_bgs_lock);
10809                 btrfs_wait_cache_io(trans, block_group, path);
10810                 btrfs_put_block_group(block_group);
10811                 spin_lock(&trans->transaction->dirty_bgs_lock);
10812         }
10813
10814         if (!list_empty(&block_group->dirty_list)) {
10815                 list_del_init(&block_group->dirty_list);
10816                 remove_rsv = true;
10817                 btrfs_put_block_group(block_group);
10818         }
10819         spin_unlock(&trans->transaction->dirty_bgs_lock);
10820         mutex_unlock(&trans->transaction->cache_write_mutex);
10821
10822         if (!IS_ERR(inode)) {
10823                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
10824                 if (ret) {
10825                         btrfs_add_delayed_iput(inode);
10826                         goto out;
10827                 }
10828                 clear_nlink(inode);
10829                 /* One for the block groups ref */
10830                 spin_lock(&block_group->lock);
10831                 if (block_group->iref) {
10832                         block_group->iref = 0;
10833                         block_group->inode = NULL;
10834                         spin_unlock(&block_group->lock);
10835                         iput(inode);
10836                 } else {
10837                         spin_unlock(&block_group->lock);
10838                 }
10839                 /* One for our lookup ref */
10840                 btrfs_add_delayed_iput(inode);
10841         }
10842
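        /*
         * Delete the free space cache inode item (keyed by
         * BTRFS_FREE_SPACE_OBJECTID) from the tree of tree roots, if one
         * exists for this block group.
         */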
10843         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10844         key.offset = block_group->key.objectid;
10845         key.type = 0;
10846
10847         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10848         if (ret < 0)
10849                 goto out;
10850         if (ret > 0)
10851                 btrfs_release_path(path);
10852         if (ret == 0) {
10853                 ret = btrfs_del_item(trans, tree_root, path);
10854                 if (ret)
10855                         goto out;
10856                 btrfs_release_path(path);
10857         }
10858
10859         spin_lock(&fs_info->block_group_cache_lock);
10860         rb_erase(&block_group->cache_node,
10861                  &fs_info->block_group_cache_tree);
10862         RB_CLEAR_NODE(&block_group->cache_node);
10863
10864         if (fs_info->first_logical_byte == block_group->key.objectid)
10865                 fs_info->first_logical_byte = (u64)-1;
10866         spin_unlock(&fs_info->block_group_cache_lock);
10867
10868         down_write(&block_group->space_info->groups_sem);
10869         /*
10870          * we must use list_del_init so people can check to see if they
10871          * are still on the list after taking the semaphore
10872          */
10873         list_del_init(&block_group->list);
10874         if (list_empty(&block_group->space_info->block_groups[index])) {
10875                 kobj = block_group->space_info->block_group_kobjs[index];
10876                 block_group->space_info->block_group_kobjs[index] = NULL;
10877                 clear_avail_alloc_bits(fs_info, block_group->flags);
10878         }
10879         up_write(&block_group->space_info->groups_sem);
10880         if (kobj) {
10881                 kobject_del(kobj);
10882                 kobject_put(kobj);
10883         }
10884
10885         if (block_group->has_caching_ctl)
10886                 caching_ctl = get_caching_control(block_group);
10887         if (block_group->cached == BTRFS_CACHE_STARTED)
10888                 wait_block_group_cache_done(block_group);
10889         if (block_group->has_caching_ctl) {
10890                 down_write(&fs_info->commit_root_sem);
10891                 if (!caching_ctl) {
10892                         struct btrfs_caching_control *ctl;
10893
10894                         list_for_each_entry(ctl,
10895                                     &fs_info->caching_block_groups, list)
10896                                 if (ctl->block_group == block_group) {
10897                                         caching_ctl = ctl;
10898                                         refcount_inc(&caching_ctl->count);
10899                                         break;
10900                                 }
10901                 }
10902                 if (caching_ctl)
10903                         list_del_init(&caching_ctl->list);
10904                 up_write(&fs_info->commit_root_sem);
10905                 if (caching_ctl) {
10906                         /* Once for the caching bgs list and once for us. */
10907                         put_caching_control(caching_ctl);
10908                         put_caching_control(caching_ctl);
10909                 }
10910         }
10911
10912         spin_lock(&trans->transaction->dirty_bgs_lock);
10913         WARN_ON(!list_empty(&block_group->dirty_list));
10914         WARN_ON(!list_empty(&block_group->io_list));
10915         spin_unlock(&trans->transaction->dirty_bgs_lock);
10916
10917         btrfs_remove_free_space_cache(block_group);
10918
10919         spin_lock(&block_group->space_info->lock);
10920         list_del_init(&block_group->ro_list);
10921
10922         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
10923                 WARN_ON(block_group->space_info->total_bytes
10924                         < block_group->key.offset);
10925                 WARN_ON(block_group->space_info->bytes_readonly
10926                         < block_group->key.offset);
10927                 WARN_ON(block_group->space_info->disk_total
10928                         < block_group->key.offset * factor);
10929         }
10930         block_group->space_info->total_bytes -= block_group->key.offset;
10931         block_group->space_info->bytes_readonly -= block_group->key.offset;
10932         block_group->space_info->disk_total -= block_group->key.offset * factor;
10933
10934         spin_unlock(&block_group->space_info->lock);
10935
10936         memcpy(&key, &block_group->key, sizeof(key));
10937
10938         mutex_lock(&fs_info->chunk_mutex);
10939         spin_lock(&block_group->lock);
10940         block_group->removed = 1;
10941         /*
10942          * At this point trimming can't start on this block group, because we
10943          * removed the block group from the tree fs_info->block_group_cache_tree,
10944          * so no one can find it anymore, and even if someone already got this
10945          * block group before we removed it from the rbtree, they have already
10946          * incremented block_group->trimming - if they didn't, they won't find
10947          * any free space entries because we already removed them all when we
10948          * called btrfs_remove_free_space_cache().
10949          *
10950          * And we must not remove the extent map from the fs_info->mapping_tree
10951          * to prevent the same logical address range and physical device space
10952          * ranges from being reused for a new block group. This is because our
10953          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10954          * completely transactionless, so while it is trimming a range the
10955          * currently running transaction might finish and a new one start,
10956          * allowing for new block groups to be created that can reuse the same
10957          * physical device locations unless we take this special care.
10958          *
10959          * There may also be an implicit trim operation if the file system
10960          * is mounted with -odiscard. The same protections must remain
10961          * in place until the extents have been discarded completely when
10962          * the transaction commit has completed.
10963          */
10964         remove_em = (atomic_read(&block_group->trimming) == 0);
10965         spin_unlock(&block_group->lock);
10966
10967         if (remove_em) {
10968                 struct extent_map_tree *em_tree;
10969
10970                 em_tree = &fs_info->mapping_tree.map_tree;
10971                 write_lock(&em_tree->lock);
10972                 remove_extent_mapping(em_tree, em);
10973                 write_unlock(&em_tree->lock);
10974                 /* once for the tree */
10975                 free_extent_map(em);
10976         }
10977
10978         mutex_unlock(&fs_info->chunk_mutex);
10979
10980         ret = remove_block_group_free_space(trans, block_group);
10981         if (ret)
10982                 goto out;
10983
10984         btrfs_put_block_group(block_group);
10985         btrfs_put_block_group(block_group);
10986
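        /* Finally, delete the block group item itself from the extent tree. */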
10987         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10988         if (ret > 0)
10989                 ret = -EIO;
10990         if (ret < 0)
10991                 goto out;
10992
10993         ret = btrfs_del_item(trans, root, path);
10994 out:
10995         if (remove_rsv)
10996                 btrfs_delayed_refs_rsv_release(fs_info, 1);
10997         btrfs_free_path(path);
10998         return ret;
10999 }
11000
11001 struct btrfs_trans_handle *
11002 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
11003                                      const u64 chunk_offset)
11004 {
11005         struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
11006         struct extent_map *em;
11007         struct map_lookup *map;
11008         unsigned int num_items;
11009
11010         read_lock(&em_tree->lock);
11011         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
11012         read_unlock(&em_tree->lock);
11013         ASSERT(em && em->start == chunk_offset);
11014
11015         /*
11016          * We need to reserve 3 + N units from the metadata space info in order
11017          * to remove a block group (done at btrfs_remove_chunk() and at
11018          * btrfs_remove_block_group()), which are used for:
11019          *
11020          * 1 unit for adding the free space inode's orphan (located in the tree
11021          * of tree roots).
11022          * 1 unit for deleting the block group item (located in the extent
11023          * tree).
11024          * 1 unit for deleting the free space item (located in tree of tree
11025          * roots).
11026          * N units for deleting N device extent items corresponding to each
11027          * stripe (located in the device tree).
11028          *
11029          * In order to remove a block group we also need to reserve units in the
11030          * system space info in order to update the chunk tree (update one or
11031          * more device items and remove one chunk item), but this is done at
11032          * btrfs_remove_chunk() through a call to check_system_chunk().
11033          */
11034         map = em->map_lookup;
11035         num_items = 3 + map->num_stripes;
11036         free_extent_map(em);
11037
11038         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
11039                                                            num_items, 1);
11040 }
11041
11042 /*
11043  * Process the unused_bgs list and remove any that don't have any allocated
11044  * space inside of them.
11045  */
11046 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
11047 {
11048         struct btrfs_block_group_cache *block_group;
11049         struct btrfs_space_info *space_info;
11050         struct btrfs_trans_handle *trans;
11051         int ret = 0;
11052
11053         if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
11054                 return;
11055
11056         spin_lock(&fs_info->unused_bgs_lock);
11057         while (!list_empty(&fs_info->unused_bgs)) {
11058                 u64 start, end;
11059                 int trimming;
11060
11061                 block_group = list_first_entry(&fs_info->unused_bgs,
11062                                                struct btrfs_block_group_cache,
11063                                                bg_list);
11064                 list_del_init(&block_group->bg_list);
11065
11066                 space_info = block_group->space_info;
11067
11068                 if (ret || btrfs_mixed_space_info(space_info)) {
11069                         btrfs_put_block_group(block_group);
11070                         continue;
11071                 }
11072                 spin_unlock(&fs_info->unused_bgs_lock);
11073
11074                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
11075
11076                 /* Don't want to race with allocators so take the groups_sem */
11077                 down_write(&space_info->groups_sem);
11078                 spin_lock(&block_group->lock);
11079                 if (block_group->reserved || block_group->pinned ||
11080                     btrfs_block_group_used(&block_group->item) ||
11081                     block_group->ro ||
11082                     list_is_singular(&block_group->list)) {
11083                         /*
11084                          * We want to bail if we made new allocations or have
11085                          * outstanding allocations in this block group.  We do
11086                          * the ro check in case balance is currently acting on
11087                          * this block group.
11088                          */
11089                         trace_btrfs_skip_unused_block_group(block_group);
11090                         spin_unlock(&block_group->lock);
11091                         up_write(&space_info->groups_sem);
11092                         goto next;
11093                 }
11094                 spin_unlock(&block_group->lock);
11095
11096                 /* We don't want to force the issue, only flip if it's ok. */
11097                 ret = inc_block_group_ro(block_group, 0);
11098                 up_write(&space_info->groups_sem);
11099                 if (ret < 0) {
11100                         ret = 0;
11101                         goto next;
11102                 }
11103
11104                 /*
11105                  * Want to do this before we do anything else so we can recover
11106                  * properly if we fail to join the transaction.
11107                  */
11108                 trans = btrfs_start_trans_remove_block_group(fs_info,
11109                                                      block_group->key.objectid);
11110                 if (IS_ERR(trans)) {
11111                         btrfs_dec_block_group_ro(block_group);
11112                         ret = PTR_ERR(trans);
11113                         goto next;
11114                 }
11115
11116                 /*
11117                  * We could have pending pinned extents for this block group;
11118                  * just delete them, we don't care about them anymore.
11119                  */
11120                 start = block_group->key.objectid;
11121                 end = start + block_group->key.offset - 1;
11122                 /*
11123                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
11124                  * btrfs_finish_extent_commit(). If we are at transaction N,
11125                  * another task might be running finish_extent_commit() for the
11126                  * previous transaction N - 1, and have seen a range belonging
11127                  * to the block group in freed_extents[] before we were able to
11128                  * clear the whole block group range from freed_extents[]. This
11129                  * means that task can look up the block group after we
11130                  * unpinned it from freed_extents[] and removed it, leading to
11131                  * a BUG_ON() at btrfs_unpin_extent_range().
11132                  */
11133                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
11134                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
11135                                   EXTENT_DIRTY);
11136                 if (ret) {
11137                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
11138                         btrfs_dec_block_group_ro(block_group);
11139                         goto end_trans;
11140                 }
11141                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
11142                                   EXTENT_DIRTY);
11143                 if (ret) {
11144                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
11145                         btrfs_dec_block_group_ro(block_group);
11146                         goto end_trans;
11147                 }
11148                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
11149
11150                 /* Reset pinned so btrfs_put_block_group doesn't complain */
11151                 spin_lock(&space_info->lock);
11152                 spin_lock(&block_group->lock);
11153
11154                 update_bytes_pinned(space_info, -block_group->pinned);
11155                 space_info->bytes_readonly += block_group->pinned;
11156                 percpu_counter_add_batch(&space_info->total_bytes_pinned,
11157                                    -block_group->pinned,
11158                                    BTRFS_TOTAL_BYTES_PINNED_BATCH);
11159                 block_group->pinned = 0;
11160
11161                 spin_unlock(&block_group->lock);
11162                 spin_unlock(&space_info->lock);
11163
11164                 /* DISCARD can flip during remount */
11165                 trimming = btrfs_test_opt(fs_info, DISCARD);
11166
11167                 /* Implicit trim during transaction commit. */
11168                 if (trimming)
11169                         btrfs_get_block_group_trimming(block_group);
11170
11171                 /*
11172                  * btrfs_remove_chunk() will abort the transaction if things go
11173                  * horribly wrong.
11174                  */
11175                 ret = btrfs_remove_chunk(trans, block_group->key.objectid);
11176
11177                 if (ret) {
11178                         if (trimming)
11179                                 btrfs_put_block_group_trimming(block_group);
11180                         goto end_trans;
11181                 }
11182
11183                 /*
11184                  * If we're not mounted with -odiscard, we can just forget
11185                  * about this block group. Otherwise we'll need to wait
11186                  * until transaction commit to do the actual discard.
11187                  */
11188                 if (trimming) {
11189                         spin_lock(&fs_info->unused_bgs_lock);
11190                         /*
11191                          * A concurrent scrub might have added us to the list
11192                          * fs_info->unused_bgs, so use a list_move operation
11193                          * to add the block group to the deleted_bgs list.
11194                          */
11195                         list_move(&block_group->bg_list,
11196                                   &trans->transaction->deleted_bgs);
11197                         spin_unlock(&fs_info->unused_bgs_lock);
11198                         btrfs_get_block_group(block_group);
11199                 }
11200 end_trans:
11201                 btrfs_end_transaction(trans);
11202 next:
11203                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
11204                 btrfs_put_block_group(block_group);
11205                 spin_lock(&fs_info->unused_bgs_lock);
11206         }
11207         spin_unlock(&fs_info->unused_bgs_lock);
11208 }
11209
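/*
 * Create the initial space_info structures at mount time: SYSTEM plus either
 * a single mixed METADATA|DATA space_info or separate METADATA and DATA ones,
 * depending on the mixed-groups incompat flag.
 */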
11210 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
11211 {
11212         struct btrfs_super_block *disk_super;
11213         u64 features;
11214         u64 flags;
11215         int mixed = 0;
11216         int ret;
11217
11218         disk_super = fs_info->super_copy;
11219         if (!btrfs_super_root(disk_super))
11220                 return -EINVAL;
11221
11222         features = btrfs_super_incompat_flags(disk_super);
11223         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
11224                 mixed = 1;
11225
11226         flags = BTRFS_BLOCK_GROUP_SYSTEM;
11227         ret = create_space_info(fs_info, flags);
11228         if (ret)
11229                 goto out;
11230
11231         if (mixed) {
11232                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
11233                 ret = create_space_info(fs_info, flags);
11234         } else {
11235                 flags = BTRFS_BLOCK_GROUP_METADATA;
11236                 ret = create_space_info(fs_info, flags);
11237                 if (ret)
11238                         goto out;
11239
11240                 flags = BTRFS_BLOCK_GROUP_DATA;
11241                 ret = create_space_info(fs_info, flags);
11242         }
11243 out:
11244         return ret;
11245 }
11246
11247 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
11248                                    u64 start, u64 end)
11249 {
11250         return unpin_extent_range(fs_info, start, end, false);
11251 }
11252
11253 /*
11254  * It used to be that old block groups would be left around forever.
11255  * Iterating over them would be enough to trim unused space.  Since we
11256  * now automatically remove them, we also need to iterate over unallocated
11257  * space.
11258  *
11259  * We don't want a transaction for this since the discard may take a
11260  * substantial amount of time.  We don't require that a transaction be
11261  * running, but we do need to take a running transaction into account
11262  * to ensure that we're not discarding chunks that were released or
11263  * allocated in the current transaction.
11264  *
11265  * Holding the chunks lock will prevent other threads from allocating
11266  * or releasing chunks, but it won't prevent a running transaction
11267  * from committing and releasing the memory that the pending chunks
11268  * list head uses.  For that, we need to take a reference to the
11269  * transaction and hold the commit root sem.  We only need to hold
11270  * it while performing the free space search since we have already
11271  * held back allocations.
11272  */
11273 static int btrfs_trim_free_extents(struct btrfs_device *device,
11274                                    struct fstrim_range *range, u64 *trimmed)
11275 {
11276         u64 start = range->start, len = 0;
11277         int ret;
11278
11279         *trimmed = 0;
11280
11281         /* Discard not supported = nothing to do. */
11282         if (!blk_queue_discard(bdev_get_queue(device->bdev)))
11283                 return 0;
11284
11285         /* Not writable = nothing to do. */
11286         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
11287                 return 0;
11288
11289         /* No free space = nothing to do. */
11290         if (device->total_bytes <= device->bytes_used)
11291                 return 0;
11292
11293         ret = 0;
11294
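        /*
         * Walk the holes between this device's allocated extents, clamp each
         * one to the requested range and discard it.
         */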
11295         while (1) {
11296                 struct btrfs_fs_info *fs_info = device->fs_info;
11297                 u64 bytes;
11298
11299                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
11300                 if (ret)
11301                         break;
11302
11303                 ret = find_free_dev_extent_start(device, range->minlen, start,
11304                                                  &start, &len);
11305
11306                 if (ret) {
11307                         mutex_unlock(&fs_info->chunk_mutex);
11308                         if (ret == -ENOSPC)
11309                                 ret = 0;
11310                         break;
11311                 }
11312
11313                 /* If we are out of the passed range, break */
11314                 if (start > range->start + range->len - 1) {
11315                         mutex_unlock(&fs_info->chunk_mutex);
11316                         ret = 0;
11317                         break;
11318                 }
11319
11320                 start = max(range->start, start);
11321                 len = min(range->len, len);
11322
11323                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
11324                 mutex_unlock(&fs_info->chunk_mutex);
11325
11326                 if (ret)
11327                         break;
11328
11329                 start += len;
11330                 *trimmed += bytes;
11331
11332                 /* We've trimmed enough */
11333                 if (*trimmed >= range->len)
11334                         break;
11335
11336                 if (fatal_signal_pending(current)) {
11337                         ret = -ERESTARTSYS;
11338                         break;
11339                 }
11340
11341                 cond_resched();
11342         }
11343
11344         return ret;
11345 }
11346
11347 /*
11348  * Trim the whole filesystem by:
11349  * 1) trimming the free space in each block group
11350  * 2) trimming the unallocated space on each device
11351  *
11352  * This will also continue trimming even if a block group or device encounters
11353  * an error.  The return value will be the last error, or 0 if nothing bad
11354  * happens.
11355  */
11356 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
11357 {
11358         struct btrfs_block_group_cache *cache = NULL;
11359         struct btrfs_device *device;
11360         struct list_head *devices;
11361         u64 group_trimmed;
11362         u64 start;
11363         u64 end;
11364         u64 trimmed = 0;
11365         u64 bg_failed = 0;
11366         u64 dev_failed = 0;
11367         int bg_ret = 0;
11368         int dev_ret = 0;
11369         int ret = 0;
11370
11371         cache = btrfs_lookup_first_block_group(fs_info, range->start);
11372         for (; cache; cache = next_block_group(fs_info, cache)) {
11373                 if (cache->key.objectid >= (range->start + range->len)) {
11374                         btrfs_put_block_group(cache);
11375                         break;
11376                 }
11377
11378                 start = max(range->start, cache->key.objectid);
11379                 end = min(range->start + range->len,
11380                                 cache->key.objectid + cache->key.offset);
11381
11382                 if (end - start >= range->minlen) {
11383                         if (!block_group_cache_done(cache)) {
11384                                 ret = cache_block_group(cache, 0);
11385                                 if (ret) {
11386                                         bg_failed++;
11387                                         bg_ret = ret;
11388                                         continue;
11389                                 }
11390                                 ret = wait_block_group_cache_done(cache);
11391                                 if (ret) {
11392                                         bg_failed++;
11393                                         bg_ret = ret;
11394                                         continue;
11395                                 }
11396                         }
11397                         ret = btrfs_trim_block_group(cache,
11398                                                      &group_trimmed,
11399                                                      start,
11400                                                      end,
11401                                                      range->minlen);
11402
11403                         trimmed += group_trimmed;
11404                         if (ret) {
11405                                 bg_failed++;
11406                                 bg_ret = ret;
11407                                 continue;
11408                         }
11409                 }
11410         }
11411
11412         if (bg_failed)
11413                 btrfs_warn(fs_info,
11414                         "failed to trim %llu block group(s), last error %d",
11415                         bg_failed, bg_ret);
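        /* Now trim the unallocated space on each device. */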
11416         mutex_lock(&fs_info->fs_devices->device_list_mutex);
11417         devices = &fs_info->fs_devices->devices;
11418         list_for_each_entry(device, devices, dev_list) {
11419                 ret = btrfs_trim_free_extents(device, range, &group_trimmed);
11420                 if (ret) {
11421                         dev_failed++;
11422                         dev_ret = ret;
11423                         break;
11424                 }
11425
11426                 trimmed += group_trimmed;
11427         }
11428         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
11429
11430         if (dev_failed)
11431                 btrfs_warn(fs_info,
11432                         "failed to trim %llu device(s), last error %d",
11433                         dev_failed, dev_ret);
11434         range->len = trimmed;
11435         if (bg_ret)
11436                 return bg_ret;
11437         return dev_ret;
11438 }
11439
11440 /*
11441  * btrfs_{start,end}_write_no_snapshotting() are similar to
11442  * mnt_{want,drop}_write(); they are used to prevent some tasks from writing
11443  * data into the page cache through nocow before the subvolume is snapshotted,
11444  * but to flush that data to disk after the snapshot creation, or to prevent
11445  * operations while snapshotting is ongoing that would cause the snapshot to be
11446  * inconsistent (writes followed by expanding truncates, for example).
11447  */
11448 void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
11449 {
11450         percpu_counter_dec(&root->subv_writers->counter);
11451         cond_wake_up(&root->subv_writers->wait);
11452 }
11453
11454 int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
11455 {
11456         if (atomic_read(&root->will_be_snapshotted))
11457                 return 0;
11458
11459         percpu_counter_inc(&root->subv_writers->counter);
11460         /*
11461          * Make sure counter is updated before we check for snapshot creation.
11462          */
11463         smp_mb();
11464         if (atomic_read(&root->will_be_snapshotted)) {
11465                 btrfs_end_write_no_snapshotting(root);
11466                 return 0;
11467         }
11468         return 1;
11469 }
11470
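/*
 * Wait until taking the no-snapshotting "write lock" succeeds, i.e. until no
 * snapshot creation is pending for this root.
 */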
11471 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
11472 {
11473         while (true) {
11474                 int ret;
11475
11476                 ret = btrfs_start_write_no_snapshotting(root);
11477                 if (ret)
11478                         break;
11479                 wait_var_event(&root->will_be_snapshotted,
11480                                !atomic_read(&root->will_be_snapshotted));
11481         }
11482 }
11483
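/*
 * Queue a block group that has become empty on fs_info->unused_bgs so that
 * btrfs_delete_unused_bgs() can remove it later; takes an extra reference
 * that is dropped once the group has been processed.
 */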
11484 void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
11485 {
11486         struct btrfs_fs_info *fs_info = bg->fs_info;
11487
11488         spin_lock(&fs_info->unused_bgs_lock);
11489         if (list_empty(&bg->bg_list)) {
11490                 btrfs_get_block_group(bg);
11491                 trace_btrfs_add_unused_block_group(bg);
11492                 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
11493         }
11494         spin_unlock(&fs_info->unused_bgs_lock);
11495 }