/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include "compat.h"
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"
#include "free-space-cache.h"

#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2

struct pending_extent_op {
        int type;
        u64 bytenr;
        u64 num_bytes;
        u64 parent;
        u64 orig_parent;
        u64 generation;
        u64 orig_generation;
        int level;
        struct list_head list;
        int del;
};

static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root, u64 parent,
                                         u64 root_objectid, u64 ref_generation,
                                         u64 owner, struct btrfs_key *ins,
                                         int ref_mod);
static int update_reserved_extents(struct btrfs_root *root,
                                   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free);
static noinline int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes, u64 parent,
                                        u64 root_objectid, u64 ref_generation,
                                        u64 owner_objectid, int pin,
                                        int ref_to_drop);

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);

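/*
 * helper: nonzero iff @cache's flags contain every bit in @bits, e.g.
 * block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA) tests whether a
 * block group holds metadata.
 */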
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                atomic_inc(&ret->count);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
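/*
 * For illustration (hypothetical numbers): with start == 0, end == 100 and a
 * single pinned extent covering [30, 40], the loop below adds [0, 30) as
 * free space, skips past the pinned range, and the tail check then adds
 * [41, 100).
 */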
static int add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(&info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;

                if (extent_start == start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return 0;
}

static int remove_sb_from_cache(struct btrfs_root *root,
                                struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr, 0,
                                       &logical, &nr, &stripe_len);
                BUG_ON(ret);
                while (nr--) {
                        btrfs_remove_free_space(cache, logical[nr],
                                                stripe_len);
                }
                kfree(logical);
        }
        return 0;
}

static int cache_block_group(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group)
{
        struct btrfs_path *path;
        int ret = 0;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        int slot;
        u64 last;

        if (!block_group)
                return 0;

        root = root->fs_info->extent_root;

        if (block_group->cached)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 2;
        /*
         * we get into deadlocks with paths held by callers of this function.
         * since the alloc_mutex is protecting things right now, just
         * skip the locking here
         */
        path->skip_locking = 1;
        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
        key.objectid = last;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto err;
                        if (ret == 0)
                                continue;
                        else
                                break;
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid)
                        goto next;

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        add_new_free_space(block_group, root->fs_info, last,
                                           key.objectid);

                        last = key.objectid + key.offset;
                }
next:
                path->slots[0]++;
        }

        add_new_free_space(block_group, root->fs_info, last,
                           block_group->key.objectid +
                           block_group->key.offset);

        block_group->cached = 1;
        remove_sb_from_cache(root, block_group);
        ret = 0;
err:
        btrfs_free_path(path);
        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags == flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

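/*
 * scale @num by @factor tenths using integer math, e.g. (hypothetical
 * numbers) div_factor(1024, 9) == 921, roughly 90% of 1024.
 */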
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - number of references held by the parent node (always 1 for tree blocks)
 *
 * A btree leaf may hold multiple references to a file extent. In most cases
 * these references are from the same file, and the corresponding offsets
 * inside the file are close together.
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, 1)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks similar to the create case, but trans->transid will
 * be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid,
 *      number of references in the leaf)
 *
 * When a file extent is removed either during snapshot deletion or
 * file truncation, we find the corresponding back reference and check
 * the following fields:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a tree block is cow'd, new back references are added for all the
 * blocks it points to. If the tree block isn't in a reference counted root,
 * the old back references are removed. These new back references are of
 * the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a backref is being deleted, the following fields are checked:
 *
 * if backref was for a tree root:
 *     (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
 * else
 *     (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
 *
 * Back Reference Key composition:
 *
 * The key objectid corresponds to the first byte in the extent, the key
 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
 * byte of the parent extent. If an extent is a tree root, the key offset
 * is set to the key objectid.
 */
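/*
 * Worked example (all numbers hypothetical): a file extent allocated by
 * subvolume 5 in transaction 100 for inode 257 carries the back reference
 * (5, 100, 257, 1).  If the leaf holding it is cow'd in transaction 120
 * and holds three references to the extent, the new leaf gets
 * (5, 120, 257, 3).  The backref item's key is
 * (extent bytenr, BTRFS_EXTENT_REF_KEY, leaf bytenr).
 */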

static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 ref_root, u64 ref_generation,
                                          u64 owner_objectid, int del)
{
        struct btrfs_key key;
        struct btrfs_extent_ref *ref;
        struct extent_buffer *leaf;
        u64 ref_objectid;
        int ret;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_REF_KEY;
        key.offset = parent;

        ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
        ref_objectid = btrfs_ref_objectid(leaf, ref);
        if (btrfs_ref_root(leaf, ref) != ref_root ||
            btrfs_ref_generation(leaf, ref) != ref_generation ||
            (ref_objectid != owner_objectid &&
             ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
                ret = -EIO;
                WARN_ON(1);
                goto out;
        }
        ret = 0;
out:
        return ret;
}

static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 ref_root, u64 ref_generation,
                                          u64 owner_objectid,
                                          int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_ref *ref;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_REF_KEY;
        key.offset = parent;

        ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
        if (ret == 0) {
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_ref);
                btrfs_set_ref_root(leaf, ref, ref_root);
                btrfs_set_ref_generation(leaf, ref, ref_generation);
                btrfs_set_ref_objectid(leaf, ref, owner_objectid);
                btrfs_set_ref_num_refs(leaf, ref, refs_to_add);
        } else if (ret == -EEXIST) {
                u64 existing_owner;

                BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_ref);
                if (btrfs_ref_root(leaf, ref) != ref_root ||
                    btrfs_ref_generation(leaf, ref) != ref_generation) {
                        ret = -EIO;
                        WARN_ON(1);
                        goto out;
                }

                num_refs = btrfs_ref_num_refs(leaf, ref);
                BUG_ON(num_refs == 0);
                btrfs_set_ref_num_refs(leaf, ref, num_refs + refs_to_add);

                existing_owner = btrfs_ref_objectid(leaf, ref);
                if (existing_owner != owner_objectid &&
                    existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
                        btrfs_set_ref_objectid(leaf, ref,
                                        BTRFS_MULTIPLE_OBJECTIDS);
                }
                ret = 0;
        } else {
                goto out;
        }
        btrfs_unlock_up_safe(path, 1);
        btrfs_mark_buffer_dirty(path->nodes[0]);
out:
        btrfs_release_path(root, path);
        return ret;
}

static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          int refs_to_drop)
{
        struct extent_buffer *leaf;
        struct btrfs_extent_ref *ref;
        u32 num_refs;
        int ret = 0;

        leaf = path->nodes[0];
        ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
        num_refs = btrfs_ref_num_refs(leaf, ref);
        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;
        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                btrfs_set_ref_num_refs(leaf, ref, num_refs);
                btrfs_mark_buffer_dirty(leaf);
        }
        btrfs_release_path(root, path);
        return ret;
}

#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
                                u64 start, u64 len)
{
        blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                                u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
        int ret;
        u64 map_length = num_bytes;
        struct btrfs_multi_bio *multi = NULL;

        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
                              bytenr, &map_length, &multi, 0);
        if (!ret) {
                struct btrfs_bio_stripe *stripe = multi->stripes;
                int i;

                if (map_length > num_bytes)
                        map_length = num_bytes;

                for (i = 0; i < multi->num_stripes; i++, stripe++) {
                        btrfs_issue_discard(stripe->dev->bdev,
                                            stripe->physical,
                                            map_length);
                }
                kfree(multi);
        }

        return ret;
#else
        return 0;
#endif
}

static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root, u64 bytenr,
                                     u64 num_bytes,
                                     u64 orig_parent, u64 parent,
                                     u64 orig_root, u64 ref_root,
                                     u64 orig_generation, u64 ref_generation,
                                     u64 owner_objectid)
{
        int ret;
        int pin = owner_objectid < BTRFS_FIRST_FREE_OBJECTID;

        ret = btrfs_update_delayed_ref(trans, bytenr, num_bytes,
                                       orig_parent, parent, orig_root,
                                       ref_root, orig_generation,
                                       ref_generation, owner_objectid, pin);
        BUG_ON(ret);
        return ret;
}

int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, u64 bytenr,
                            u64 num_bytes, u64 orig_parent, u64 parent,
                            u64 ref_root, u64 ref_generation,
                            u64 owner_objectid)
{
        int ret;
        if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
            owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
                return 0;

        ret = __btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
                                        orig_parent, parent, ref_root,
                                        ref_root, ref_generation,
                                        ref_generation, owner_objectid);
        return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root, u64 bytenr,
                                  u64 num_bytes,
                                  u64 orig_parent, u64 parent,
                                  u64 orig_root, u64 ref_root,
                                  u64 orig_generation, u64 ref_generation,
                                  u64 owner_objectid)
{
        int ret;

        ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent, ref_root,
                                    ref_generation, owner_objectid,
                                    BTRFS_ADD_DELAYED_REF, 0);
        BUG_ON(ret);
        return ret;
}

static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, u64 bytenr,
                          u64 num_bytes, u64 parent, u64 ref_root,
                          u64 ref_generation, u64 owner_objectid,
                          int refs_to_add)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;
        u32 refs;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 1;
        path->leave_spinning = 1;
        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        /* first find the extent item and update its reference count */
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
                                path, 0, 1);
        if (ret < 0) {
                btrfs_set_path_blocking(path);
                btrfs_free_path(path);
                return ret;
        }

        if (ret > 0) {
                WARN_ON(1);
                btrfs_free_path(path);
                return -EIO;
        }
        l = path->nodes[0];

        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        if (key.objectid != bytenr) {
                btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
                printk(KERN_ERR "btrfs wanted %llu found %llu\n",
                       (unsigned long long)bytenr,
                       (unsigned long long)key.objectid);
                BUG();
        }
        BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);

        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);

        refs = btrfs_extent_refs(l, item);
        btrfs_set_extent_refs(l, item, refs + refs_to_add);
        btrfs_unlock_up_safe(path, 1);

        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(root->fs_info->extent_root, path);

        path->reada = 1;
        path->leave_spinning = 1;

        /* now insert the actual backref */
        ret = insert_extent_backref(trans, root->fs_info->extent_root,
                                    path, bytenr, parent,
                                    ref_root, ref_generation,
                                    owner_objectid, refs_to_add);
        BUG_ON(ret);
        btrfs_free_path(path);
        return 0;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
                         u64 ref_root, u64 ref_generation,
                         u64 owner_objectid)
{
        int ret;
        if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
            owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
                return 0;

        ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, parent,
                                     0, ref_root, 0, ref_generation,
                                     owner_objectid);
        return ret;
}

static int drop_delayed_ref(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_delayed_ref_node *node)
{
        int ret = 0;
        struct btrfs_delayed_ref *ref = btrfs_delayed_node_to_ref(node);

        BUG_ON(node->ref_mod == 0);
        ret = __btrfs_free_extent(trans, root, node->bytenr, node->num_bytes,
                                  node->parent, ref->root, ref->generation,
                                  ref->owner_objectid, ref->pin, node->ref_mod);

        return ret;
}

/* helper function to actually process a single delayed ref entry */
static noinline int run_one_delayed_ref(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_delayed_ref_node *node,
                                        int insert_reserved)
{
        int ret;
        struct btrfs_delayed_ref *ref;

        if (node->parent == (u64)-1) {
                struct btrfs_delayed_ref_head *head;
                /*
                 * we've hit the end of the chain and we were supposed
                 * to insert this extent into the tree.  But it got
                 * deleted before we ever needed to insert it, so all
                 * we have to do is clean up the accounting
                 */
                if (insert_reserved) {
                        update_reserved_extents(root, node->bytenr,
                                                node->num_bytes, 0);
                }
                head = btrfs_delayed_node_to_head(node);
                mutex_unlock(&head->mutex);
                return 0;
        }

        ref = btrfs_delayed_node_to_ref(node);
        if (ref->action == BTRFS_ADD_DELAYED_REF) {
                if (insert_reserved) {
                        struct btrfs_key ins;

                        ins.objectid = node->bytenr;
                        ins.offset = node->num_bytes;
                        ins.type = BTRFS_EXTENT_ITEM_KEY;

                        /* record the full extent allocation */
                        ret = __btrfs_alloc_reserved_extent(trans, root,
                                        node->parent, ref->root,
                                        ref->generation, ref->owner_objectid,
                                        &ins, node->ref_mod);
                        update_reserved_extents(root, node->bytenr,
                                                node->num_bytes, 0);
                } else {
                        /* just add one backref */
                        ret = add_extent_ref(trans, root, node->bytenr,
                                     node->num_bytes,
                                     node->parent, ref->root, ref->generation,
                                     ref->owner_objectid, node->ref_mod);
                }
                BUG_ON(ret);
        } else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                WARN_ON(insert_reserved);
                ret = drop_delayed_ref(trans, root, node);
        }
        return 0;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
        struct rb_node *node;
        struct btrfs_delayed_ref_node *ref;
        int action = BTRFS_ADD_DELAYED_REF;
again:
        /*
         * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
         * this prevents the ref count from going down to zero while
         * there are still pending delayed refs.
         */
        node = rb_prev(&head->node.rb_node);
        while (1) {
                if (!node)
                        break;
                ref = rb_entry(node, struct btrfs_delayed_ref_node,
                                rb_node);
                if (ref->bytenr != head->node.bytenr)
                        break;
                if (btrfs_delayed_node_to_ref(ref)->action == action)
                        return ref;
                node = rb_prev(node);
        }
        if (action == BTRFS_ADD_DELAYED_REF) {
                action = BTRFS_DROP_DELAYED_REF;
                goto again;
        }
        return NULL;
}

static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct list_head *cluster)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct btrfs_delayed_ref_head *locked_ref = NULL;
        int ret;
        int count = 0;
        int must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;
        while (1) {
                if (!locked_ref) {
                        /* pick a new head ref from the cluster list */
                        if (list_empty(cluster))
                                break;

                        locked_ref = list_entry(cluster->next,
                                     struct btrfs_delayed_ref_head, cluster);

                        /* grab the lock that says we are going to process
                         * all the refs for this head */
                        ret = btrfs_delayed_ref_lock(trans, locked_ref);

                        /*
                         * we may have dropped the spin lock to get the head
                         * mutex lock, and that might have given someone else
                         * time to free the head.  If that's true, it has been
                         * removed from our list and we can move on.
                         */
                        if (ret == -EAGAIN) {
                                locked_ref = NULL;
                                count++;
                                continue;
                        }
                }

                /*
                 * record the must_insert_reserved flag before we
                 * drop the spin lock.
                 */
                must_insert_reserved = locked_ref->must_insert_reserved;
                locked_ref->must_insert_reserved = 0;

                /*
                 * locked_ref is the head node, so we have to go one
                 * node back for any delayed ref updates
                 */
                ref = select_delayed_ref(locked_ref);
                if (!ref) {
                        /* all delayed refs have been processed, go ahead
                         * and send the head node to run_one_delayed_ref,
                         * so that any accounting fixes can happen
                         */
                        ref = &locked_ref->node;
                        list_del_init(&locked_ref->cluster);
                        locked_ref = NULL;
                }

                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
                spin_unlock(&delayed_refs->lock);

                ret = run_one_delayed_ref(trans, root, ref,
                                          must_insert_reserved);
                BUG_ON(ret);
                btrfs_put_delayed_ref(ref);

                count++;
                cond_resched();
                spin_lock(&delayed_refs->lock);
        }
        return count;
}

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
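/*
 * e.g. a caller that wants to drain everything queued so far can pass
 * count == (unsigned long)-1, which selects the run_all behaviour below;
 * count == 0 processes the entries present when the run started.
 */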
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count)
{
        struct rb_node *node;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
        int ret;
        int run_all = count == (unsigned long)-1;
        int run_most = 0;

        if (root == root->fs_info->extent_root)
                root = root->fs_info->tree_root;

        delayed_refs = &trans->transaction->delayed_refs;
        INIT_LIST_HEAD(&cluster);
again:
        spin_lock(&delayed_refs->lock);
        if (count == 0) {
                count = delayed_refs->num_entries * 2;
                run_most = 1;
        }
        while (1) {
                if (!(run_all || run_most) &&
                    delayed_refs->num_heads_ready < 64)
                        break;

                /*
                 * go find something we can process in the rbtree.  We start at
                 * the beginning of the tree, and then build a cluster
                 * of refs to process starting at the first one we are able to
                 * lock
                 */
                ret = btrfs_find_ref_cluster(trans, &cluster,
                                             delayed_refs->run_delayed_start);
                if (ret)
                        break;

                ret = run_clustered_refs(trans, root, &cluster);
                BUG_ON(ret < 0);

                count -= min_t(unsigned long, ret, count);

                if (count == 0)
                        break;
        }

        if (run_all) {
                node = rb_first(&delayed_refs->root);
                if (!node)
                        goto out;
                count = (unsigned long)-1;

                while (node) {
                        ref = rb_entry(node, struct btrfs_delayed_ref_node,
                                       rb_node);
                        if (btrfs_delayed_ref_is_head(ref)) {
                                struct btrfs_delayed_ref_head *head;

                                head = btrfs_delayed_node_to_head(ref);
                                atomic_inc(&ref->refs);

                                spin_unlock(&delayed_refs->lock);
                                mutex_lock(&head->mutex);
                                mutex_unlock(&head->mutex);

                                btrfs_put_delayed_ref(ref);
                                cond_resched();
                                goto again;
                        }
                        node = rb_next(node);
                }
                spin_unlock(&delayed_refs->lock);
                schedule_timeout(1);
                goto again;
        }
out:
        spin_unlock(&delayed_refs->lock);
        return 0;
}

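/*
 * walk every backref for the extent at @bytenr and return 0 only when each
 * one belongs to this root (or the log tree), matches @objectid, and is
 * newer than the root's last snapshot; otherwise return 1, meaning a cross
 * reference may exist (negative errno on lookup failure).
 */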
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, u64 objectid, u64 bytenr)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_extent_ref *ref_item;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u64 ref_root;
        u64 last_snapshot;
        u32 nritems;
        int ret;

        key.objectid = bytenr;
        key.offset = (u64)-1;
        key.type = BTRFS_EXTENT_ITEM_KEY;

        path = btrfs_alloc_path();
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0);

        ret = -ENOENT;
        if (path->slots[0] == 0)
                goto out;

        path->slots[0]--;
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

        if (found_key.objectid != bytenr ||
            found_key.type != BTRFS_EXTENT_ITEM_KEY)
                goto out;

        last_snapshot = btrfs_root_last_snapshot(&root->root_item);
        while (1) {
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto out;
                        if (ret == 0)
                                continue;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != bytenr)
                        break;

                if (found_key.type != BTRFS_EXTENT_REF_KEY) {
                        path->slots[0]++;
                        continue;
                }

                ref_item = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_extent_ref);
                ref_root = btrfs_ref_root(leaf, ref_item);
                if ((ref_root != root->root_key.objectid &&
                     ref_root != BTRFS_TREE_LOG_OBJECTID) ||
                     objectid != btrfs_ref_objectid(leaf, ref_item)) {
                        ret = 1;
                        goto out;
                }
                if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
                        ret = 1;
                        goto out;
                }

                path->slots[0]++;
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                    struct extent_buffer *buf, u32 nr_extents)
{
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        u64 root_gen;
        u32 nritems;
        int i;
        int level;
        int ret = 0;
        int shared = 0;

        if (!root->ref_cows)
                return 0;

        if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
                shared = 0;
                root_gen = root->root_key.offset;
        } else {
                shared = 1;
                root_gen = trans->transid - 1;
        }

        level = btrfs_header_level(buf);
        nritems = btrfs_header_nritems(buf);

        if (level == 0) {
                struct btrfs_leaf_ref *ref;
                struct btrfs_extent_info *info;

                ref = btrfs_alloc_leaf_ref(root, nr_extents);
                if (!ref) {
                        ret = -ENOMEM;
                        goto out;
                }

                ref->root_gen = root_gen;
                ref->bytenr = buf->start;
                ref->owner = btrfs_header_owner(buf);
                ref->generation = btrfs_header_generation(buf);
                ref->nritems = nr_extents;
                info = ref->extents;

                for (i = 0; nr_extents > 0 && i < nritems; i++) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;

                        info->bytenr = disk_bytenr;
                        info->num_bytes =
                                btrfs_file_extent_disk_num_bytes(buf, fi);
                        info->objectid = key.objectid;
                        info->offset = key.offset;
                        info++;
                }

                ret = btrfs_add_leaf_ref(root, ref, shared);
                if (ret == -EEXIST && shared) {
                        struct btrfs_leaf_ref *old;
                        old = btrfs_lookup_leaf_ref(root, ref->bytenr);
                        BUG_ON(!old);
                        btrfs_remove_leaf_ref(root, old);
                        btrfs_free_leaf_ref(root, old);
                        ret = btrfs_add_leaf_ref(root, ref, shared);
                }
                WARN_ON(ret);
                btrfs_free_leaf_ref(root, ref);
        }
out:
        return ret;
}

/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
        u64 bytenr;
        u32 slot;
};
1279
1280 /*
1281  * for passing into sort()
1282  */
1283 static int refsort_cmp(const void *a_void, const void *b_void)
1284 {
1285         const struct refsort *a = a_void;
1286         const struct refsort *b = b_void;
1287
1288         if (a->bytenr < b->bytenr)
1289                 return -1;
1290         if (a->bytenr > b->bytenr)
1291                 return 1;
1292         return 0;
1293 }
1294
1295
1296 noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1297                            struct btrfs_root *root,
1298                            struct extent_buffer *orig_buf,
1299                            struct extent_buffer *buf, u32 *nr_extents)
1300 {
1301         u64 bytenr;
1302         u64 ref_root;
1303         u64 orig_root;
1304         u64 ref_generation;
1305         u64 orig_generation;
1306         struct refsort *sorted;
1307         u32 nritems;
1308         u32 nr_file_extents = 0;
1309         struct btrfs_key key;
1310         struct btrfs_file_extent_item *fi;
1311         int i;
1312         int level;
1313         int ret = 0;
1314         int faili = 0;
1315         int refi = 0;
1316         int slot;
1317         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1318                             u64, u64, u64, u64, u64, u64, u64, u64, u64);
1319
1320         ref_root = btrfs_header_owner(buf);
1321         ref_generation = btrfs_header_generation(buf);
1322         orig_root = btrfs_header_owner(orig_buf);
1323         orig_generation = btrfs_header_generation(orig_buf);
1324
1325         nritems = btrfs_header_nritems(buf);
1326         level = btrfs_header_level(buf);
1327
1328         sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
1329         BUG_ON(!sorted);
1330
1331         if (root->ref_cows) {
1332                 process_func = __btrfs_inc_extent_ref;
1333         } else {
1334                 if (level == 0 &&
1335                     root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1336                         goto out;
1337                 if (level != 0 &&
1338                     root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1339                         goto out;
1340                 process_func = __btrfs_update_extent_ref;
1341         }
1342
1343         /*
1344          * we make two passes through the items.  In the first pass we
1345          * only record the byte number and slot.  Then we sort based on
1346          * byte number and do the actual work based on the sorted results
1347          */
1348         for (i = 0; i < nritems; i++) {
1349                 cond_resched();
1350                 if (level == 0) {
1351                         btrfs_item_key_to_cpu(buf, &key, i);
1352                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1353                                 continue;
1354                         fi = btrfs_item_ptr(buf, i,
1355                                             struct btrfs_file_extent_item);
1356                         if (btrfs_file_extent_type(buf, fi) ==
1357                             BTRFS_FILE_EXTENT_INLINE)
1358                                 continue;
1359                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1360                         if (bytenr == 0)
1361                                 continue;
1362
1363                         nr_file_extents++;
1364                         sorted[refi].bytenr = bytenr;
1365                         sorted[refi].slot = i;
1366                         refi++;
1367                 } else {
1368                         bytenr = btrfs_node_blockptr(buf, i);
1369                         sorted[refi].bytenr = bytenr;
1370                         sorted[refi].slot = i;
1371                         refi++;
1372                 }
1373         }
1374         /*
1375          * if refi == 0, we didn't actually put anything into the sorted
1376          * array and we're done
1377          */
1378         if (refi == 0)
1379                 goto out;
1380
1381         sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
1382
1383         for (i = 0; i < refi; i++) {
1384                 cond_resched();
1385                 slot = sorted[i].slot;
1386                 bytenr = sorted[i].bytenr;
1387
1388                 if (level == 0) {
1389                         btrfs_item_key_to_cpu(buf, &key, slot);
1390                         fi = btrfs_item_ptr(buf, slot,
1391                                             struct btrfs_file_extent_item);
1392
1393                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1394                         if (bytenr == 0)
1395                                 continue;
1396
1397                         ret = process_func(trans, root, bytenr,
1398                                    btrfs_file_extent_disk_num_bytes(buf, fi),
1399                                    orig_buf->start, buf->start,
1400                                    orig_root, ref_root,
1401                                    orig_generation, ref_generation,
1402                                    key.objectid);
1403
1404                         if (ret) {
1405                                 faili = slot;
1406                                 WARN_ON(1);
1407                                 goto fail;
1408                         }
1409                 } else {
1410                         ret = process_func(trans, root, bytenr, buf->len,
1411                                            orig_buf->start, buf->start,
1412                                            orig_root, ref_root,
1413                                            orig_generation, ref_generation,
1414                                            level - 1);
1415                         if (ret) {
1416                                 faili = slot;
1417                                 WARN_ON(1);
1418                                 goto fail;
1419                         }
1420                 }
1421         }
1422 out:
1423         kfree(sorted);
1424         if (nr_extents) {
1425                 if (level == 0)
1426                         *nr_extents = nr_file_extents;
1427                 else
1428                         *nr_extents = nritems;
1429         }
1430         return 0;
1431 fail:
1432         kfree(sorted);
1434         return ret;
1435 }
1436
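/*
 * after 'buf' has replaced 'orig_buf', update the backrefs for every
 * pointer in slots [start_slot, start_slot + nr) so they record 'buf'
 * as the parent instead of 'orig_buf'.
 */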
1437 int btrfs_update_ref(struct btrfs_trans_handle *trans,
1438                      struct btrfs_root *root, struct extent_buffer *orig_buf,
1439                      struct extent_buffer *buf, int start_slot, int nr)
1441 {
1442         u64 bytenr;
1443         u64 ref_root;
1444         u64 orig_root;
1445         u64 ref_generation;
1446         u64 orig_generation;
1447         struct btrfs_key key;
1448         struct btrfs_file_extent_item *fi;
1449         int i;
1450         int ret;
1451         int slot;
1452         int level;
1453
1454         BUG_ON(start_slot < 0);
1455         BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1456
1457         ref_root = btrfs_header_owner(buf);
1458         ref_generation = btrfs_header_generation(buf);
1459         orig_root = btrfs_header_owner(orig_buf);
1460         orig_generation = btrfs_header_generation(orig_buf);
1461         level = btrfs_header_level(buf);
1462
1463         if (!root->ref_cows) {
1464                 if (level == 0 &&
1465                     root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1466                         return 0;
1467                 if (level != 0 &&
1468                     root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1469                         return 0;
1470         }
1471
1472         for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1473                 cond_resched();
1474                 if (level == 0) {
1475                         btrfs_item_key_to_cpu(buf, &key, slot);
1476                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1477                                 continue;
1478                         fi = btrfs_item_ptr(buf, slot,
1479                                             struct btrfs_file_extent_item);
1480                         if (btrfs_file_extent_type(buf, fi) ==
1481                             BTRFS_FILE_EXTENT_INLINE)
1482                                 continue;
1483                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1484                         if (bytenr == 0)
1485                                 continue;
1486                         ret = __btrfs_update_extent_ref(trans, root, bytenr,
1487                                     btrfs_file_extent_disk_num_bytes(buf, fi),
1488                                     orig_buf->start, buf->start,
1489                                     orig_root, ref_root, orig_generation,
1490                                     ref_generation, key.objectid);
1491                         if (ret)
1492                                 goto fail;
1493                 } else {
1494                         bytenr = btrfs_node_blockptr(buf, slot);
1495                         ret = __btrfs_update_extent_ref(trans, root, bytenr,
1496                                             buf->len, orig_buf->start,
1497                                             buf->start, orig_root, ref_root,
1498                                             orig_generation, ref_generation,
1499                                             level - 1);
1500                         if (ret)
1501                                 goto fail;
1502                 }
1503         }
1504         return 0;
1505 fail:
1506         WARN_ON(1);
1507         return ret;
1508 }
1509
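/*
 * write a single dirty block group item back into the extent tree.
 * Any search or write error is returned to the caller.
 */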
1510 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1511                                  struct btrfs_root *root,
1512                                  struct btrfs_path *path,
1513                                  struct btrfs_block_group_cache *cache)
1514 {
1515         int ret;
1516         struct btrfs_root *extent_root = root->fs_info->extent_root;
1517         unsigned long bi;
1518         struct extent_buffer *leaf;
1519
1520         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1521         if (ret < 0)
1522                 goto fail;
1523         BUG_ON(ret);
1524
1525         leaf = path->nodes[0];
1526         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1527         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1528         btrfs_mark_buffer_dirty(leaf);
1529         btrfs_release_path(extent_root, path);
1530 fail:
1531         return ret;
1535 }
1536
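/*
 * write out every block group item that is marked dirty in the block
 * group cache rb tree.  The last write error (if any) is returned once
 * all of the dirty groups have been visited.
 */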
1537 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1538                                    struct btrfs_root *root)
1539 {
1540         struct btrfs_block_group_cache *cache, *entry;
1541         struct rb_node *n;
1542         int err = 0;
1543         int werr = 0;
1544         struct btrfs_path *path;
1546
1547         path = btrfs_alloc_path();
1548         if (!path)
1549                 return -ENOMEM;
1550
1551         while (1) {
1552                 cache = NULL;
1553                 spin_lock(&root->fs_info->block_group_cache_lock);
1554                 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1555                      n; n = rb_next(n)) {
1556                         entry = rb_entry(n, struct btrfs_block_group_cache,
1557                                          cache_node);
1558                         if (entry->dirty) {
1559                                 cache = entry;
1560                                 break;
1561                         }
1562                 }
1563                 spin_unlock(&root->fs_info->block_group_cache_lock);
1564
1565                 if (!cache)
1566                         break;
1567
1568                 cache->dirty = 0;
1570
1571                 err = write_one_cache_group(trans, root,
1572                                             path, cache);
1573                 /*
1574                  * if we fail to write the cache group, remember the
1575                  * error and keep going so the rest of the dirty
1576                  * groups still get written out
1577                  */
1578                 if (err) {
1579                         werr = err;
1580                         continue;
1581                 }
1582         }
1583         btrfs_free_path(path);
1584         return werr;
1585 }
1586
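/* return 1 if bytenr falls in a missing or read-only block group */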
1587 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
1588 {
1589         struct btrfs_block_group_cache *block_group;
1590         int readonly = 0;
1591
1592         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1593         if (!block_group || block_group->ro)
1594                 readonly = 1;
1595         if (block_group)
1596                 btrfs_put_block_group(block_group);
1597         return readonly;
1598 }
1599
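/*
 * find the space_info for the given allocation flags and add the new
 * bytes to its totals, allocating and registering a fresh space_info
 * the first time a set of flags is seen.
 */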
1600 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1601                              u64 total_bytes, u64 bytes_used,
1602                              struct btrfs_space_info **space_info)
1603 {
1604         struct btrfs_space_info *found;
1605
1606         found = __find_space_info(info, flags);
1607         if (found) {
1608                 spin_lock(&found->lock);
1609                 found->total_bytes += total_bytes;
1610                 found->bytes_used += bytes_used;
1611                 found->full = 0;
1612                 spin_unlock(&found->lock);
1613                 *space_info = found;
1614                 return 0;
1615         }
1616         found = kzalloc(sizeof(*found), GFP_NOFS);
1617         if (!found)
1618                 return -ENOMEM;
1619
1620         INIT_LIST_HEAD(&found->block_groups);
1621         init_rwsem(&found->groups_sem);
1622         spin_lock_init(&found->lock);
1623         found->flags = flags;
1624         found->total_bytes = total_bytes;
1625         found->bytes_used = bytes_used;
1626         found->bytes_pinned = 0;
1627         found->bytes_reserved = 0;
1628         found->bytes_readonly = 0;
1629         found->bytes_delalloc = 0;
1630         found->full = 0;
1631         found->force_alloc = 0;
1632         *space_info = found;
1633         list_add_rcu(&found->list, &info->space_info);
1634         return 0;
1635 }
1636
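/*
 * remember which raid/dup profiles have block groups on disk for each
 * of the data, metadata and system allocation types.
 */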
1637 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1638 {
1639         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1640                                    BTRFS_BLOCK_GROUP_RAID1 |
1641                                    BTRFS_BLOCK_GROUP_RAID10 |
1642                                    BTRFS_BLOCK_GROUP_DUP);
1643         if (extra_flags) {
1644                 if (flags & BTRFS_BLOCK_GROUP_DATA)
1645                         fs_info->avail_data_alloc_bits |= extra_flags;
1646                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1647                         fs_info->avail_metadata_alloc_bits |= extra_flags;
1648                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1649                         fs_info->avail_system_alloc_bits |= extra_flags;
1650         }
1651 }
1652
1653 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
1654 {
1655         spin_lock(&cache->space_info->lock);
1656         spin_lock(&cache->lock);
1657         if (!cache->ro) {
1658                 cache->space_info->bytes_readonly += cache->key.offset -
1659                                         btrfs_block_group_used(&cache->item);
1660                 cache->ro = 1;
1661         }
1662         spin_unlock(&cache->lock);
1663         spin_unlock(&cache->space_info->lock);
1664 }
1665
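/*
 * strip allocation profiles the current rw device count can't support
 * and drop redundant combinations: DUP loses to the mirrored profiles,
 * RAID1 to RAID10, and RAID0 to any of them.  On a single device, for
 * example, a RAID1|DUP request is reduced to plain DUP.
 */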
1666 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1667 {
1668         u64 num_devices = root->fs_info->fs_devices->rw_devices;
1669
1670         if (num_devices == 1)
1671                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1672         if (num_devices < 4)
1673                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1674
1675         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1676             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1677                       BTRFS_BLOCK_GROUP_RAID10))) {
1678                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1679         }
1680
1681         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1682             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1683                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1684         }
1685
1686         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1687             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1688              (flags & BTRFS_BLOCK_GROUP_RAID10) |
1689              (flags & BTRFS_BLOCK_GROUP_DUP)))
1690                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1691         return flags;
1692 }
1693
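/*
 * pick the profile for a new allocation: data when 'data' is set,
 * system chunks for the chunk root, metadata otherwise, reduced to
 * what the current devices can actually provide.
 */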
1694 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
1695 {
1696         struct btrfs_fs_info *info = root->fs_info;
1697         u64 alloc_profile;
1698
1699         if (data) {
1700                 alloc_profile = info->avail_data_alloc_bits &
1701                         info->data_alloc_profile;
1702                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
1703         } else if (root == root->fs_info->chunk_root) {
1704                 alloc_profile = info->avail_system_alloc_bits &
1705                         info->system_alloc_profile;
1706                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
1707         } else {
1708                 alloc_profile = info->avail_metadata_alloc_bits &
1709                         info->metadata_alloc_profile;
1710                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
1711         }
1712
1713         return btrfs_reduce_alloc_profile(root, data);
1714 }
1715
1716 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
1717 {
1718         u64 alloc_target;
1719
1720         alloc_target = btrfs_get_alloc_profile(root, 1);
1721         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
1722                                                        alloc_target);
1723 }
1724
1725 /*
1726  * for now this just makes sure at least 5% of our metadata space stays free:
1727  * past 80% usage we try to allocate a new chunk; once full, -ENOSPC past 95%.
1728  */
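/*
 * For example, assuming a 10GiB metadata space_info that is not yet full:
 * thresh = 10GiB * 80 / 100 = 8GiB, so once bytes_used + bytes_reserved +
 * bytes_pinned + bytes_readonly passes 8GiB we force a chunk allocation
 * (asking for at least 2MiB) before falling back to a commit or -ENOSPC.
 */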
1729 int btrfs_check_metadata_free_space(struct btrfs_root *root)
1730 {
1731         struct btrfs_fs_info *info = root->fs_info;
1732         struct btrfs_space_info *meta_sinfo;
1733         u64 alloc_target, thresh;
1734         int committed = 0, ret;
1735
1736         /* get the space info for where the metadata will live */
1737         alloc_target = btrfs_get_alloc_profile(root, 0);
1738         meta_sinfo = __find_space_info(info, alloc_target);
1739
1740 again:
1741         spin_lock(&meta_sinfo->lock);
1742         if (!meta_sinfo->full)
1743                 thresh = meta_sinfo->total_bytes * 80;
1744         else
1745                 thresh = meta_sinfo->total_bytes * 95;
1746
1747         do_div(thresh, 100);
1748
1749         if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
1750             meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
1751                 struct btrfs_trans_handle *trans;
1752                 if (!meta_sinfo->full) {
1753                         meta_sinfo->force_alloc = 1;
1754                         spin_unlock(&meta_sinfo->lock);
1755
1756                         trans = btrfs_start_transaction(root, 1);
1757                         if (!trans)
1758                                 return -ENOMEM;
1759
1760                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
1761                                              2 * 1024 * 1024, alloc_target, 0);
1762                         btrfs_end_transaction(trans, root);
1763                         goto again;
1764                 }
1765                 spin_unlock(&meta_sinfo->lock);
1766
1767                 if (!committed) {
1768                         committed = 1;
1769                         trans = btrfs_join_transaction(root, 1);
1770                         if (!trans)
1771                                 return -ENOMEM;
1772                         ret = btrfs_commit_transaction(trans, root);
1773                         if (ret)
1774                                 return ret;
1775                         goto again;
1776                 }
1777                 return -ENOSPC;
1778         }
1779         spin_unlock(&meta_sinfo->lock);
1780
1781         return 0;
1782 }
1783
1784 /*
1785  * check the space_info this inode allocates from to make sure there is
1786  * room for 'bytes' of new data, allocating a chunk or committing if needed.
1787  */
1788 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
1789                                 u64 bytes)
1790 {
1791         struct btrfs_space_info *data_sinfo;
1792         int ret = 0, committed = 0;
1793
1794         /* make sure bytes are sectorsize aligned */
1795         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
1796
1797         data_sinfo = BTRFS_I(inode)->space_info;
1798 again:
1799         /* make sure we have enough space to handle the data first */
1800         spin_lock(&data_sinfo->lock);
1801         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
1802             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
1803             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
1804             data_sinfo->bytes_may_use < bytes) {
1805                 struct btrfs_trans_handle *trans;
1806
1807                 /*
1808                  * if we don't have enough free bytes in this space then we need
1809                  * to alloc a new chunk.
1810                  */
1811                 if (!data_sinfo->full) {
1812                         u64 alloc_target;
1813
1814                         data_sinfo->force_alloc = 1;
1815                         spin_unlock(&data_sinfo->lock);
1816
1817                         alloc_target = btrfs_get_alloc_profile(root, 1);
1818                         trans = btrfs_start_transaction(root, 1);
1819                         if (!trans)
1820                                 return -ENOMEM;
1821
1822                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
1823                                              bytes + 2 * 1024 * 1024,
1824                                              alloc_target, 0);
1825                         btrfs_end_transaction(trans, root);
1826                         if (ret)
1827                                 return ret;
1828                         goto again;
1829                 }
1830                 spin_unlock(&data_sinfo->lock);
1831
1832                 /* commit the current transaction and try again */
1833                 if (!committed) {
1834                         committed = 1;
1835                         trans = btrfs_join_transaction(root, 1);
1836                         if (!trans)
1837                                 return -ENOMEM;
1838                         ret = btrfs_commit_transaction(trans, root);
1839                         if (ret)
1840                                 return ret;
1841                         goto again;
1842                 }
1843
1844                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
1845                        ", %llu bytes_used, %llu bytes_reserved, "
1846                        "%llu bytes_pinned, %llu bytes_readonly, %llu may use, "
1847                        "%llu total\n", (unsigned long long)bytes,
1848                        (unsigned long long)data_sinfo->bytes_delalloc,
1849                        (unsigned long long)data_sinfo->bytes_used,
1850                        (unsigned long long)data_sinfo->bytes_reserved,
1851                        (unsigned long long)data_sinfo->bytes_pinned,
1852                        (unsigned long long)data_sinfo->bytes_readonly,
1853                        (unsigned long long)data_sinfo->bytes_may_use,
1854                        (unsigned long long)data_sinfo->total_bytes);
1855                 return -ENOSPC;
1856         }
1857         data_sinfo->bytes_may_use += bytes;
1858         BTRFS_I(inode)->reserved_bytes += bytes;
1859         spin_unlock(&data_sinfo->lock);
1860
1861         return btrfs_check_metadata_free_space(root);
1862 }
1863
1864 /*
1865  * if there was an error for whatever reason after calling
1866  * btrfs_check_data_free_space, call this so we can clean up the counters.
1867  */
1868 void btrfs_free_reserved_data_space(struct btrfs_root *root,
1869                                     struct inode *inode, u64 bytes)
1870 {
1871         struct btrfs_space_info *data_sinfo;
1872
1873         /* make sure bytes are sectorsize aligned */
1874         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
1875
1876         data_sinfo = BTRFS_I(inode)->space_info;
1877         spin_lock(&data_sinfo->lock);
1878         data_sinfo->bytes_may_use -= bytes;
1879         BTRFS_I(inode)->reserved_bytes -= bytes;
1880         spin_unlock(&data_sinfo->lock);
1881 }
1882
1883 /* called when we are adding a delalloc extent to the inode's io_tree */
1884 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
1885                                   u64 bytes)
1886 {
1887         struct btrfs_space_info *data_sinfo;
1888
1889         /* get the space info for where this inode will be storing its data */
1890         data_sinfo = BTRFS_I(inode)->space_info;
1891
1892         /* make sure we have enough space to handle the data first */
1893         spin_lock(&data_sinfo->lock);
1894         data_sinfo->bytes_delalloc += bytes;
1895
1896         /*
1897          * we are adding a delalloc extent without calling
1898          * btrfs_check_data_free_space first.  This happens on a weird
1899          * writepage condition, but shouldn't hurt our accounting
1900          */
1901         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
1902                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
1903                 BTRFS_I(inode)->reserved_bytes = 0;
1904         } else {
1905                 data_sinfo->bytes_may_use -= bytes;
1906                 BTRFS_I(inode)->reserved_bytes -= bytes;
1907         }
1908
1909         spin_unlock(&data_sinfo->lock);
1910 }
1911
1912 /* called when we are clearing a delalloc extent from the inode's io_tree */
1913 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
1914                               u64 bytes)
1915 {
1916         struct btrfs_space_info *info;
1917
1918         info = BTRFS_I(inode)->space_info;
1919
1920         spin_lock(&info->lock);
1921         info->bytes_delalloc -= bytes;
1922         spin_unlock(&info->lock);
1923 }
1924
1925 static void force_metadata_allocation(struct btrfs_fs_info *info)
1926 {
1927         struct list_head *head = &info->space_info;
1928         struct btrfs_space_info *found;
1929
1930         rcu_read_lock();
1931         list_for_each_entry_rcu(found, head, list) {
1932                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
1933                         found->force_alloc = 1;
1934         }
1935         rcu_read_unlock();
1936 }
1937
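/*
 * allocate a new chunk for the given profile when the caller forces it
 * or the space_info is past roughly 60% used.  Every metadata_ratio
 * data chunk allocations we also force a metadata chunk so the two
 * grow together.
 */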
1938 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1939                           struct btrfs_root *extent_root, u64 alloc_bytes,
1940                           u64 flags, int force)
1941 {
1942         struct btrfs_space_info *space_info;
1943         struct btrfs_fs_info *fs_info = extent_root->fs_info;
1944         u64 thresh;
1945         int ret = 0;
1946
1947         mutex_lock(&fs_info->chunk_mutex);
1948
1949         flags = btrfs_reduce_alloc_profile(extent_root, flags);
1950
1951         space_info = __find_space_info(extent_root->fs_info, flags);
1952         if (!space_info) {
1953                 ret = update_space_info(extent_root->fs_info, flags,
1954                                         0, 0, &space_info);
1955                 BUG_ON(ret);
1956         }
1957         BUG_ON(!space_info);
1958
1959         spin_lock(&space_info->lock);
1960         if (space_info->force_alloc) {
1961                 force = 1;
1962                 space_info->force_alloc = 0;
1963         }
1964         if (space_info->full) {
1965                 spin_unlock(&space_info->lock);
1966                 goto out;
1967         }
1968
1969         thresh = space_info->total_bytes - space_info->bytes_readonly;
1970         thresh = div_factor(thresh, 6);
1971         if (!force &&
1972            (space_info->bytes_used + space_info->bytes_pinned +
1973             space_info->bytes_reserved + alloc_bytes) < thresh) {
1974                 spin_unlock(&space_info->lock);
1975                 goto out;
1976         }
1977         spin_unlock(&space_info->lock);
1978
1979         /*
1980          * if we're doing a data chunk, go ahead and make sure that
1981          * we keep a reasonable number of metadata chunks allocated in the
1982          * FS as well.
1983          */
1984         if (flags & BTRFS_BLOCK_GROUP_DATA) {
1985                 fs_info->data_chunk_allocations++;
1986                 if (!(fs_info->data_chunk_allocations %
1987                       fs_info->metadata_ratio))
1988                         force_metadata_allocation(fs_info);
1989         }
1990
1991         ret = btrfs_alloc_chunk(trans, extent_root, flags);
1992         if (ret)
1993                 space_info->full = 1;
1994 out:
1995         mutex_unlock(&extent_root->fs_info->chunk_mutex);
1996         return ret;
1997 }
1998
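/*
 * add or remove num_bytes of used space from every block group touched
 * by [bytenr, bytenr + num_bytes), marking each group dirty.  When
 * freeing with mark_free set, the range is also discarded and handed
 * back to the free space cache.
 */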
1999 static int update_block_group(struct btrfs_trans_handle *trans,
2000                               struct btrfs_root *root,
2001                               u64 bytenr, u64 num_bytes, int alloc,
2002                               int mark_free)
2003 {
2004         struct btrfs_block_group_cache *cache;
2005         struct btrfs_fs_info *info = root->fs_info;
2006         u64 total = num_bytes;
2007         u64 old_val;
2008         u64 byte_in_group;
2009
2010         while (total) {
2011                 cache = btrfs_lookup_block_group(info, bytenr);
2012                 if (!cache)
2013                         return -1;
2014                 byte_in_group = bytenr - cache->key.objectid;
2015                 WARN_ON(byte_in_group > cache->key.offset);
2016
2017                 spin_lock(&cache->space_info->lock);
2018                 spin_lock(&cache->lock);
2019                 cache->dirty = 1;
2020                 old_val = btrfs_block_group_used(&cache->item);
2021                 num_bytes = min(total, cache->key.offset - byte_in_group);
2022                 if (alloc) {
2023                         old_val += num_bytes;
2024                         cache->space_info->bytes_used += num_bytes;
2025                         if (cache->ro)
2026                                 cache->space_info->bytes_readonly -= num_bytes;
2027                         btrfs_set_block_group_used(&cache->item, old_val);
2028                         spin_unlock(&cache->lock);
2029                         spin_unlock(&cache->space_info->lock);
2030                 } else {
2031                         old_val -= num_bytes;
2032                         cache->space_info->bytes_used -= num_bytes;
2033                         if (cache->ro)
2034                                 cache->space_info->bytes_readonly += num_bytes;
2035                         btrfs_set_block_group_used(&cache->item, old_val);
2036                         spin_unlock(&cache->lock);
2037                         spin_unlock(&cache->space_info->lock);
2038                         if (mark_free) {
2039                                 int ret;
2040
2041                                 ret = btrfs_discard_extent(root, bytenr,
2042                                                            num_bytes);
2043                                 WARN_ON(ret);
2044
2045                                 ret = btrfs_add_free_space(cache, bytenr,
2046                                                            num_bytes);
2047                                 WARN_ON(ret);
2048                         }
2049                 }
2050                 btrfs_put_block_group(cache);
2051                 total -= num_bytes;
2052                 bytenr += num_bytes;
2053         }
2054         return 0;
2055 }
2056
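/* return the logical start of the first block group at or beyond search_start */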
2057 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
2058 {
2059         struct btrfs_block_group_cache *cache;
2060         u64 bytenr;
2061
2062         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
2063         if (!cache)
2064                 return 0;
2065
2066         bytenr = cache->key.objectid;
2067         btrfs_put_block_group(cache);
2068
2069         return bytenr;
2070 }
2071
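/*
 * pin or unpin [bytenr, bytenr + num) in the pinned_extents tree and
 * keep the per-block-group and per-space-info pinned counters in sync.
 * Unpinned space goes back to the free space cache of cached groups.
 */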
2072 int btrfs_update_pinned_extents(struct btrfs_root *root,
2073                                 u64 bytenr, u64 num, int pin)
2074 {
2075         u64 len;
2076         struct btrfs_block_group_cache *cache;
2077         struct btrfs_fs_info *fs_info = root->fs_info;
2078
2079         if (pin) {
2080                 set_extent_dirty(&fs_info->pinned_extents,
2081                                 bytenr, bytenr + num - 1, GFP_NOFS);
2082         } else {
2083                 clear_extent_dirty(&fs_info->pinned_extents,
2084                                 bytenr, bytenr + num - 1, GFP_NOFS);
2085         }
2086
2087         while (num > 0) {
2088                 cache = btrfs_lookup_block_group(fs_info, bytenr);
2089                 BUG_ON(!cache);
2090                 len = min(num, cache->key.offset -
2091                           (bytenr - cache->key.objectid));
2092                 if (pin) {
2093                         spin_lock(&cache->space_info->lock);
2094                         spin_lock(&cache->lock);
2095                         cache->pinned += len;
2096                         cache->space_info->bytes_pinned += len;
2097                         spin_unlock(&cache->lock);
2098                         spin_unlock(&cache->space_info->lock);
2099                         fs_info->total_pinned += len;
2100                 } else {
2101                         spin_lock(&cache->space_info->lock);
2102                         spin_lock(&cache->lock);
2103                         cache->pinned -= len;
2104                         cache->space_info->bytes_pinned -= len;
2105                         spin_unlock(&cache->lock);
2106                         spin_unlock(&cache->space_info->lock);
2107                         fs_info->total_pinned -= len;
2108                         if (cache->cached)
2109                                 btrfs_add_free_space(cache, bytenr, len);
2110                 }
2111                 btrfs_put_block_group(cache);
2112                 bytenr += len;
2113                 num -= len;
2114         }
2115         return 0;
2116 }
2117
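/* adjust the reserved byte counters for every block group in the range */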
2118 static int update_reserved_extents(struct btrfs_root *root,
2119                                    u64 bytenr, u64 num, int reserve)
2120 {
2121         u64 len;
2122         struct btrfs_block_group_cache *cache;
2123         struct btrfs_fs_info *fs_info = root->fs_info;
2124
2125         while (num > 0) {
2126                 cache = btrfs_lookup_block_group(fs_info, bytenr);
2127                 BUG_ON(!cache);
2128                 len = min(num, cache->key.offset -
2129                           (bytenr - cache->key.objectid));
2130
2131                 spin_lock(&cache->space_info->lock);
2132                 spin_lock(&cache->lock);
2133                 if (reserve) {
2134                         cache->reserved += len;
2135                         cache->space_info->bytes_reserved += len;
2136                 } else {
2137                         cache->reserved -= len;
2138                         cache->space_info->bytes_reserved -= len;
2139                 }
2140                 spin_unlock(&cache->lock);
2141                 spin_unlock(&cache->space_info->lock);
2142                 btrfs_put_block_group(cache);
2143                 bytenr += len;
2144                 num -= len;
2145         }
2146         return 0;
2147 }
2148
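/* copy all of the currently pinned extent ranges into the given io_tree */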
2149 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2150 {
2151         u64 last = 0;
2152         u64 start;
2153         u64 end;
2154         struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
2155         int ret;
2156
2157         while (1) {
2158                 ret = find_first_extent_bit(pinned_extents, last,
2159                                             &start, &end, EXTENT_DIRTY);
2160                 if (ret)
2161                         break;
2162                 set_extent_dirty(copy, start, end, GFP_NOFS);
2163                 last = end + 1;
2164         }
2165         return 0;
2166 }
2167
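/*
 * called after a transaction commits: discard and unpin everything in
 * 'unpin' so the space is available for allocation again.
 */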
2168 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2169                                struct btrfs_root *root,
2170                                struct extent_io_tree *unpin)
2171 {
2172         u64 start;
2173         u64 end;
2174         int ret;
2175
2176         while (1) {
2177                 ret = find_first_extent_bit(unpin, 0, &start, &end,
2178                                             EXTENT_DIRTY);
2179                 if (ret)
2180                         break;
2181
2182                 ret = btrfs_discard_extent(root, start, end + 1 - start);
2183
2184                 /* unlocks the pinned mutex */
2185                 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
2186                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2187
2188                 cond_resched();
2189         }
2190         return 0;
2191 }
2192
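/*
 * pin down [bytenr, bytenr + num_bytes), or, for an unwritten tree
 * block from this transaction, hand the buffer back via *must_clean
 * and return 1 so the caller can reuse the block instead of pinning it.
 */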
2193 static int pin_down_bytes(struct btrfs_trans_handle *trans,
2194                           struct btrfs_root *root,
2195                           struct btrfs_path *path,
2196                           u64 bytenr, u64 num_bytes, int is_data,
2197                           struct extent_buffer **must_clean)
2198 {
2200         struct extent_buffer *buf;
2201
2202         if (is_data)
2203                 goto pinit;
2204
2205         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
2206         if (!buf)
2207                 goto pinit;
2208
2209         /* we can reuse a block if it hasn't been written
2210          * and it is from this transaction.  We can't
2211          * reuse anything from the tree log root because
2212          * it has tiny sub-transactions.
2213          */
2214         if (btrfs_buffer_uptodate(buf, 0) &&
2215             btrfs_try_tree_lock(buf)) {
2216                 u64 header_owner = btrfs_header_owner(buf);
2217                 u64 header_transid = btrfs_header_generation(buf);
2218                 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2219                     header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2220                     header_owner != BTRFS_DATA_RELOC_TREE_OBJECTID &&
2221                     header_transid == trans->transid &&
2222                     !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2223                         *must_clean = buf;
2224                         return 1;
2225                 }
2226                 btrfs_tree_unlock(buf);
2227         }
2228         free_extent_buffer(buf);
2229 pinit:
2230         btrfs_set_path_blocking(path);
2231         /* unlocks the pinned mutex */
2232         btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2233
2235         return 0;
2236 }
2237
2238 /*
2239  * remove an extent from the root, returns 0 on success
2240  */
2241 static int __free_extent(struct btrfs_trans_handle *trans,
2242                          struct btrfs_root *root,
2243                          u64 bytenr, u64 num_bytes, u64 parent,
2244                          u64 root_objectid, u64 ref_generation,
2245                          u64 owner_objectid, int pin, int mark_free,
2246                          int refs_to_drop)
2247 {
2248         struct btrfs_path *path;
2249         struct btrfs_key key;
2250         struct btrfs_fs_info *info = root->fs_info;
2251         struct btrfs_root *extent_root = info->extent_root;
2252         struct extent_buffer *leaf;
2253         int ret;
2254         int extent_slot = 0;
2255         int found_extent = 0;
2256         int num_to_del = 1;
2257         struct btrfs_extent_item *ei;
2258         u32 refs;
2259
2260         key.objectid = bytenr;
2261         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2262         key.offset = num_bytes;
2263         path = btrfs_alloc_path();
2264         if (!path)
2265                 return -ENOMEM;
2266
2267         path->reada = 1;
2268         path->leave_spinning = 1;
2269         ret = lookup_extent_backref(trans, extent_root, path,
2270                                     bytenr, parent, root_objectid,
2271                                     ref_generation, owner_objectid, 1);
2272         if (ret == 0) {
2273                 struct btrfs_key found_key;
2274                 extent_slot = path->slots[0];
2275                 while (extent_slot > 0) {
2276                         extent_slot--;
2277                         btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2278                                               extent_slot);
2279                         if (found_key.objectid != bytenr)
2280                                 break;
2281                         if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
2282                             found_key.offset == num_bytes) {
2283                                 found_extent = 1;
2284                                 break;
2285                         }
2286                         if (path->slots[0] - extent_slot > 5)
2287                                 break;
2288                 }
2289                 if (!found_extent) {
2290                         ret = remove_extent_backref(trans, extent_root, path,
2291                                                     refs_to_drop);
2292                         BUG_ON(ret);
2293                         btrfs_release_path(extent_root, path);
2294                         path->leave_spinning = 1;
2295                         ret = btrfs_search_slot(trans, extent_root,
2296                                                 &key, path, -1, 1);
2297                         if (ret) {
2298                                 printk(KERN_ERR "umm, got %d back from search"
2299                                        ", was looking for %llu\n", ret,
2300                                        (unsigned long long)bytenr);
2301                                 btrfs_print_leaf(extent_root, path->nodes[0]);
2302                         }
2303                         BUG_ON(ret);
2304                         extent_slot = path->slots[0];
2305                 }
2306         } else {
2307                 btrfs_print_leaf(extent_root, path->nodes[0]);
2308                 WARN_ON(1);
2309                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
2310                        "parent %llu root %llu gen %llu owner %llu\n",
2311                        (unsigned long long)bytenr,
2312                        (unsigned long long)parent,
2313                        (unsigned long long)root_objectid,
2314                        (unsigned long long)ref_generation,
2315                        (unsigned long long)owner_objectid);
2316         }
2317
2318         leaf = path->nodes[0];
2319         ei = btrfs_item_ptr(leaf, extent_slot,
2320                             struct btrfs_extent_item);
2321         refs = btrfs_extent_refs(leaf, ei);
2322
2323         /*
2324          * we're not allowed to delete the extent item if there
2325          * are other delayed ref updates pending
2326          */
2328         BUG_ON(refs < refs_to_drop);
2329         refs -= refs_to_drop;
2330         btrfs_set_extent_refs(leaf, ei, refs);
2331         btrfs_mark_buffer_dirty(leaf);
2332
2333         if (refs == 0 && found_extent &&
2334             path->slots[0] == extent_slot + 1) {
2335                 struct btrfs_extent_ref *ref;
2336                 ref = btrfs_item_ptr(leaf, path->slots[0],
2337                                      struct btrfs_extent_ref);
2338                 BUG_ON(btrfs_ref_num_refs(leaf, ref) != refs_to_drop);
2339                 /* if the back ref and the extent are next to each other
2340                  * they get deleted below in one shot
2341                  */
2342                 path->slots[0] = extent_slot;
2343                 num_to_del = 2;
2344         } else if (found_extent) {
2345                 /* otherwise delete the extent back ref */
2346                 ret = remove_extent_backref(trans, extent_root, path,
2347                                             refs_to_drop);
2348                 BUG_ON(ret);
2349                 /* if refs are 0, we need to setup the path for deletion */
2350                 if (refs == 0) {
2351                         btrfs_release_path(extent_root, path);
2352                         path->leave_spinning = 1;
2353                         ret = btrfs_search_slot(trans, extent_root, &key, path,
2354                                                 -1, 1);
2355                         BUG_ON(ret);
2356                 }
2357         }
2358
2359         if (refs == 0) {
2360                 u64 super_used;
2361                 u64 root_used;
2362                 struct extent_buffer *must_clean = NULL;
2363
2364                 if (pin) {
2365                         ret = pin_down_bytes(trans, root, path,
2366                                 bytenr, num_bytes,
2367                                 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID,
2368                                 &must_clean);
2369                         if (ret > 0)
2370                                 mark_free = 1;
2371                         BUG_ON(ret < 0);
2372                 }
2373
2374                 /* block accounting for super block */
2375                 spin_lock(&info->delalloc_lock);
2376                 super_used = btrfs_super_bytes_used(&info->super_copy);
2377                 btrfs_set_super_bytes_used(&info->super_copy,
2378                                            super_used - num_bytes);
2379
2380                 /* block accounting for root item */
2381                 root_used = btrfs_root_used(&root->root_item);
2382                 btrfs_set_root_used(&root->root_item,
2383                                            root_used - num_bytes);
2384                 spin_unlock(&info->delalloc_lock);
2385
2386                 /*
2387                  * it is going to be very rare for someone to be waiting
2388                  * on the block we're freeing.  del_items might need to
2389                  * schedule, so rather than get fancy, just force it
2390                  * to blocking here
2391                  */
2392                 if (must_clean)
2393                         btrfs_set_lock_blocking(must_clean);
2394
2395                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
2396                                       num_to_del);
2397                 BUG_ON(ret);
2398                 btrfs_release_path(extent_root, path);
2399
2400                 if (must_clean) {
2401                         clean_tree_block(NULL, root, must_clean);
2402                         btrfs_tree_unlock(must_clean);
2403                         free_extent_buffer(must_clean);
2404                 }
2405
2406                 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2407                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
2408                         BUG_ON(ret);
2409                 } else {
2410                         invalidate_mapping_pages(info->btree_inode->i_mapping,
2411                              bytenr >> PAGE_CACHE_SHIFT,
2412                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
2413                 }
2414
2415                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
2416                                          mark_free);
2417                 BUG_ON(ret);
2418         }
2419         btrfs_free_path(path);
2420         return ret;
2421 }
2422
2423 /*
2424  * remove an extent from the root, returns 0 on success
2425  */
2426 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2427                                         struct btrfs_root *root,
2428                                         u64 bytenr, u64 num_bytes, u64 parent,
2429                                         u64 root_objectid, u64 ref_generation,
2430                                         u64 owner_objectid, int pin,
2431                                         int refs_to_drop)
2432 {
2433         WARN_ON(num_bytes < root->sectorsize);
2434
2435         /*
2436          * metadata is always pinned; data is pinned whenever any
2437          * transaction may have committed a reference to it
2438          */
2439         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID ||
2440             ref_generation != trans->transid)
2441                 pin = 1;
2445
2446         return __free_extent(trans, root, bytenr, num_bytes, parent,
2447                             root_objectid, ref_generation,
2448                             owner_objectid, pin, pin == 0, refs_to_drop);
2449 }
2450
2451 /*
2452  * when we free an extent, it is possible (and likely) that we free the last
2453  * delayed ref for that extent as well.  This searches the delayed ref tree for
2454  * a given extent, and if there are no other delayed refs to be processed, it
2455  * removes it from the tree.
2456  */
2457 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
2458                                       struct btrfs_root *root, u64 bytenr)
2459 {
2460         struct btrfs_delayed_ref_head *head;
2461         struct btrfs_delayed_ref_root *delayed_refs;
2462         struct btrfs_delayed_ref_node *ref;
2463         struct rb_node *node;
2464         int ret;
2465
2466         delayed_refs = &trans->transaction->delayed_refs;
2467         spin_lock(&delayed_refs->lock);
2468         head = btrfs_find_delayed_ref_head(trans, bytenr);
2469         if (!head)
2470                 goto out;
2471
2472         node = rb_prev(&head->node.rb_node);
2473         if (!node)
2474                 goto out;
2475
2476         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2477
2478         /* there are still entries for this ref, we can't drop it */
2479         if (ref->bytenr == bytenr)
2480                 goto out;
2481
2482         /*
2483          * waiting for the lock here would deadlock.  If someone else has it
2484          * locked they are already in the process of dropping it anyway
2485          */
2486         if (!mutex_trylock(&head->mutex))
2487                 goto out;
2488
2489         /*
2490          * at this point we have a head with no other entries.  Go
2491          * ahead and process it.
2492          */
2493         head->node.in_tree = 0;
2494         rb_erase(&head->node.rb_node, &delayed_refs->root);
2495
2496         delayed_refs->num_entries--;
2497
2498         /*
2499          * we don't take a ref on the node because we're removing it from the
2500          * tree, so we just steal the ref the tree was holding.
2501          */
2502         delayed_refs->num_heads--;
2503         if (list_empty(&head->cluster))
2504                 delayed_refs->num_heads_ready--;
2505
2506         list_del_init(&head->cluster);
2507         spin_unlock(&delayed_refs->lock);
2508
2509         ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
2510                                   &head->node, head->must_insert_reserved);
2511         BUG_ON(ret);
2512         btrfs_put_delayed_ref(&head->node);
2513         return 0;
2514 out:
2515         spin_unlock(&delayed_refs->lock);
2516         return 0;
2517 }
2518
2519 int btrfs_free_extent(struct btrfs_trans_handle *trans,
2520                       struct btrfs_root *root,
2521                       u64 bytenr, u64 num_bytes, u64 parent,
2522                       u64 root_objectid, u64 ref_generation,
2523                       u64 owner_objectid, int pin)
2524 {
2525         int ret;
2526
2527         /*
2528          * tree log blocks never actually go into the extent allocation
2529          * tree, just update pinning info and exit early.
2530          *
2531          * data extents referenced by the tree log do need to have
2532          * their reference counts bumped.
2533          */
2534         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID &&
2535             owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2536                 /* unlocks the pinned mutex */
2537                 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2538                 update_reserved_extents(root, bytenr, num_bytes, 0);
2539                 ret = 0;
2540         } else {
2541                 ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent,
2542                                        root_objectid, ref_generation,
2543                                        owner_objectid,
2544                                        BTRFS_DROP_DELAYED_REF, 1);
2545                 BUG_ON(ret);
2546                 ret = check_ref_cleanup(trans, root, bytenr);
2547                 BUG_ON(ret);
2548         }
2549         return ret;
2550 }
2551
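/*
 * round val up to a multiple of the stripe size; with a 64KiB
 * stripesize, for example, 100000 becomes 131072.
 */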
2552 static u64 stripe_align(struct btrfs_root *root, u64 val)
2553 {
2554         u64 mask = ((u64)root->stripesize - 1);
2555         u64 ret = (val + mask) & ~mask;
2556         return ret;
2557 }
2558
2559 /*
2560  * walks the btree of allocated extents and finds a hole of a given size.
2561  * The key ins is changed to record the hole:
2562  * ins->objectid == block start
2563  * ins->flags = BTRFS_EXTENT_ITEM_KEY
2564  * ins->offset == number of blocks
2565  * Any available blocks before search_start are skipped.
2566  */
2567 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2568                                      struct btrfs_root *orig_root,
2569                                      u64 num_bytes, u64 empty_size,
2570                                      u64 search_start, u64 search_end,
2571                                      u64 hint_byte, struct btrfs_key *ins,
2572                                      u64 exclude_start, u64 exclude_nr,
2573                                      int data)
2574 {
2575         int ret = 0;
2576         struct btrfs_root *root = orig_root->fs_info->extent_root;
2577         struct btrfs_free_cluster *last_ptr = NULL;
2578         struct btrfs_block_group_cache *block_group = NULL;
2579         int empty_cluster = 2 * 1024 * 1024;
2580         int allowed_chunk_alloc = 0;
2581         struct btrfs_space_info *space_info;
2582         int last_ptr_loop = 0;
2583         int loop = 0;
2584
2585         WARN_ON(num_bytes < root->sectorsize);
2586         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2587         ins->objectid = 0;
2588         ins->offset = 0;
2589
2590         space_info = __find_space_info(root->fs_info, data);
2591
2592         if (orig_root->ref_cows || empty_size)
2593                 allowed_chunk_alloc = 1;
2594
2595         if (data & BTRFS_BLOCK_GROUP_METADATA) {
2596                 last_ptr = &root->fs_info->meta_alloc_cluster;
2597                 if (!btrfs_test_opt(root, SSD))
2598                         empty_cluster = 64 * 1024;
2599         }
2600
2601         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2602                 last_ptr = &root->fs_info->data_alloc_cluster;
2604
2605         if (last_ptr) {
2606                 spin_lock(&last_ptr->lock);
2607                 if (last_ptr->block_group)
2608                         hint_byte = last_ptr->window_start;
2609                 spin_unlock(&last_ptr->lock);
2610         }
2611
2612         search_start = max(search_start, first_logical_byte(root, 0));
2613         search_start = max(search_start, hint_byte);
2614
2615         if (!last_ptr) {
2616                 empty_cluster = 0;
2617                 loop = 1;
2618         }
2619
2620         if (search_start == hint_byte) {
2621                 block_group = btrfs_lookup_block_group(root->fs_info,
2622                                                        search_start);
2623                 if (block_group && block_group_bits(block_group, data)) {
2624                         down_read(&space_info->groups_sem);
2625                         if (list_empty(&block_group->list) ||
2626                             block_group->ro) {
2627                                 /*
2628                                  * someone is removing this block group,
2629                                  * we can't jump into the have_block_group
2630                                  * target because our list pointers are not
2631                                  * valid
2632                                  */
2633                                 btrfs_put_block_group(block_group);
2634                                 up_read(&space_info->groups_sem);
2635                         } else
2636                                 goto have_block_group;
2637                 } else if (block_group) {
2638                         btrfs_put_block_group(block_group);
2639                 }
2640         }
2641
2642 search:
2643         down_read(&space_info->groups_sem);
2644         list_for_each_entry(block_group, &space_info->block_groups, list) {
2645                 u64 offset;
2646
2647                 atomic_inc(&block_group->count);
2648                 search_start = block_group->key.objectid;
2649
2650 have_block_group:
2651                 if (unlikely(!block_group->cached)) {
2652                         mutex_lock(&block_group->cache_mutex);
2653                         ret = cache_block_group(root, block_group);
2654                         mutex_unlock(&block_group->cache_mutex);
2655                         if (ret) {
2656                                 btrfs_put_block_group(block_group);
2657                                 break;
2658                         }
2659                 }
2660
2661                 if (unlikely(block_group->ro))
2662                         goto loop;
2663
2664                 if (last_ptr) {
2665                         /*
2666                          * the refill lock keeps out other
2667                          * people trying to start a new cluster
2668                          */
2669                         spin_lock(&last_ptr->refill_lock);
2670                         if (last_ptr->block_group &&
2671                             (last_ptr->block_group->ro ||
2672                             !block_group_bits(last_ptr->block_group, data))) {
2673                                 offset = 0;
2674                                 goto refill_cluster;
2675                         }
2676
2677                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
2678                                                  num_bytes, search_start);
2679                         if (offset) {
2680                                 /* we have a block, we're done */
2681                                 spin_unlock(&last_ptr->refill_lock);
2682                                 goto checks;
2683                         }
2684
2685                         spin_lock(&last_ptr->lock);
2686                         /*
2687                          * whoops, this cluster doesn't actually point to
2688                          * this block group.  Get a ref on the block
2689                          * group it does point to and try again
2690                          */
2691                         if (!last_ptr_loop && last_ptr->block_group &&
2692                             last_ptr->block_group != block_group) {
2693
2694                                 btrfs_put_block_group(block_group);
2695                                 block_group = last_ptr->block_group;
2696                                 atomic_inc(&block_group->count);
2697                                 spin_unlock(&last_ptr->lock);
2698                                 spin_unlock(&last_ptr->refill_lock);
2699
2700                                 last_ptr_loop = 1;
2701                                 search_start = block_group->key.objectid;
2702                                 /*
2703                                  * we know this block group is properly
2704                                  * in the list because
2705                                  * btrfs_remove_block_group drops the
2706                                  * cluster before it removes the block
2707                                  * group from the list
2708                                  */
2709                                 goto have_block_group;
2710                         }
2711                         spin_unlock(&last_ptr->lock);
2712 refill_cluster:
2713                         /*
2714                          * this cluster didn't work out, free it and
2715                          * start over
2716                          */
2717                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
2718
2719                         last_ptr_loop = 0;
2720
2721                         /* allocate a cluster in this block group */
2722                         ret = btrfs_find_space_cluster(trans,
2723                                                block_group, last_ptr,
2724                                                offset, num_bytes,
2725                                                empty_cluster + empty_size);
2726                         if (ret == 0) {
2727                                 /*
2728                                  * now pull our allocation out of this
2729                                  * cluster
2730                                  */
2731                                 offset = btrfs_alloc_from_cluster(block_group,
2732                                                   last_ptr, num_bytes,
2733                                                   search_start);
2734                                 if (offset) {
2735                                         /* we found one, proceed */
2736                                         spin_unlock(&last_ptr->refill_lock);
2737                                         goto checks;
2738                                 }
2739                         }
2740                         /*
2741                          * at this point we either didn't find a cluster
2742                          * or we weren't able to allocate a block from our
2743                          * cluster.  Free the cluster we've been trying
2744                          * to use, and go to the next block group
2745                          */
2746                         if (loop < 2) {
2747                                 btrfs_return_cluster_to_free_space(NULL,
2748                                                                    last_ptr);
2749                                 spin_unlock(&last_ptr->refill_lock);
2750                                 goto loop;
2751                         }
2752                         spin_unlock(&last_ptr->refill_lock);
2753                 }
2754
2755                 offset = btrfs_find_space_for_alloc(block_group, search_start,
2756                                                     num_bytes, empty_size);
2757                 if (!offset)
2758                         goto loop;
2759 checks:
2760                 search_start = stripe_align(root, offset);
2761
2762                 /* we've gone past the end of our search window, move on */
2763                 if (search_start + num_bytes >= search_end) {
2764                         btrfs_add_free_space(block_group, offset, num_bytes);
2765                         goto loop;
2766                 }
2767
2768                 /* move on to the next group */
2769                 if (search_start + num_bytes >
2770                     block_group->key.objectid + block_group->key.offset) {
2771                         btrfs_add_free_space(block_group, offset, num_bytes);
2772                         goto loop;
2773                 }
2774
2775                 if (exclude_nr > 0 &&
2776                     (search_start + num_bytes > exclude_start &&
2777                      search_start < exclude_start + exclude_nr)) {
2778                         search_start = exclude_start + exclude_nr;
2779
2780                         btrfs_add_free_space(block_group, offset, num_bytes);
2781                         /*
2782                          * if search_start is still in this block group
2783                          * then we just re-search this block group
2784                          */
2785                         if (search_start >= block_group->key.objectid &&
2786                             search_start < (block_group->key.objectid +
2787                                             block_group->key.offset))
2788                                 goto have_block_group;
2789                         goto loop;
2790                 }
2791
2792                 ins->objectid = search_start;
2793                 ins->offset = num_bytes;
2794
2795                 if (offset < search_start)
2796                         btrfs_add_free_space(block_group, offset,
2797                                              search_start - offset);
2798                 BUG_ON(offset > search_start);
2799
2800                 /* we are all good, let's return */
2801                 break;
2802 loop:
2803                 btrfs_put_block_group(block_group);
2804         }
2805         up_read(&space_info->groups_sem);
2806
2807         /* loop == 0, try to find a clustered alloc in every block group
2808          * loop == 1, try again after forcing a chunk allocation
2809          * loop == 2, set empty_size and empty_cluster to 0 and try again
2810          */
2811         if (!ins->objectid && loop < 3 &&
2812             (empty_size || empty_cluster || allowed_chunk_alloc)) {
2813                 if (loop >= 2) {
2814                         empty_size = 0;
2815                         empty_cluster = 0;
2816                 }
2817
2818                 if (allowed_chunk_alloc) {
2819                         ret = do_chunk_alloc(trans, root, num_bytes +
2820                                              2 * 1024 * 1024, data, 1);
2821                         allowed_chunk_alloc = 0;
2822                 } else {
2823                         space_info->force_alloc = 1;
2824                 }
2825
2826                 if (loop < 3) {
2827                         loop++;
2828                         goto search;
2829                 }
2830                 ret = -ENOSPC;
2831         } else if (!ins->objectid) {
2832                 ret = -ENOSPC;
2833         }
2834
2835         /* we found what we needed */
2836         if (ins->objectid) {
2837                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
2838                         trans->block_group = block_group->key.objectid;
2839
2840                 btrfs_put_block_group(block_group);
2841                 ret = 0;
2842         }
2843
2844         return ret;
2845 }
2846
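     /*
      * debugging helper: dump the counters for a space_info and then walk
      * its block groups, printing their usage and free space.  This is
      * called when an allocation fails so we can see where the bytes went.
      */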
2847 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
2848 {
2849         struct btrfs_block_group_cache *cache;
2850
2851         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
2852                (unsigned long long)(info->total_bytes - info->bytes_used -
2853                                     info->bytes_pinned - info->bytes_reserved),
2854                (info->full) ? "" : "not ");
2855         printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
2856                " may_use=%llu, used=%llu\n",
2857                (unsigned long long)info->total_bytes,
2858                (unsigned long long)info->bytes_pinned,
2859                (unsigned long long)info->bytes_delalloc,
2860                (unsigned long long)info->bytes_may_use,
2861                (unsigned long long)info->bytes_used);
2862
2863         down_read(&info->groups_sem);
2864         list_for_each_entry(cache, &info->block_groups, list) {
2865                 spin_lock(&cache->lock);
2866                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
2867                        "%llu pinned %llu reserved\n",
2868                        (unsigned long long)cache->key.objectid,
2869                        (unsigned long long)cache->key.offset,
2870                        (unsigned long long)btrfs_block_group_used(&cache->item),
2871                        (unsigned long long)cache->pinned,
2872                        (unsigned long long)cache->reserved);
2873                 btrfs_dump_free_space(cache, bytes);
2874                 spin_unlock(&cache->lock);
2875         }
2876         up_read(&info->groups_sem);
2877 }
2878
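     /*
      * reserve space for an extent, pre-allocating chunks for the metadata
      * and requested profiles when needed, then calling find_free_extent.
      * On -ENOSPC the request is halved (down to min_alloc_size), a chunk
      * allocation is forced and the search retried.
      */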
2879 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2880                                   struct btrfs_root *root,
2881                                   u64 num_bytes, u64 min_alloc_size,
2882                                   u64 empty_size, u64 hint_byte,
2883                                   u64 search_end, struct btrfs_key *ins,
2884                                   u64 data)
2885 {
2886         int ret;
2887         u64 search_start = 0;
2888         struct btrfs_fs_info *info = root->fs_info;
2889
2890         data = btrfs_get_alloc_profile(root, data);
2891 again:
2892         /*
2893          * the only place that sets empty_size is btrfs_realloc_node, which
2894          * is not called recursively on allocations
2895          */
2896         if (empty_size || root->ref_cows) {
2897                 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2898                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2899                                      2 * 1024 * 1024,
2900                                      BTRFS_BLOCK_GROUP_METADATA |
2901                                      (info->metadata_alloc_profile &
2902                                       info->avail_metadata_alloc_bits), 0);
2903                 }
2904                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2905                                      num_bytes + 2 * 1024 * 1024, data, 0);
2906         }
2907
2908         WARN_ON(num_bytes < root->sectorsize);
2909         ret = find_free_extent(trans, root, num_bytes, empty_size,
2910                                search_start, search_end, hint_byte, ins,
2911                                trans->alloc_exclude_start,
2912                                trans->alloc_exclude_nr, data);
2913
2914         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2915                 num_bytes = num_bytes >> 1;
2916                 num_bytes = num_bytes & ~(root->sectorsize - 1);
2917                 num_bytes = max(num_bytes, min_alloc_size);
2918                 do_chunk_alloc(trans, root->fs_info->extent_root,
2919                                num_bytes, data, 1);
2920                 goto again;
2921         }
2922         if (ret) {
2923                 struct btrfs_space_info *sinfo;
2924
2925                 sinfo = __find_space_info(root->fs_info, data);
2926                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
2927                        "wanted %llu\n", (unsigned long long)data,
2928                        (unsigned long long)num_bytes);
2929                 dump_space_info(sinfo, num_bytes);
2930                 BUG();
2931         }
2932
2933         return ret;
2934 }
2935
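     /*
      * undo a reservation: discard the range, hand it back to the free
      * space cache and clear the reserved bytes in the block group
      */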
2936 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2937 {
2938         struct btrfs_block_group_cache *cache;
2939         int ret = 0;
2940
2941         cache = btrfs_lookup_block_group(root->fs_info, start);
2942         if (!cache) {
2943                 printk(KERN_ERR "Unable to find block group for %llu\n",
2944                        (unsigned long long)start);
2945                 return -ENOSPC;
2946         }
2947
2948         ret = btrfs_discard_extent(root, start, len);
2949
2950         btrfs_add_free_space(cache, start, len);
2951         btrfs_put_block_group(cache);
2952         update_reserved_extents(root, start, len, 0);
2953
2954         return ret;
2955 }
2956
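     /*
      * reserve an extent and mark it reserved in its block group.
      *
      * A rough usage sketch (illustrative values; data == 1 selects the
      * data allocation profile):
      *
      *	struct btrfs_key ins;
      *	ret = btrfs_reserve_extent(trans, root, 4096, root->sectorsize,
      *				   0, 0, (u64)-1, &ins, 1);
      *
      * On success ins.objectid is the start of the reservation and
      * ins.offset is its length.
      */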
2957 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2958                                   struct btrfs_root *root,
2959                                   u64 num_bytes, u64 min_alloc_size,
2960                                   u64 empty_size, u64 hint_byte,
2961                                   u64 search_end, struct btrfs_key *ins,
2962                                   u64 data)
2963 {
2964         int ret;
2965         ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2966                                      empty_size, hint_byte, search_end, ins,
2967                                      data);
2968         update_reserved_extents(root, ins->objectid, ins->offset, 1);
2969         return ret;
2970 }
2971
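     /*
      * insert the extent item and its first backref for an extent that has
      * already been reserved, updating the byte counts in the super block,
      * the root item and the block group
      */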
2972 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2973                                          struct btrfs_root *root, u64 parent,
2974                                          u64 root_objectid, u64 ref_generation,
2975                                          u64 owner, struct btrfs_key *ins,
2976                                          int ref_mod)
2977 {
2978         int ret;
2979         u64 super_used;
2980         u64 root_used;
2981         u64 num_bytes = ins->offset;
2982         u32 sizes[2];
2983         struct btrfs_fs_info *info = root->fs_info;
2984         struct btrfs_root *extent_root = info->extent_root;
2985         struct btrfs_extent_item *extent_item;
2986         struct btrfs_extent_ref *ref;
2987         struct btrfs_path *path;
2988         struct btrfs_key keys[2];
2989
2990         if (parent == 0)
2991                 parent = ins->objectid;
2992
2993         /* block accounting for super block */
2994         spin_lock(&info->delalloc_lock);
2995         super_used = btrfs_super_bytes_used(&info->super_copy);
2996         btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2997
2998         /* block accounting for root item */
2999         root_used = btrfs_root_used(&root->root_item);
3000         btrfs_set_root_used(&root->root_item, root_used + num_bytes);
3001         spin_unlock(&info->delalloc_lock);
3002
3003         memcpy(&keys[0], ins, sizeof(*ins));
3004         keys[1].objectid = ins->objectid;
3005         keys[1].type = BTRFS_EXTENT_REF_KEY;
3006         keys[1].offset = parent;
3007         sizes[0] = sizeof(*extent_item);
3008         sizes[1] = sizeof(*ref);
3009
3010         path = btrfs_alloc_path();
3011         BUG_ON(!path);
3012
3013         path->leave_spinning = 1;
3014         ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
3015                                        sizes, 2);
3016         BUG_ON(ret);
3017
3018         extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3019                                      struct btrfs_extent_item);
3020         btrfs_set_extent_refs(path->nodes[0], extent_item, ref_mod);
3021         ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3022                              struct btrfs_extent_ref);
3023
3024         btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
3025         btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
3026         btrfs_set_ref_objectid(path->nodes[0], ref, owner);
3027         btrfs_set_ref_num_refs(path->nodes[0], ref, ref_mod);
3028
3029         btrfs_mark_buffer_dirty(path->nodes[0]);
3030
3031         trans->alloc_exclude_start = 0;
3032         trans->alloc_exclude_nr = 0;
3033         btrfs_free_path(path);
3034
3035         if (ret)
3036                 goto out;
3037
3038         ret = update_block_group(trans, root, ins->objectid,
3039                                  ins->offset, 1, 0);
3040         if (ret) {
3041                 printk(KERN_ERR "btrfs update block group failed for %llu "
3042                        "%llu\n", (unsigned long long)ins->objectid,
3043                        (unsigned long long)ins->offset);
3044                 BUG();
3045         }
3046 out:
3047         return ret;
3048 }
3049
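     /*
      * record a reserved extent by queueing a delayed ref for it.  Tree
      * log extents are skipped here; log replay records them through
      * btrfs_alloc_logged_extent instead.
      */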
3050 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3051                                 struct btrfs_root *root, u64 parent,
3052                                 u64 root_objectid, u64 ref_generation,
3053                                 u64 owner, struct btrfs_key *ins)
3054 {
3055         int ret;
3056
3057         if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
3058                 return 0;
3059
3060         ret = btrfs_add_delayed_ref(trans, ins->objectid,
3061                                     ins->offset, parent, root_objectid,
3062                                     ref_generation, owner,
3063                                     BTRFS_ADD_DELAYED_EXTENT, 0);
3064         BUG_ON(ret);
3065         return ret;
3066 }
3067
3068 /*
3069  * this is used by the tree logging recovery code.  It records that
3070  * an extent has been allocated and makes sure to clear the free
3071  * space cache bits as well
3072  */
3073 int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3074                                 struct btrfs_root *root, u64 parent,
3075                                 u64 root_objectid, u64 ref_generation,
3076                                 u64 owner, struct btrfs_key *ins)
3077 {
3078         int ret;
3079         struct btrfs_block_group_cache *block_group;
3080
3081         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
             BUG_ON(!block_group);
3082         mutex_lock(&block_group->cache_mutex);
3083         cache_block_group(root, block_group);
3084         mutex_unlock(&block_group->cache_mutex);
3085
3086         ret = btrfs_remove_free_space(block_group, ins->objectid,
3087                                       ins->offset);
3088         BUG_ON(ret);
3089         btrfs_put_block_group(block_group);
3090         ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3091                                             ref_generation, owner, ins, 1);
3092         return ret;
3093 }
3094
3095 /*
3096  * finds a free extent and does all the dirty work required for allocation
3097  * returns the key for the extent through ins.
3099  *
3100  * returns 0 if everything worked, non-zero otherwise.
3101  */
3102 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
3103                        struct btrfs_root *root,
3104                        u64 num_bytes, u64 parent, u64 min_alloc_size,
3105                        u64 root_objectid, u64 ref_generation,
3106                        u64 owner_objectid, u64 empty_size, u64 hint_byte,
3107                        u64 search_end, struct btrfs_key *ins, u64 data)
3108 {
3109         int ret;
3110         ret = __btrfs_reserve_extent(trans, root, num_bytes,
3111                                      min_alloc_size, empty_size, hint_byte,
3112                                      search_end, ins, data);
3113         BUG_ON(ret);
3114         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3115                 ret = btrfs_add_delayed_ref(trans, ins->objectid,
3116                                             ins->offset, parent, root_objectid,
3117                                             ref_generation, owner_objectid,
3118                                             BTRFS_ADD_DELAYED_EXTENT, 0);
3119                 BUG_ON(ret);
3120         }
3121         update_reserved_extents(root, ins->objectid, ins->offset, 1);
3122         return ret;
3123 }
3124
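     /*
      * set up a newly allocated tree block: find or create the extent
      * buffer, stamp it with our transid, lock it and mark it dirty in
      * the proper io tree
      */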
3125 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3126                                             struct btrfs_root *root,
3127                                             u64 bytenr, u32 blocksize,
3128                                             int level)
3129 {
3130         struct extent_buffer *buf;
3131
3132         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
3133         if (!buf)
3134                 return ERR_PTR(-ENOMEM);
3135         btrfs_set_header_generation(buf, trans->transid);
3136         btrfs_set_buffer_lockdep_class(buf, level);
3137         btrfs_tree_lock(buf);
3138         clean_tree_block(trans, root, buf);
3139
3140         btrfs_set_lock_blocking(buf);
3141         btrfs_set_buffer_uptodate(buf);
3142
3143         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3144                 set_extent_dirty(&root->dirty_log_pages, buf->start,
3145                          buf->start + buf->len - 1, GFP_NOFS);
3146         } else {
3147                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3148                          buf->start + buf->len - 1, GFP_NOFS);
3149         }
3150         trans->blocks_used++;
3151         /* this returns a buffer locked for blocking */
3152         return buf;
3153 }
3154
3155 /*
3156  * helper function to allocate a block for a given tree
3157  * returns the tree buffer or an ERR_PTR on failure.
3158  */
3159 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3160                                              struct btrfs_root *root,
3161                                              u32 blocksize, u64 parent,
3162                                              u64 root_objectid,
3163                                              u64 ref_generation,
3164                                              int level,
3165                                              u64 hint,
3166                                              u64 empty_size)
3167 {
3168         struct btrfs_key ins;
3169         int ret;
3170         struct extent_buffer *buf;
3171
3172         ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
3173                                  root_objectid, ref_generation, level,
3174                                  empty_size, hint, (u64)-1, &ins, 0);
3175         if (ret) {
3176                 BUG_ON(ret > 0);
3177                 return ERR_PTR(ret);
3178         }
3179
3180         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
3181                                     blocksize, level);
3182         return buf;
3183 }
3184
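     /*
      * drop the references held by the file extents in a single leaf.
      * The extents are collected and sorted by bytenr first so the frees
      * hit the extent allocation tree in order.
      */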
3185 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3186                         struct btrfs_root *root, struct extent_buffer *leaf)
3187 {
3188         u64 leaf_owner;
3189         u64 leaf_generation;
3190         struct refsort *sorted;
3191         struct btrfs_key key;
3192         struct btrfs_file_extent_item *fi;
3193         int i;
3194         int nritems;
3195         int ret;
3196         int refi = 0;
3197         int slot;
3198
3199         BUG_ON(!btrfs_is_leaf(leaf));
3200         nritems = btrfs_header_nritems(leaf);
3201         leaf_owner = btrfs_header_owner(leaf);
3202         leaf_generation = btrfs_header_generation(leaf);
3203
3204         sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
             if (!sorted)
                     return -ENOMEM;
3205         /* we do this loop twice.  The first time we build a list
3206          * of the extents we have a reference on, then we sort the list
3207          * by bytenr.  The second time around we actually do the
3208          * extent freeing.
3209          */
3210         for (i = 0; i < nritems; i++) {
3211                 u64 disk_bytenr;
3212                 cond_resched();
3213
3214                 btrfs_item_key_to_cpu(leaf, &key, i);
3215
3216                 /* only extents have references, skip everything else */
3217                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3218                         continue;
3219
3220                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3221
3222                 /* inline extents live in the btree, they don't have refs */
3223                 if (btrfs_file_extent_type(leaf, fi) ==
3224                     BTRFS_FILE_EXTENT_INLINE)
3225                         continue;
3226
3227                 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3228
3229                 /* holes don't have refs */
3230                 if (disk_bytenr == 0)
3231                         continue;
3232
3233                 sorted[refi].bytenr = disk_bytenr;
3234                 sorted[refi].slot = i;
3235                 refi++;
3236         }
3237
3238         if (refi == 0)
3239                 goto out;
3240
3241         sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3242
3243         for (i = 0; i < refi; i++) {
3244                 u64 disk_bytenr;
3245
3246                 disk_bytenr = sorted[i].bytenr;
3247                 slot = sorted[i].slot;
3248
3249                 cond_resched();
3250
3251                 btrfs_item_key_to_cpu(leaf, &key, slot);
3252                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3253                         continue;
3254
3255                 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
3256
3257                 ret = btrfs_free_extent(trans, root, disk_bytenr,
3258                                 btrfs_file_extent_disk_num_bytes(leaf, fi),
3259                                 leaf->start, leaf_owner, leaf_generation,
3260                                 key.objectid, 0);
3261                 BUG_ON(ret);
3262
3263                 atomic_inc(&root->fs_info->throttle_gen);
3264                 wake_up(&root->fs_info->transaction_throttle);
3265                 cond_resched();
3266         }
3267 out:
3268         kfree(sorted);
3269         return 0;
3270 }
3271
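     /*
      * same as btrfs_drop_leaf_ref, but works from a cached
      * btrfs_leaf_ref so the leaf itself never has to be read
      */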
3272 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3273                                         struct btrfs_root *root,
3274                                         struct btrfs_leaf_ref *ref)
3275 {
3276         int i;
3277         int ret;
3278         struct btrfs_extent_info *info;
3279         struct refsort *sorted;
3280
3281         if (ref->nritems == 0)
3282                 return 0;
3283
3284         sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
             if (!sorted)
                     return -ENOMEM;
3285         for (i = 0; i < ref->nritems; i++) {
3286                 sorted[i].bytenr = ref->extents[i].bytenr;
3287                 sorted[i].slot = i;
3288         }
3289         sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
3290
3291         /*
3292          * now free the extents.  We just sorted the ref's items by
3293          * bytenr, so the frees walk the extent allocation tree in order
3294          */
3295         for (i = 0; i < ref->nritems; i++) {
3296                 info = ref->extents + sorted[i].slot;
3297                 ret = btrfs_free_extent(trans, root, info->bytenr,
3298                                           info->num_bytes, ref->bytenr,
3299                                           ref->owner, ref->generation,
3300                                           info->objectid, 0);
3301
3302                 atomic_inc(&root->fs_info->throttle_gen);
3303                 wake_up(&root->fs_info->transaction_throttle);
3304                 cond_resched();
3305
3306                 BUG_ON(ret);
3308         }
3309
3310         kfree(sorted);
3311         return 0;
3312 }
3313
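     /*
      * helper for the snapshot dropping code: look up the reference count
      * on an extent, rescheduling as needed since this runs inside long
      * loops
      */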
3314 static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
3315                                      struct btrfs_root *root, u64 start,
3316                                      u64 len, u32 *refs)
3317 {
3318         int ret;
3319
3320         ret = btrfs_lookup_extent_ref(trans, root, start, len, refs);
3321         BUG_ON(ret);
3322
3323 #if 0 /* some debugging code in case we see problems here */
3324         /* if the refs count is one, it won't get increased again.  But
3325          * if the ref count is > 1, someone may be decreasing it at
3326          * the same time we are.
3327          */
3328         if (*refs != 1) {
3329                 struct extent_buffer *eb = NULL;
3330                 eb = btrfs_find_create_tree_block(root, start, len);
3331                 if (eb)
3332                         btrfs_tree_lock(eb);
3333
3334                 mutex_lock(&root->fs_info->alloc_mutex);
3335                 ret = lookup_extent_ref(NULL, root, start, len, refs);
3336                 BUG_ON(ret);
3337                 mutex_unlock(&root->fs_info->alloc_mutex);
3338
3339                 if (eb) {
3340                         btrfs_tree_unlock(eb);
3341                         free_extent_buffer(eb);
3342                 }
3343                 if (*refs == 1) {
3344                         printk(KERN_ERR "btrfs block %llu went down to one "
3345                                "during drop_snap\n", (unsigned long long)start);
3346                 }
3347
3348         }
3349 #endif
3350
3351         cond_resched();
3352         return ret;
3353 }
3354
3355 /*
3356  * this is used while deleting old snapshots, and it drops the refs
3357  * on a whole subtree starting from a level 1 node.
3358  *
3359  * The idea is to sort all the leaf pointers, and then drop the
3360  * ref on all the leaves in order.  Most of the time the leaves
3361  * will have ref cache entries, so no leaf IOs will be required to
3362  * find the extents they have references on.
3363  *
3364  * For each leaf, any references it has are also dropped in order
3365  *
3366  * This ends up dropping the references in something close to optimal
3367  * order for reading and modifying the extent allocation tree.
3368  */
3369 static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
3370                                         struct btrfs_root *root,
3371                                         struct btrfs_path *path)
3372 {
3373         u64 bytenr;
3374         u64 root_owner;
3375         u64 root_gen;
3376         struct extent_buffer *eb = path->nodes[1];
3377         struct extent_buffer *leaf;
3378         struct btrfs_leaf_ref *ref;
3379         struct refsort *sorted = NULL;
3380         int nritems = btrfs_header_nritems(eb);
3381         int ret;
3382         int i;
3383         int refi = 0;
3384         int slot = path->slots[1];
3385         u32 blocksize = btrfs_level_size(root, 0);
3386         u32 refs;
3387
3388         if (nritems == 0)
3389                 goto out;
3390
3391         root_owner = btrfs_header_owner(eb);
3392         root_gen = btrfs_header_generation(eb);
3393         sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
             if (!sorted)
                     return -ENOMEM;
3394
3395         /*
3396          * step one, sort all the leaf pointers so we don't scribble
3397          * randomly into the extent allocation tree
3398          */
3399         for (i = slot; i < nritems; i++) {
3400                 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
3401                 sorted[refi].slot = i;
3402                 refi++;
3403         }
3404
3405         /*
3406          * nritems won't be zero, but if we're picking up drop_snapshot
3407          * after a crash, slot might be > 0, so double check things
3408          * just in case.
3409          */
3410         if (refi == 0)
3411                 goto out;
3412
3413         sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3414
3415         /*
3416          * the first loop frees everything the leaves point to
3417          */
3418         for (i = 0; i < refi; i++) {
3419                 u64 ptr_gen;
3420
3421                 bytenr = sorted[i].bytenr;
3422
3423                 /*
3424                  * check the reference count on this leaf.  If it is > 1
3425                  * we just decrement it below and don't update any
3426                  * of the refs the leaf points to.
3427                  */
3428                 ret = drop_snap_lookup_refcount(trans, root, bytenr,
3429                                                 blocksize, &refs);
3430                 BUG_ON(ret);
3431                 if (refs != 1)
3432                         continue;
3433
3434                 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
3435
3436                 /*
3437                  * the leaf only had one reference, which means the
3438                  * only thing pointing to this leaf is the snapshot
3439                  * we're deleting.  It isn't possible for the reference
3440                  * count to increase again later
3441                  *
3442                  * The reference cache is checked for the leaf,
3443                  * and if found we'll be able to drop any refs held by
3444                  * the leaf without needing to read it in.
3445                  */
3446                 ref = btrfs_lookup_leaf_ref(root, bytenr);
3447                 if (ref && ref->generation != ptr_gen) {
3448                         btrfs_free_leaf_ref(root, ref);
3449                         ref = NULL;
3450                 }
3451                 if (ref) {
3452                         ret = cache_drop_leaf_ref(trans, root, ref);
3453                         BUG_ON(ret);
3454                         btrfs_remove_leaf_ref(root, ref);
3455                         btrfs_free_leaf_ref(root, ref);
3456                 } else {
3457                         /*
3458                          * the leaf wasn't in the reference cache, so
3459                          * we have to read it.
3460                          */
3461                         leaf = read_tree_block(root, bytenr, blocksize,
3462                                                ptr_gen);
3463                         ret = btrfs_drop_leaf_ref(trans, root, leaf);
3464                         BUG_ON(ret);
3465                         free_extent_buffer(leaf);
3466                 }
3467                 atomic_inc(&root->fs_info->throttle_gen);
3468                 wake_up(&root->fs_info->transaction_throttle);
3469                 cond_resched();
3470         }
3471
3472         /*
3473          * run through the loop again to free the refs on the leaves.
3474          * This is faster than doing it in the loop above because
3475          * the leaves are likely to be clustered together.  We end up
3476          * working in nice chunks on the extent allocation tree.
3477          */
3478         for (i = 0; i < refi; i++) {
3479                 bytenr = sorted[i].bytenr;
3480                 ret = btrfs_free_extent(trans, root, bytenr,
3481                                         blocksize, eb->start,
3482                                         root_owner, root_gen, 0, 1);
3483                 BUG_ON(ret);
3484
3485                 atomic_inc(&root->fs_info->throttle_gen);
3486                 wake_up(&root->fs_info->transaction_throttle);
3487                 cond_resched();
3488         }
3489 out:
3490         kfree(sorted);
3491
3492         /*
3493          * update the path to show we've processed the entire level 1
3494          * node.  This will get saved into the root's drop_snapshot_progress
3495          * field so these drops are not repeated again if this transaction
3496          * commits.
3497          */
3498         path->slots[1] = nritems;
3499         return 0;
3500 }
3501
3502 /*
3503  * helper function for drop_snapshot, this walks down the tree dropping ref
3504  * counts as it goes.
3505  */
3506 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3507                                    struct btrfs_root *root,
3508                                    struct btrfs_path *path, int *level)
3509 {
3510         u64 root_owner;
3511         u64 root_gen;
3512         u64 bytenr;
3513         u64 ptr_gen;
3514         struct extent_buffer *next;
3515         struct extent_buffer *cur;
3516         struct extent_buffer *parent;
3517         u32 blocksize;
3518         int ret;
3519         u32 refs;
3520
3521         WARN_ON(*level < 0);
3522         WARN_ON(*level >= BTRFS_MAX_LEVEL);
3523         ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
3524                                 path->nodes[*level]->len, &refs);
3525         BUG_ON(ret);
3526         if (refs > 1)
3527                 goto out;
3528
3529         /*
3530          * walk down to the last node level and free all the leaves
3531          */
3532         while (*level >= 0) {
3533                 WARN_ON(*level < 0);
3534                 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3535                 cur = path->nodes[*level];
3536
3537                 WARN_ON(btrfs_header_level(cur) != *level);
3539
3540                 if (path->slots[*level] >=
3541                     btrfs_header_nritems(cur))
3542                         break;
3543
3544                 /* the new code goes down to level 1 and does all the
3545                  * leaves pointed to that node in bulk.  So, this check
3546                  * for level 0 will always be false.
3547                  *
3548                  * But, the disk format allows the drop_snapshot_progress
3549                  * field in the root to leave things in a state where
3550                  * a leaf will need cleaning up here.  If someone crashes
3551                  * with the old code and then boots with the new code,
3552                  * we might find a leaf here.
3553                  */
3554                 if (*level == 0) {
3555                         ret = btrfs_drop_leaf_ref(trans, root, cur);
3556                         BUG_ON(ret);
3557                         break;
3558                 }
3559
3560                 /*
3561                  * once we get to level one, process the whole node
3562                  * at once, including everything below it.
3563                  */
3564                 if (*level == 1) {
3565                         ret = drop_level_one_refs(trans, root, path);
3566                         BUG_ON(ret);
3567                         break;
3568                 }
3569
3570                 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3571                 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3572                 blocksize = btrfs_level_size(root, *level - 1);
3573
3574                 ret = drop_snap_lookup_refcount(trans, root, bytenr,
3575                                                 blocksize, &refs);
3576                 BUG_ON(ret);
3577
3578                 /*
3579                  * if there is more than one reference, we don't need
3580                  * to read that node to drop any references it has.  We
3581                  * just drop the ref we hold on that node and move on to the
3582                  * next slot in this level.
3583                  */
3584                 if (refs != 1) {
3585                         parent = path->nodes[*level];
3586                         root_owner = btrfs_header_owner(parent);
3587                         root_gen = btrfs_header_generation(parent);
3588                         path->slots[*level]++;
3589
3590                         ret = btrfs_free_extent(trans, root, bytenr,
3591                                                 blocksize, parent->start,
3592                                                 root_owner, root_gen,
3593                                                 *level - 1, 1);
3594                         BUG_ON(ret);
3595
3596                         atomic_inc(&root->fs_info->throttle_gen);
3597                         wake_up(&root->fs_info->transaction_throttle);
3598                         cond_resched();
3599
3600                         continue;
3601                 }
3602
3603                 /*
3604                  * we need to keep freeing things in the next level down.
3605                  * read the block and loop around to process it
3606                  */
3607                 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3608                 WARN_ON(*level <= 0);
3609                 if (path->nodes[*level-1])
3610                         free_extent_buffer(path->nodes[*level-1]);
3611                 path->nodes[*level-1] = next;
3612                 *level = btrfs_header_level(next);
3613                 path->slots[*level] = 0;
3614                 cond_resched();
3615         }
3616 out:
3617         WARN_ON(*level < 0);
3618         WARN_ON(*level >= BTRFS_MAX_LEVEL);
3619
3620         if (path->nodes[*level] == root->node) {
3621                 parent = path->nodes[*level];
3622                 bytenr = path->nodes[*level]->start;
3623         } else {
3624                 parent = path->nodes[*level + 1];
3625                 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
3626         }
3627
3628         blocksize = btrfs_level_size(root, *level);
3629         root_owner = btrfs_header_owner(parent);
3630         root_gen = btrfs_header_generation(parent);
3631
3632         /*
3633          * cleanup and free the reference on the last node
3634          * we processed
3635          */
3636         ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3637                                   parent->start, root_owner, root_gen,
3638                                   *level, 1);
3639         free_extent_buffer(path->nodes[*level]);
3640         path->nodes[*level] = NULL;
3641
3642         *level += 1;
3643         BUG_ON(ret);
3644
3645         cond_resched();
3646         return 0;
3647 }
3648
3649 /*
3650  * helper function for drop_subtree, this function is similar to
3651  * walk_down_tree. The main difference is that it checks reference
3652  * counts while tree blocks are locked.
3653  */
3654 static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3655                                       struct btrfs_root *root,
3656                                       struct btrfs_path *path, int *level)
3657 {
3658         struct extent_buffer *next;
3659         struct extent_buffer *cur;
3660         struct extent_buffer *parent;
3661         u64 bytenr;
3662         u64 ptr_gen;
3663         u32 blocksize;
3664         u32 refs;
3665         int ret;
3666
3667         cur = path->nodes[*level];
3668         ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
3669                                       &refs);
3670         BUG_ON(ret);
3671         if (refs > 1)
3672                 goto out;
3673
3674         while (*level >= 0) {
3675                 cur = path->nodes[*level];
3676                 if (*level == 0) {
3677                         ret = btrfs_drop_leaf_ref(trans, root, cur);
3678                         BUG_ON(ret);
3679                         clean_tree_block(trans, root, cur);
3680                         break;
3681                 }
3682                 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3683                         clean_tree_block(trans, root, cur);
3684                         break;
3685                 }
3686
3687                 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3688                 blocksize = btrfs_level_size(root, *level - 1);
3689                 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3690
3691                 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3692                 btrfs_tree_lock(next);
3693                 btrfs_set_lock_blocking(next);
3694
3695                 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3696                                               &refs);
3697                 BUG_ON(ret);
3698                 if (refs > 1) {
3699                         parent = path->nodes[*level];
3700                         ret = btrfs_free_extent(trans, root, bytenr,
3701                                         blocksize, parent->start,
3702                                         btrfs_header_owner(parent),
3703                                         btrfs_header_generation(parent),
3704                                         *level - 1, 1);
3705                         BUG_ON(ret);
3706                         path->slots[*level]++;
3707                         btrfs_tree_unlock(next);
3708                         free_extent_buffer(next);
3709                         continue;
3710                 }
3711
3712                 *level = btrfs_header_level(next);
3713                 path->nodes[*level] = next;
3714                 path->slots[*level] = 0;
3715                 path->locks[*level] = 1;
3716                 cond_resched();
3717         }
3718 out:
3719         parent = path->nodes[*level + 1];
3720         bytenr = path->nodes[*level]->start;
3721         blocksize = path->nodes[*level]->len;
3722
3723         ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3724                         parent->start, btrfs_header_owner(parent),
3725                         btrfs_header_generation(parent), *level, 1);
3726         BUG_ON(ret);
3727
3728         if (path->locks[*level]) {
3729                 btrfs_tree_unlock(path->nodes[*level]);
3730                 path->locks[*level] = 0;
3731         }
3732         free_extent_buffer(path->nodes[*level]);
3733         path->nodes[*level] = NULL;
3734         *level += 1;
3735         cond_resched();
3736         return 0;
3737 }
3738
3739 /*
3740  * helper for dropping snapshots.  This walks back up the tree in the path
3741  * to find the first node higher up where we haven't yet gone through
3742  * all the slots
3743  */
3744 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3745                                  struct btrfs_root *root,
3746                                  struct btrfs_path *path,
3747                                  int *level, int max_level)
3748 {
3749         u64 root_owner;
3750         u64 root_gen;
3751         struct btrfs_root_item *root_item = &root->root_item;
3752         int i;
3753         int slot;
3754         int ret;
3755
3756         for (i = *level; i < max_level && path->nodes[i]; i++) {
3757                 slot = path->slots[i];
3758                 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3759                         struct extent_buffer *node;
3760                         struct btrfs_disk_key disk_key;
3761
3762                         /*
3763                          * there is more work to do in this level.
3764                          * Update the drop_progress marker to reflect
3765                          * the work we've done so far, and then bump
3766                          * the slot number
3767                          */
3768                         node = path->nodes[i];
3769                         path->slots[i]++;
3770                         *level = i;
3771                         WARN_ON(*level == 0);
3772                         btrfs_node_key(node, &disk_key, path->slots[i]);
3773                         memcpy(&root_item->drop_progress,
3774                                &disk_key, sizeof(disk_key));
3775                         root_item->drop_level = i;
3776                         return 0;
3777                 } else {
3778                         struct extent_buffer *parent;
3779
3780                         /*
3781                          * this whole node is done, free our reference
3782                          * on it and go up one level
3783                          */
3784                         if (path->nodes[*level] == root->node)
3785                                 parent = path->nodes[*level];
3786                         else
3787                                 parent = path->nodes[*level + 1];
3788
3789                         root_owner = btrfs_header_owner(parent);
3790                         root_gen = btrfs_header_generation(parent);
3791
3792                         clean_tree_block(trans, root, path->nodes[*level]);
3793                         ret = btrfs_free_extent(trans, root,
3794                                                 path->nodes[*level]->start,
3795                                                 path->nodes[*level]->len,
3796                                                 parent->start, root_owner,
3797                                                 root_gen, *level, 1);
3798                         BUG_ON(ret);
3799                         if (path->locks[*level]) {
3800                                 btrfs_tree_unlock(path->nodes[*level]);
3801                                 path->locks[*level] = 0;
3802                         }
3803                         free_extent_buffer(path->nodes[*level]);
3804                         path->nodes[*level] = NULL;
3805                         *level = i + 1;
3806                 }
3807         }
3808         return 1;
3809 }
3810
3811 /*
3812  * drop the reference count on the tree rooted at 'snap'.  This traverses
3813  * the tree freeing any blocks that have a ref count of zero after being
3814  * decremented.
3815  */
3816 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3817                         *root)
3818 {
3819         int ret = 0;
3820         int wret;
3821         int level;
3822         struct btrfs_path *path;
3823         int i;
3824         int orig_level;
3825         int update_count;
3826         struct btrfs_root_item *root_item = &root->root_item;
3827
3828         WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3829         path = btrfs_alloc_path();
3830         BUG_ON(!path);
3831
3832         level = btrfs_header_level(root->node);
3833         orig_level = level;
3834         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3835                 path->nodes[level] = root->node;
3836                 extent_buffer_get(root->node);
3837                 path->slots[level] = 0;
3838         } else {
3839                 struct btrfs_key key;
3840                 struct btrfs_disk_key found_key;
3841                 struct extent_buffer *node;
3842
3843                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3844                 level = root_item->drop_level;
3845                 path->lowest_level = level;
3846                 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3847                 if (wret < 0) {
3848                         ret = wret;
3849                         goto out;
3850                 }
3851                 node = path->nodes[level];
3852                 btrfs_node_key(node, &found_key, path->slots[level]);
3853                 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3854                                sizeof(found_key)));
3855                 /*
3856                  * unlock our path, this is safe because only this
3857                  * function is allowed to delete this snapshot
3858                  */
3859                 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3860                         if (path->nodes[i] && path->locks[i]) {
3861                                 path->locks[i] = 0;
3862                                 btrfs_tree_unlock(path->nodes[i]);
3863                         }
3864                 }
3865         }
3866         while (1) {
3867                 unsigned long update;
3868                 wret = walk_down_tree(trans, root, path, &level);
3869                 if (wret > 0)
3870                         break;
3871                 if (wret < 0)
3872                         ret = wret;
3873
3874                 wret = walk_up_tree(trans, root, path, &level,
3875                                     BTRFS_MAX_LEVEL);
3876                 if (wret > 0)
3877                         break;
3878                 if (wret < 0)
3879                         ret = wret;
3880                 if (trans->transaction->in_commit ||
3881                     trans->transaction->delayed_refs.flushing) {
3882                         ret = -EAGAIN;
3883                         break;
3884                 }
3885                 atomic_inc(&root->fs_info->throttle_gen);
3886                 wake_up(&root->fs_info->transaction_throttle);
3887                 for (update_count = 0; update_count < 16; update_count++) {
3888                         update = trans->delayed_ref_updates;
3889                         trans->delayed_ref_updates = 0;
3890                         if (update)
3891                                 btrfs_run_delayed_refs(trans, root, update);
3892                         else
3893                                 break;
3894                 }
3895         }
3896         for (i = 0; i <= orig_level; i++) {
3897                 if (path->nodes[i]) {
3898                         free_extent_buffer(path->nodes[i]);
3899                         path->nodes[i] = NULL;
3900                 }
3901         }
3902 out:
3903         btrfs_free_path(path);
3904         return ret;
3905 }
3906
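     /*
      * drop all the references in the subtree rooted at 'node'.  The
      * caller must hold tree locks on both 'node' and its 'parent'.
      */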
3907 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3908                         struct btrfs_root *root,
3909                         struct extent_buffer *node,
3910                         struct extent_buffer *parent)
3911 {
3912         struct btrfs_path *path;
3913         int level;
3914         int parent_level;
3915         int ret = 0;
3916         int wret;
3917
3918         path = btrfs_alloc_path();
3919         BUG_ON(!path);
3920
3921         btrfs_assert_tree_locked(parent);
3922         parent_level = btrfs_header_level(parent);
3923         extent_buffer_get(parent);
3924         path->nodes[parent_level] = parent;
3925         path->slots[parent_level] = btrfs_header_nritems(parent);
3926
3927         btrfs_assert_tree_locked(node);
3928         level = btrfs_header_level(node);
3929         extent_buffer_get(node);
3930         path->nodes[level] = node;
3931         path->slots[level] = 0;
3932
3933         while (1) {
3934                 wret = walk_down_subtree(trans, root, path, &level);
3935                 if (wret < 0)
3936                         ret = wret;
3937                 if (wret != 0)
3938                         break;
3939
3940                 wret = walk_up_tree(trans, root, path, &level, parent_level);
3941                 if (wret < 0)
3942                         ret = wret;
3943                 if (wret != 0)
3944                         break;
3945         }
3946
3947         btrfs_free_path(path);
3948         return ret;
3949 }
3950
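     /* return the index of the last page to read ahead, capped at 'last' */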
3951 static unsigned long calc_ra(unsigned long start, unsigned long last,
3952                              unsigned long nr)
3953 {
3954         return min(last, start + nr - 1);
3955 }
3956
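     /*
      * pull the pages covering [start, start + len) of the relocation
      * inode through the page cache and mark them dirty and delalloc so
      * the data is written out again at its new location
      */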
3957 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
3958                                          u64 len)
3959 {
3960         u64 page_start;
3961         u64 page_end;
3962         unsigned long first_index;
3963         unsigned long last_index;
3964         unsigned long i;
3965         struct page *page;
3966         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3967         struct file_ra_state *ra;
3968         struct btrfs_ordered_extent *ordered;
3969         unsigned int total_read = 0;
3970         unsigned int total_dirty = 0;
3971         int ret = 0;
3972
3973         ra = kzalloc(sizeof(*ra), GFP_NOFS);
             if (!ra)
                     return -ENOMEM;
3974
3975         mutex_lock(&inode->i_mutex);
3976         first_index = start >> PAGE_CACHE_SHIFT;
3977         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3978
3979         /* make sure the dirty trick played by the caller works */
3980         ret = invalidate_inode_pages2_range(inode->i_mapping,
3981                                             first_index, last_index);
3982         if (ret)
3983                 goto out_unlock;
3984
3985         file_ra_state_init(ra, inode->i_mapping);
3986
3987         for (i = first_index; i <= last_index; i++) {
3988                 if (total_read % ra->ra_pages == 0) {
3989                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
3990                                        calc_ra(i, last_index, ra->ra_pages));
3991                 }
3992                 total_read++;
3993 again:
3994                 BUG_ON(((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode));
3996                 page = grab_cache_page(inode->i_mapping, i);
3997                 if (!page) {
3998                         ret = -ENOMEM;
3999                         goto out_unlock;
4000                 }
4001                 if (!PageUptodate(page)) {
4002                         btrfs_readpage(NULL, page);
4003                         lock_page(page);
4004                         if (!PageUptodate(page)) {
4005                                 unlock_page(page);
4006                                 page_cache_release(page);
4007                                 ret = -EIO;
4008                                 goto out_unlock;
4009                         }
4010                 }
4011                 wait_on_page_writeback(page);
4012
4013                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
4014                 page_end = page_start + PAGE_CACHE_SIZE - 1;
4015                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4016
4017                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4018                 if (ordered) {
4019                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4020                         unlock_page(page);
4021                         page_cache_release(page);
4022                         btrfs_start_ordered_extent(inode, ordered, 1);
4023                         btrfs_put_ordered_extent(ordered);
4024                         goto again;
4025                 }
4026                 set_page_extent_mapped(page);
4027
4028                 if (i == first_index)
4029                         set_extent_bits(io_tree, page_start, page_end,
4030                                         EXTENT_BOUNDARY, GFP_NOFS);
4031                 btrfs_set_extent_delalloc(inode, page_start, page_end);
4032
4033                 set_page_dirty(page);
4034                 total_dirty++;
4035
4036                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4037                 unlock_page(page);
4038                 page_cache_release(page);
4039         }
4040
4041 out_unlock:
4042         kfree(ra);
4043         mutex_unlock(&inode->i_mutex);
4044         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
4045         return ret;
4046 }
4047
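     /*
      * insert a pinned extent mapping for the extent being relocated so
      * btrfs_readpage can find the existing data, then run the pages
      * through relocate_inode_pages
      */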
4048 static noinline int relocate_data_extent(struct inode *reloc_inode,
4049                                          struct btrfs_key *extent_key,
4050                                          u64 offset)
4051 {
4052         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4053         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
4054         struct extent_map *em;
4055         u64 start = extent_key->objectid - offset;
4056         u64 end = start + extent_key->offset - 1;
4057
4058         em = alloc_extent_map(GFP_NOFS);
4059         BUG_ON(!em || IS_ERR(em));
4060
4061         em->start = start;
4062         em->len = extent_key->offset;
4063         em->block_len = extent_key->offset;
4064         em->block_start = extent_key->objectid;
4065         em->bdev = root->fs_info->fs_devices->latest_bdev;
4066         set_bit(EXTENT_FLAG_PINNED, &em->flags);
4067
        /* set up an extent map so btrfs_readpage reads from the old location */
4069         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4070         while (1) {
4071                 int ret;
4072                 spin_lock(&em_tree->lock);
4073                 ret = add_extent_mapping(em_tree, em);
4074                 spin_unlock(&em_tree->lock);
4075                 if (ret != -EEXIST) {
4076                         free_extent_map(em);
4077                         break;
4078                 }
4079                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
4080         }
4081         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4082
4083         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
4084 }
4085
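/*
 * A btrfs_ref_path describes one chain of back references from an
 * extent up to a tree root: nodes[] holds the block bytenr at each
 * level of the walk, while node_keys[] and new_nodes[] carry the keys
 * and relocated block addresses used when the path is replayed
 * through a reloc tree.
 */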
4086 struct btrfs_ref_path {
4087         u64 extent_start;
4088         u64 nodes[BTRFS_MAX_LEVEL];
4089         u64 root_objectid;
4090         u64 root_generation;
4091         u64 owner_objectid;
4092         u32 num_refs;
4093         int lowest_level;
4094         int current_level;
4095         int shared_level;
4096
4097         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
4098         u64 new_nodes[BTRFS_MAX_LEVEL];
4099 };
4100
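/* in-memory mirror of the btrfs_file_extent_item fields we relocate */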
4101 struct disk_extent {
4102         u64 ram_bytes;
4103         u64 disk_bytenr;
4104         u64 disk_num_bytes;
4105         u64 offset;
4106         u64 num_bytes;
4107         u8 compression;
4108         u8 encryption;
4109         u16 other_encoding;
4110 };
4111
4112 static int is_cowonly_root(u64 root_objectid)
4113 {
4114         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
4115             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
4116             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
4117             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
4118             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4119             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
4120                 return 1;
4121         return 0;
4122 }
4123
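/*
 * walk one chain of back references for ref_path->extent_start.  On
 * the first call the walk starts at the extent itself and climbs
 * towards a root; later calls back down one level, pick the next
 * sibling reference and climb again.  Returns 0 once a root is
 * reached, 1 when no more paths exist and a negative errno on error.
 */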
4124 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
4125                                     struct btrfs_root *extent_root,
4126                                     struct btrfs_ref_path *ref_path,
4127                                     int first_time)
4128 {
4129         struct extent_buffer *leaf;
4130         struct btrfs_path *path;
4131         struct btrfs_extent_ref *ref;
4132         struct btrfs_key key;
4133         struct btrfs_key found_key;
4134         u64 bytenr;
4135         u32 nritems;
4136         int level;
4137         int ret = 1;
4138
4139         path = btrfs_alloc_path();
4140         if (!path)
4141                 return -ENOMEM;
4142
4143         if (first_time) {
4144                 ref_path->lowest_level = -1;
4145                 ref_path->current_level = -1;
4146                 ref_path->shared_level = -1;
4147                 goto walk_up;
4148         }
4149 walk_down:
4150         level = ref_path->current_level - 1;
4151         while (level >= -1) {
4152                 u64 parent;
4153                 if (level < ref_path->lowest_level)
4154                         break;
4155
4156                 if (level >= 0)
4157                         bytenr = ref_path->nodes[level];
4158                 else
4159                         bytenr = ref_path->extent_start;
4160                 BUG_ON(bytenr == 0);
4161
4162                 parent = ref_path->nodes[level + 1];
4163                 ref_path->nodes[level + 1] = 0;
4164                 ref_path->current_level = level;
4165                 BUG_ON(parent == 0);
4166
4167                 key.objectid = bytenr;
4168                 key.offset = parent + 1;
4169                 key.type = BTRFS_EXTENT_REF_KEY;
4170
4171                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4172                 if (ret < 0)
4173                         goto out;
4174                 BUG_ON(ret == 0);
4175
4176                 leaf = path->nodes[0];
4177                 nritems = btrfs_header_nritems(leaf);
4178                 if (path->slots[0] >= nritems) {
4179                         ret = btrfs_next_leaf(extent_root, path);
4180                         if (ret < 0)
4181                                 goto out;
4182                         if (ret > 0)
4183                                 goto next;
4184                         leaf = path->nodes[0];
4185                 }
4186
4187                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4188                 if (found_key.objectid == bytenr &&
4189                     found_key.type == BTRFS_EXTENT_REF_KEY) {
4190                         if (level < ref_path->shared_level)
4191                                 ref_path->shared_level = level;
4192                         goto found;
4193                 }
4194 next:
4195                 level--;
4196                 btrfs_release_path(extent_root, path);
4197                 cond_resched();
4198         }
4199         /* reached lowest level */
4200         ret = 1;
4201         goto out;
4202 walk_up:
4203         level = ref_path->current_level;
4204         while (level < BTRFS_MAX_LEVEL - 1) {
4205                 u64 ref_objectid;
4206
4207                 if (level >= 0)
4208                         bytenr = ref_path->nodes[level];
4209                 else
4210                         bytenr = ref_path->extent_start;
4211
4212                 BUG_ON(bytenr == 0);
4213
4214                 key.objectid = bytenr;
4215                 key.offset = 0;
4216                 key.type = BTRFS_EXTENT_REF_KEY;
4217
4218                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4219                 if (ret < 0)
4220                         goto out;
4221
4222                 leaf = path->nodes[0];
4223                 nritems = btrfs_header_nritems(leaf);
4224                 if (path->slots[0] >= nritems) {
4225                         ret = btrfs_next_leaf(extent_root, path);
4226                         if (ret < 0)
4227                                 goto out;
4228                         if (ret > 0) {
4229                                 /* the extent was freed by someone */
4230                                 if (ref_path->lowest_level == level)
4231                                         goto out;
4232                                 btrfs_release_path(extent_root, path);
4233                                 goto walk_down;
4234                         }
4235                         leaf = path->nodes[0];
4236                 }
4237
4238                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4239                 if (found_key.objectid != bytenr ||
4240                                 found_key.type != BTRFS_EXTENT_REF_KEY) {
4241                         /* the extent was freed by someone */
4242                         if (ref_path->lowest_level == level) {
4243                                 ret = 1;
4244                                 goto out;
4245                         }
4246                         btrfs_release_path(extent_root, path);
4247                         goto walk_down;
4248                 }
4249 found:
4250                 ref = btrfs_item_ptr(leaf, path->slots[0],
4251                                 struct btrfs_extent_ref);
4252                 ref_objectid = btrfs_ref_objectid(leaf, ref);
4253                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4254                         if (first_time) {
4255                                 level = (int)ref_objectid;
4256                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
4257                                 ref_path->lowest_level = level;
4258                                 ref_path->current_level = level;
4259                                 ref_path->nodes[level] = bytenr;
4260                         } else {
4261                                 WARN_ON(ref_objectid != level);
4262                         }
4263                 } else {
4264                         WARN_ON(level != -1);
4265                 }
4266                 first_time = 0;
4267
4268                 if (ref_path->lowest_level == level) {
4269                         ref_path->owner_objectid = ref_objectid;
4270                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
4271                 }
4272
                /*
                 * the block is a tree root or the block isn't in a
                 * reference counted tree.
                 */
4277                 if (found_key.objectid == found_key.offset ||
4278                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
4279                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4280                         ref_path->root_generation =
4281                                 btrfs_ref_generation(leaf, ref);
4282                         if (level < 0) {
4283                                 /* special reference from the tree log */
4284                                 ref_path->nodes[0] = found_key.offset;
4285                                 ref_path->current_level = 0;
4286                         }
4287                         ret = 0;
4288                         goto out;
4289                 }
4290
4291                 level++;
4292                 BUG_ON(ref_path->nodes[level] != 0);
4293                 ref_path->nodes[level] = found_key.offset;
4294                 ref_path->current_level = level;
4295
4296                 /*
4297                  * the reference was created in the running transaction,
4298                  * no need to continue walking up.
4299                  */
4300                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
4301                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4302                         ref_path->root_generation =
4303                                 btrfs_ref_generation(leaf, ref);
4304                         ret = 0;
4305                         goto out;
4306                 }
4307
4308                 btrfs_release_path(extent_root, path);
4309                 cond_resched();
4310         }
4311         /* reached max tree level, but no tree root found. */
4312         BUG();
4313 out:
4314         btrfs_free_path(path);
4315         return ret;
4316 }
4317
4318 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
4319                                 struct btrfs_root *extent_root,
4320                                 struct btrfs_ref_path *ref_path,
4321                                 u64 extent_start)
4322 {
4323         memset(ref_path, 0, sizeof(*ref_path));
4324         ref_path->extent_start = extent_start;
4325
4326         return __next_ref_path(trans, extent_root, ref_path, 1);
4327 }
4328
4329 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4330                                struct btrfs_root *extent_root,
4331                                struct btrfs_ref_path *ref_path)
4332 {
4333         return __next_ref_path(trans, extent_root, ref_path, 0);
4334 }
4335
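/*
 * collect the new location(s) of the data that replaces the extent
 * described by extent_key.  The file extents of reloc_inode covering
 * the same range are copied into *extents; if no_fragment is set and
 * more than one replacement extent would be needed, bail out with
 * ret == 1.
 */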
4336 static noinline int get_new_locations(struct inode *reloc_inode,
4337                                       struct btrfs_key *extent_key,
4338                                       u64 offset, int no_fragment,
4339                                       struct disk_extent **extents,
4340                                       int *nr_extents)
4341 {
4342         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4343         struct btrfs_path *path;
4344         struct btrfs_file_extent_item *fi;
4345         struct extent_buffer *leaf;
4346         struct disk_extent *exts = *extents;
4347         struct btrfs_key found_key;
4348         u64 cur_pos;
4349         u64 last_byte;
4350         u32 nritems;
4351         int nr = 0;
4352         int max = *nr_extents;
4353         int ret;
4354
4355         WARN_ON(!no_fragment && *extents);
4356         if (!exts) {
4357                 max = 1;
4358                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
4359                 if (!exts)
4360                         return -ENOMEM;
4361         }
4362
4363         path = btrfs_alloc_path();
4364         BUG_ON(!path);
4365
4366         cur_pos = extent_key->objectid - offset;
4367         last_byte = extent_key->objectid + extent_key->offset;
4368         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
4369                                        cur_pos, 0);
4370         if (ret < 0)
4371                 goto out;
4372         if (ret > 0) {
4373                 ret = -ENOENT;
4374                 goto out;
4375         }
4376
4377         while (1) {
4378                 leaf = path->nodes[0];
4379                 nritems = btrfs_header_nritems(leaf);
4380                 if (path->slots[0] >= nritems) {
4381                         ret = btrfs_next_leaf(root, path);
4382                         if (ret < 0)
4383                                 goto out;
4384                         if (ret > 0)
4385                                 break;
4386                         leaf = path->nodes[0];
4387                 }
4388
4389                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4390                 if (found_key.offset != cur_pos ||
4391                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
4392                     found_key.objectid != reloc_inode->i_ino)
4393                         break;
4394
4395                 fi = btrfs_item_ptr(leaf, path->slots[0],
4396                                     struct btrfs_file_extent_item);
4397                 if (btrfs_file_extent_type(leaf, fi) !=
4398                     BTRFS_FILE_EXTENT_REG ||
4399                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4400                         break;
4401
                if (nr == max) {
                        struct disk_extent *old = exts;
                        max *= 2;
                        exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
                        if (!exts) {
                                if (old != *extents)
                                        kfree(old);
                                ret = -ENOMEM;
                                goto out;
                        }
                        memcpy(exts, old, sizeof(*exts) * nr);
                        if (old != *extents)
                                kfree(old);
                }
4410
4411                 exts[nr].disk_bytenr =
4412                         btrfs_file_extent_disk_bytenr(leaf, fi);
4413                 exts[nr].disk_num_bytes =
4414                         btrfs_file_extent_disk_num_bytes(leaf, fi);
4415                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
4416                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4417                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
4418                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
4419                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
4420                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
4421                                                                            fi);
4422                 BUG_ON(exts[nr].offset > 0);
4423                 BUG_ON(exts[nr].compression || exts[nr].encryption);
4424                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
4425
4426                 cur_pos += exts[nr].num_bytes;
4427                 nr++;
4428
4429                 if (cur_pos + offset >= last_byte)
4430                         break;
4431
4432                 if (no_fragment) {
4433                         ret = 1;
4434                         goto out;
4435                 }
4436                 path->slots[0]++;
4437         }
4438
4439         BUG_ON(cur_pos + offset > last_byte);
4440         if (cur_pos + offset < last_byte) {
4441                 ret = -ENOENT;
4442                 goto out;
4443         }
4444         ret = 0;
4445 out:
4446         btrfs_free_path(path);
4447         if (ret) {
4448                 if (exts != *extents)
4449                         kfree(exts);
4450         } else {
4451                 *extents = exts;
4452                 *nr_extents = nr;
4453         }
4454         return ret;
4455 }
4456
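/*
 * rewrite every file extent item in 'root' that still points at the
 * extent described by extent_key so that it points at the relocated
 * copy in new_extents.  Each file range is locked (waiting out any
 * ordered IO) before its extent pointer is switched and the extent
 * reference counts are updated.
 */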
4457 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4458                                         struct btrfs_root *root,
4459                                         struct btrfs_path *path,
4460                                         struct btrfs_key *extent_key,
4461                                         struct btrfs_key *leaf_key,
4462                                         struct btrfs_ref_path *ref_path,
4463                                         struct disk_extent *new_extents,
4464                                         int nr_extents)
4465 {
4466         struct extent_buffer *leaf;
4467         struct btrfs_file_extent_item *fi;
4468         struct inode *inode = NULL;
4469         struct btrfs_key key;
4470         u64 lock_start = 0;
4471         u64 lock_end = 0;
4472         u64 num_bytes;
4473         u64 ext_offset;
4474         u64 search_end = (u64)-1;
4475         u32 nritems;
        int nr_scanned = 0;
4477         int extent_locked = 0;
4478         int extent_type;
4479         int ret;
4480
4481         memcpy(&key, leaf_key, sizeof(key));
4482         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4483                 if (key.objectid < ref_path->owner_objectid ||
4484                     (key.objectid == ref_path->owner_objectid &&
4485                      key.type < BTRFS_EXTENT_DATA_KEY)) {
4486                         key.objectid = ref_path->owner_objectid;
4487                         key.type = BTRFS_EXTENT_DATA_KEY;
4488                         key.offset = 0;
4489                 }
4490         }
4491
4492         while (1) {
4493                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4494                 if (ret < 0)
4495                         goto out;
4496
4497                 leaf = path->nodes[0];
4498                 nritems = btrfs_header_nritems(leaf);
4499 next:
4500                 if (extent_locked && ret > 0) {
4501                         /*
4502                          * the file extent item was modified by someone
4503                          * before the extent got locked.
4504                          */
4505                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4506                                       lock_end, GFP_NOFS);
4507                         extent_locked = 0;
4508                 }
4509
4510                 if (path->slots[0] >= nritems) {
                        if (++nr_scanned > 2)
4512                                 break;
4513
4514                         BUG_ON(extent_locked);
4515                         ret = btrfs_next_leaf(root, path);
4516                         if (ret < 0)
4517                                 goto out;
4518                         if (ret > 0)
4519                                 break;
4520                         leaf = path->nodes[0];
4521                         nritems = btrfs_header_nritems(leaf);
4522                 }
4523
4524                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4525
4526                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4527                         if ((key.objectid > ref_path->owner_objectid) ||
4528                             (key.objectid == ref_path->owner_objectid &&
4529                              key.type > BTRFS_EXTENT_DATA_KEY) ||
4530                             key.offset >= search_end)
4531                                 break;
4532                 }
4533
4534                 if (inode && key.objectid != inode->i_ino) {
4535                         BUG_ON(extent_locked);
4536                         btrfs_release_path(root, path);
4537                         mutex_unlock(&inode->i_mutex);
4538                         iput(inode);
4539                         inode = NULL;
4540                         continue;
4541                 }
4542
4543                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
4544                         path->slots[0]++;
4545                         ret = 1;
4546                         goto next;
4547                 }
4548                 fi = btrfs_item_ptr(leaf, path->slots[0],
4549                                     struct btrfs_file_extent_item);
4550                 extent_type = btrfs_file_extent_type(leaf, fi);
4551                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
4552                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
4553                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
4554                      extent_key->objectid)) {
4555                         path->slots[0]++;
4556                         ret = 1;
4557                         goto next;
4558                 }
4559
4560                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4561                 ext_offset = btrfs_file_extent_offset(leaf, fi);
4562
4563                 if (search_end == (u64)-1) {
4564                         search_end = key.offset - ext_offset +
4565                                 btrfs_file_extent_ram_bytes(leaf, fi);
4566                 }
4567
4568                 if (!extent_locked) {
4569                         lock_start = key.offset;
4570                         lock_end = lock_start + num_bytes - 1;
4571                 } else {
4572                         if (lock_start > key.offset ||
4573                             lock_end + 1 < key.offset + num_bytes) {
4574                                 unlock_extent(&BTRFS_I(inode)->io_tree,
4575                                               lock_start, lock_end, GFP_NOFS);
4576                                 extent_locked = 0;
4577                         }
4578                 }
4579
4580                 if (!inode) {
4581                         btrfs_release_path(root, path);
4582
                        inode = btrfs_iget_locked(root->fs_info->sb,
                                                  key.objectid, root);
                        if (!inode) {
                                key.offset = (u64)-1;
                                goto skip;
                        }
                        if (inode->i_state & I_NEW) {
                                BTRFS_I(inode)->root = root;
                                BTRFS_I(inode)->location.objectid =
                                        key.objectid;
                                BTRFS_I(inode)->location.type =
                                        BTRFS_INODE_ITEM_KEY;
                                BTRFS_I(inode)->location.offset = 0;
                                btrfs_read_locked_inode(inode);
                                unlock_new_inode(inode);
                        }
4595                         /*
                         * some code calls btrfs_commit_transaction while
4597                          * holding the i_mutex, so we can't use mutex_lock
4598                          * here.
4599                          */
4600                         if (is_bad_inode(inode) ||
4601                             !mutex_trylock(&inode->i_mutex)) {
4602                                 iput(inode);
4603                                 inode = NULL;
4604                                 key.offset = (u64)-1;
4605                                 goto skip;
4606                         }
4607                 }
4608
4609                 if (!extent_locked) {
4610                         struct btrfs_ordered_extent *ordered;
4611
4612                         btrfs_release_path(root, path);
4613
4614                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4615                                     lock_end, GFP_NOFS);
4616                         ordered = btrfs_lookup_first_ordered_extent(inode,
4617                                                                     lock_end);
4618                         if (ordered &&
4619                             ordered->file_offset <= lock_end &&
4620                             ordered->file_offset + ordered->len > lock_start) {
4621                                 unlock_extent(&BTRFS_I(inode)->io_tree,
4622                                               lock_start, lock_end, GFP_NOFS);
4623                                 btrfs_start_ordered_extent(inode, ordered, 1);
4624                                 btrfs_put_ordered_extent(ordered);
4625                                 key.offset += num_bytes;
4626                                 goto skip;
4627                         }
4628                         if (ordered)
4629                                 btrfs_put_ordered_extent(ordered);
4630
4631                         extent_locked = 1;
4632                         continue;
4633                 }
4634
4635                 if (nr_extents == 1) {
4636                         /* update extent pointer in place */
4637                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
4638                                                 new_extents[0].disk_bytenr);
4639                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4640                                                 new_extents[0].disk_num_bytes);
4641                         btrfs_mark_buffer_dirty(leaf);
4642
4643                         btrfs_drop_extent_cache(inode, key.offset,
4644                                                 key.offset + num_bytes - 1, 0);
4645
4646                         ret = btrfs_inc_extent_ref(trans, root,
4647                                                 new_extents[0].disk_bytenr,
4648                                                 new_extents[0].disk_num_bytes,
4649                                                 leaf->start,
4650                                                 root->root_key.objectid,
4651                                                 trans->transid,
4652                                                 key.objectid);
4653                         BUG_ON(ret);
4654
4655                         ret = btrfs_free_extent(trans, root,
4656                                                 extent_key->objectid,
4657                                                 extent_key->offset,
4658                                                 leaf->start,
4659                                                 btrfs_header_owner(leaf),
4660                                                 btrfs_header_generation(leaf),
4661                                                 key.objectid, 0);
4662                         BUG_ON(ret);
4663
4664                         btrfs_release_path(root, path);
4665                         key.offset += num_bytes;
4666                 } else {
4667                         BUG_ON(1);
4668 #if 0
4669                         u64 alloc_hint;
4670                         u64 extent_len;
4671                         int i;
4672                         /*
                         * drop the old extent pointer first, then insert the
                         * new pointers one by one
4675                          */
4676                         btrfs_release_path(root, path);
4677                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
4678                                                  key.offset + num_bytes,
4679                                                  key.offset, &alloc_hint);
4680                         BUG_ON(ret);
4681
4682                         for (i = 0; i < nr_extents; i++) {
4683                                 if (ext_offset >= new_extents[i].num_bytes) {
4684                                         ext_offset -= new_extents[i].num_bytes;
4685                                         continue;
4686                                 }
4687                                 extent_len = min(new_extents[i].num_bytes -
4688                                                  ext_offset, num_bytes);
4689
4690                                 ret = btrfs_insert_empty_item(trans, root,
4691                                                               path, &key,
4692                                                               sizeof(*fi));
4693                                 BUG_ON(ret);
4694
4695                                 leaf = path->nodes[0];
4696                                 fi = btrfs_item_ptr(leaf, path->slots[0],
4697                                                 struct btrfs_file_extent_item);
4698                                 btrfs_set_file_extent_generation(leaf, fi,
4699                                                         trans->transid);
4700                                 btrfs_set_file_extent_type(leaf, fi,
4701                                                         BTRFS_FILE_EXTENT_REG);
4702                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4703                                                 new_extents[i].disk_bytenr);
4704                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4705                                                 new_extents[i].disk_num_bytes);
4706                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
4707                                                 new_extents[i].ram_bytes);
4708
4709                                 btrfs_set_file_extent_compression(leaf, fi,
4710                                                 new_extents[i].compression);
4711                                 btrfs_set_file_extent_encryption(leaf, fi,
4712                                                 new_extents[i].encryption);
4713                                 btrfs_set_file_extent_other_encoding(leaf, fi,
4714                                                 new_extents[i].other_encoding);
4715
4716                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4717                                                         extent_len);
4718                                 ext_offset += new_extents[i].offset;
4719                                 btrfs_set_file_extent_offset(leaf, fi,
4720                                                         ext_offset);
4721                                 btrfs_mark_buffer_dirty(leaf);
4722
4723                                 btrfs_drop_extent_cache(inode, key.offset,
4724                                                 key.offset + extent_len - 1, 0);
4725
4726                                 ret = btrfs_inc_extent_ref(trans, root,
4727                                                 new_extents[i].disk_bytenr,
4728                                                 new_extents[i].disk_num_bytes,
4729                                                 leaf->start,
4730                                                 root->root_key.objectid,
4731                                                 trans->transid, key.objectid);
4732                                 BUG_ON(ret);
4733                                 btrfs_release_path(root, path);
4734
4735                                 inode_add_bytes(inode, extent_len);
4736
4737                                 ext_offset = 0;
4738                                 num_bytes -= extent_len;
4739                                 key.offset += extent_len;
4740
4741                                 if (num_bytes == 0)
4742                                         break;
4743                         }
4744                         BUG_ON(i >= nr_extents);
4745 #endif
4746                 }
4747
4748                 if (extent_locked) {
4749                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4750                                       lock_end, GFP_NOFS);
4751                         extent_locked = 0;
4752                 }
4753 skip:
4754                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4755                     key.offset >= search_end)
4756                         break;
4757
4758                 cond_resched();
4759         }
4760         ret = 0;
4761 out:
4762         btrfs_release_path(root, path);
4763         if (inode) {
4764                 mutex_unlock(&inode->i_mutex);
4765                 if (extent_locked) {
4766                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4767                                       lock_end, GFP_NOFS);
4768                 }
4769                 iput(inode);
4770         }
4771         return ret;
4772 }
4773
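/*
 * when a leaf in the reloc tree has been COWed, carry the cached leaf
 * ref of the original leaf over to the new copy so that later
 * reference updates can still find it.
 */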
4774 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4775                                struct btrfs_root *root,
4776                                struct extent_buffer *buf, u64 orig_start)
4777 {
4778         int level;
4779         int ret;
4780
4781         BUG_ON(btrfs_header_generation(buf) != trans->transid);
4782         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
4783
4784         level = btrfs_header_level(buf);
4785         if (level == 0) {
4786                 struct btrfs_leaf_ref *ref;
4787                 struct btrfs_leaf_ref *orig_ref;
4788
4789                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
4790                 if (!orig_ref)
4791                         return -ENOENT;
4792
4793                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
4794                 if (!ref) {
4795                         btrfs_free_leaf_ref(root, orig_ref);
4796                         return -ENOMEM;
4797                 }
4798
4799                 ref->nritems = orig_ref->nritems;
4800                 memcpy(ref->extents, orig_ref->extents,
4801                         sizeof(ref->extents[0]) * ref->nritems);
4802
4803                 btrfs_free_leaf_ref(root, orig_ref);
4804
4805                 ref->root_gen = trans->transid;
4806                 ref->bytenr = buf->start;
4807                 ref->owner = btrfs_header_owner(buf);
4808                 ref->generation = btrfs_header_generation(buf);
4809
4810                 ret = btrfs_add_leaf_ref(root, ref, 0);
4811                 WARN_ON(ret);
4812                 btrfs_free_leaf_ref(root, ref);
4813         }
4814         return 0;
4815 }
4816
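/*
 * drop the cached extent mappings in target_root for every file
 * extent referenced by 'leaf', forcing readers to look up the
 * relocated extents instead of using stale mappings.
 */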
4817 static noinline int invalidate_extent_cache(struct btrfs_root *root,
4818                                         struct extent_buffer *leaf,
4819                                         struct btrfs_block_group_cache *group,
4820                                         struct btrfs_root *target_root)
4821 {
4822         struct btrfs_key key;
4823         struct inode *inode = NULL;
4824         struct btrfs_file_extent_item *fi;
4825         u64 num_bytes;
4826         u64 skip_objectid = 0;
4827         u32 nritems;
4828         u32 i;
4829
4830         nritems = btrfs_header_nritems(leaf);
4831         for (i = 0; i < nritems; i++) {
4832                 btrfs_item_key_to_cpu(leaf, &key, i);
4833                 if (key.objectid == skip_objectid ||
4834                     key.type != BTRFS_EXTENT_DATA_KEY)
4835                         continue;
4836                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4837                 if (btrfs_file_extent_type(leaf, fi) ==
4838                     BTRFS_FILE_EXTENT_INLINE)
4839                         continue;
4840                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4841                         continue;
4842                 if (!inode || inode->i_ino != key.objectid) {
4843                         iput(inode);
4844                         inode = btrfs_ilookup(target_root->fs_info->sb,
4845                                               key.objectid, target_root, 1);
4846                 }
4847                 if (!inode) {
4848                         skip_objectid = key.objectid;
4849                         continue;
4850                 }
4851                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4852
4853                 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4854                             key.offset + num_bytes - 1, GFP_NOFS);
4855                 btrfs_drop_extent_cache(inode, key.offset,
4856                                         key.offset + num_bytes - 1, 1);
4857                 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4858                               key.offset + num_bytes - 1, GFP_NOFS);
4859                 cond_resched();
4860         }
4861         iput(inode);
4862         return 0;
4863 }
4864
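/*
 * switch every file extent pointer in 'leaf' that falls inside the
 * block group being relocated over to its new location, keeping the
 * cached leaf ref entries in sync.
 */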
4865 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4866                                         struct btrfs_root *root,
4867                                         struct extent_buffer *leaf,
4868                                         struct btrfs_block_group_cache *group,
4869                                         struct inode *reloc_inode)
4870 {
4871         struct btrfs_key key;
4872         struct btrfs_key extent_key;
4873         struct btrfs_file_extent_item *fi;
4874         struct btrfs_leaf_ref *ref;
4875         struct disk_extent *new_extent;
4876         u64 bytenr;
4877         u64 num_bytes;
4878         u32 nritems;
4879         u32 i;
4880         int ext_index;
4881         int nr_extent;
4882         int ret;
4883
4884         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
4885         BUG_ON(!new_extent);
4886
4887         ref = btrfs_lookup_leaf_ref(root, leaf->start);
4888         BUG_ON(!ref);
4889
4890         ext_index = -1;
4891         nritems = btrfs_header_nritems(leaf);
4892         for (i = 0; i < nritems; i++) {
4893                 btrfs_item_key_to_cpu(leaf, &key, i);
4894                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4895                         continue;
4896                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4897                 if (btrfs_file_extent_type(leaf, fi) ==
4898                     BTRFS_FILE_EXTENT_INLINE)
4899                         continue;
4900                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4901                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4902                 if (bytenr == 0)
4903                         continue;
4904
4905                 ext_index++;
4906                 if (bytenr >= group->key.objectid + group->key.offset ||
4907                     bytenr + num_bytes <= group->key.objectid)
4908                         continue;
4909
4910                 extent_key.objectid = bytenr;
4911                 extent_key.offset = num_bytes;
4912                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4913                 nr_extent = 1;
4914                 ret = get_new_locations(reloc_inode, &extent_key,
4915                                         group->key.objectid, 1,
4916                                         &new_extent, &nr_extent);
4917                 if (ret > 0)
4918                         continue;
4919                 BUG_ON(ret < 0);
4920
4921                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
4922                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
4923                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
4924                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
4925
4926                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4927                                                 new_extent->disk_bytenr);
4928                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4929                                                 new_extent->disk_num_bytes);
4930                 btrfs_mark_buffer_dirty(leaf);
4931
4932                 ret = btrfs_inc_extent_ref(trans, root,
4933                                         new_extent->disk_bytenr,
4934                                         new_extent->disk_num_bytes,
4935                                         leaf->start,
4936                                         root->root_key.objectid,
4937                                         trans->transid, key.objectid);
4938                 BUG_ON(ret);
4939
4940                 ret = btrfs_free_extent(trans, root,
4941                                         bytenr, num_bytes, leaf->start,
4942                                         btrfs_header_owner(leaf),
4943                                         btrfs_header_generation(leaf),
4944                                         key.objectid, 0);
4945                 BUG_ON(ret);
4946                 cond_resched();
4947         }
4948         kfree(new_extent);
4949         BUG_ON(ext_index + 1 != ref->nritems);
4950         btrfs_free_leaf_ref(root, ref);
4951         return 0;
4952 }
4953
4954 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
4955                           struct btrfs_root *root)
4956 {
4957         struct btrfs_root *reloc_root;
4958         int ret;
4959
4960         if (root->reloc_root) {
4961                 reloc_root = root->reloc_root;
4962                 root->reloc_root = NULL;
4963                 list_add(&reloc_root->dead_list,
4964                          &root->fs_info->dead_reloc_roots);
4965
4966                 btrfs_set_root_bytenr(&reloc_root->root_item,
4967                                       reloc_root->node->start);
                btrfs_set_root_level(&reloc_root->root_item,
4969                                      btrfs_header_level(reloc_root->node));
4970                 memset(&reloc_root->root_item.drop_progress, 0,
4971                         sizeof(struct btrfs_disk_key));
4972                 reloc_root->root_item.drop_level = 0;
4973
4974                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4975                                         &reloc_root->root_key,
4976                                         &reloc_root->root_item);
4977                 BUG_ON(ret);
4978         }
4979         return 0;
4980 }
4981
4982 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
4983 {
4984         struct btrfs_trans_handle *trans;
4985         struct btrfs_root *reloc_root;
4986         struct btrfs_root *prev_root = NULL;
4987         struct list_head dead_roots;
4988         int ret;
4989         unsigned long nr;
4990
4991         INIT_LIST_HEAD(&dead_roots);
4992         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
4993
4994         while (!list_empty(&dead_roots)) {
4995                 reloc_root = list_entry(dead_roots.prev,
4996                                         struct btrfs_root, dead_list);
4997                 list_del_init(&reloc_root->dead_list);
4998
4999                 BUG_ON(reloc_root->commit_root != NULL);
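                /*
                 * btrfs_drop_snapshot() returns -EAGAIN when it has made
                 * some progress but needs a fresh transaction; end the
                 * current one, flush dirty btree blocks and retry.
                 */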
5000                 while (1) {
5001                         trans = btrfs_join_transaction(root, 1);
5002                         BUG_ON(!trans);
5003
5004                         mutex_lock(&root->fs_info->drop_mutex);
5005                         ret = btrfs_drop_snapshot(trans, reloc_root);
5006                         if (ret != -EAGAIN)
5007                                 break;
5008                         mutex_unlock(&root->fs_info->drop_mutex);
5009
5010                         nr = trans->blocks_used;
5011                         ret = btrfs_end_transaction(trans, root);
5012                         BUG_ON(ret);
5013                         btrfs_btree_balance_dirty(root, nr);
5014                 }
5015
5016                 free_extent_buffer(reloc_root->node);
5017
5018                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
5019                                      &reloc_root->root_key);
5020                 BUG_ON(ret);
5021                 mutex_unlock(&root->fs_info->drop_mutex);
5022
5023                 nr = trans->blocks_used;
5024                 ret = btrfs_end_transaction(trans, root);
5025                 BUG_ON(ret);
5026                 btrfs_btree_balance_dirty(root, nr);
5027
5028                 kfree(prev_root);
5029                 prev_root = reloc_root;
5030         }
5031         if (prev_root) {
5032                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
5033                 kfree(prev_root);
5034         }
5035         return 0;
5036 }
5037
5038 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
5039 {
5040         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
5041         return 0;
5042 }
5043
5044 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
5045 {
5046         struct btrfs_root *reloc_root;
5047         struct btrfs_trans_handle *trans;
5048         struct btrfs_key location;
5049         int found;
5050         int ret;
5051
5052         mutex_lock(&root->fs_info->tree_reloc_mutex);
5053         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
5054         BUG_ON(ret);
5055         found = !list_empty(&root->fs_info->dead_reloc_roots);
5056         mutex_unlock(&root->fs_info->tree_reloc_mutex);
5057
5058         if (found) {
5059                 trans = btrfs_start_transaction(root, 1);
5060                 BUG_ON(!trans);
5061                 ret = btrfs_commit_transaction(trans, root);
5062                 BUG_ON(ret);
5063         }
5064
5065         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5066         location.offset = (u64)-1;
5067         location.type = BTRFS_ROOT_ITEM_KEY;
5068
5069         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
5070         BUG_ON(!reloc_root);
5071         btrfs_orphan_cleanup(reloc_root);
5072         return 0;
5073 }
5074
5075 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
5076                                     struct btrfs_root *root)
5077 {
5078         struct btrfs_root *reloc_root;
5079         struct extent_buffer *eb;
5080         struct btrfs_root_item *root_item;
5081         struct btrfs_key root_key;
5082         int ret;
5083
5084         BUG_ON(!root->ref_cows);
5085         if (root->reloc_root)
5086                 return 0;
5087
5088         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
5089         BUG_ON(!root_item);
5090
5091         ret = btrfs_copy_root(trans, root, root->commit_root,
5092                               &eb, BTRFS_TREE_RELOC_OBJECTID);
5093         BUG_ON(ret);
5094
5095         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5096         root_key.offset = root->root_key.objectid;
5097         root_key.type = BTRFS_ROOT_ITEM_KEY;
5098
        memcpy(root_item, &root->root_item, sizeof(*root_item));
5100         btrfs_set_root_refs(root_item, 0);
5101         btrfs_set_root_bytenr(root_item, eb->start);
5102         btrfs_set_root_level(root_item, btrfs_header_level(eb));
5103         btrfs_set_root_generation(root_item, trans->transid);
5104
5105         btrfs_tree_unlock(eb);
5106         free_extent_buffer(eb);
5107
5108         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
5109                                 &root_key, root_item);
5110         BUG_ON(ret);
5111         kfree(root_item);
5112
5113         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
5114                                                  &root_key);
5115         BUG_ON(!reloc_root);
5116         reloc_root->last_trans = trans->transid;
5117         reloc_root->commit_root = NULL;
5118         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
5119
5120         root->reloc_root = reloc_root;
5121         return 0;
5122 }
5123
/*
 * Core function of space balance.
 *
 * The idea is to use reloc trees to relocate tree blocks in reference
 * counted roots. There is one reloc tree for each subvol, and all
 * reloc trees share the same root key objectid. Reloc trees are
 * snapshots of the latest committed roots of subvols
 * (root->commit_root).
 *
 * Relocating a tree block referenced by a subvol takes two steps:
 * COW the block through the subvol's reloc tree, then update the
 * block pointer in the subvol to point to the new block. Since all
 * reloc trees share the same root key objectid, special handling of
 * tree blocks owned by them is easy. Once a tree block has been COWed
 * in one reloc tree, the resulting new block can be used directly
 * when the same block needs to be COWed again through another reloc
 * tree. This way relocated tree blocks are shared between reloc
 * trees, and therefore also between subvols.
 */
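/*
 * an illustrative sketch of those two steps for a single block
 * (cow_block() and repoint() are made-up names; the real work below
 * is done by btrfs_search_slot() against the reloc root followed by
 * btrfs_merge_path()):
 *
 *	new = cow_block(subvol->reloc_root, old);	<- step 1
 *	repoint(subvol, old, new);			<- step 2
 *
 * a later pass that reaches 'old' through another reloc tree reuses
 * 'new' instead of COWing again, which is what keeps the relocated
 * blocks shared.
 */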
5142 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
5143                                       struct btrfs_root *root,
5144                                       struct btrfs_path *path,
5145                                       struct btrfs_key *first_key,
5146                                       struct btrfs_ref_path *ref_path,
5147                                       struct btrfs_block_group_cache *group,
5148                                       struct inode *reloc_inode)
5149 {
5150         struct btrfs_root *reloc_root;
5151         struct extent_buffer *eb = NULL;
5152         struct btrfs_key *keys;
5153         u64 *nodes;
5154         int level;
5155         int shared_level;
5156         int lowest_level = 0;
5157         int ret;
5158
5159         if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
5160                 lowest_level = ref_path->owner_objectid;
5161
5162         if (!root->ref_cows) {
5163                 path->lowest_level = lowest_level;
5164                 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
5165                 BUG_ON(ret < 0);
5166                 path->lowest_level = 0;
5167                 btrfs_release_path(root, path);
5168                 return 0;
5169         }
5170
5171         mutex_lock(&root->fs_info->tree_reloc_mutex);
5172         ret = init_reloc_tree(trans, root);
5173         BUG_ON(ret);
5174         reloc_root = root->reloc_root;
5175
5176         shared_level = ref_path->shared_level;
5177         ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
5178
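        /*
         * keys/new_nodes above the deepest shared level were filled in
         * by the previous path; clear them so only the shared portion
         * is reused.
         */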
5179         keys = ref_path->node_keys;
5180         nodes = ref_path->new_nodes;
5181         memset(&keys[shared_level + 1], 0,
5182                sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
5183         memset(&nodes[shared_level + 1], 0,
5184                sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
5185
5186         if (nodes[lowest_level] == 0) {
5187                 path->lowest_level = lowest_level;
5188                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5189                                         0, 1);
5190                 BUG_ON(ret);
5191                 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
5192                         eb = path->nodes[level];
5193                         if (!eb || eb == reloc_root->node)
5194                                 break;
5195                         nodes[level] = eb->start;
5196                         if (level == 0)
5197                                 btrfs_item_key_to_cpu(eb, &keys[level], 0);
5198                         else
5199                                 btrfs_node_key_to_cpu(eb, &keys[level], 0);
5200                 }
5201                 if (nodes[0] &&
5202                     ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5203                         eb = path->nodes[0];
5204                         ret = replace_extents_in_leaf(trans, reloc_root, eb,
5205                                                       group, reloc_inode);
5206                         BUG_ON(ret);
5207                 }
5208                 btrfs_release_path(reloc_root, path);
5209         } else {
5210                 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
5211                                        lowest_level);
5212                 BUG_ON(ret);
5213         }
5214
5215         /*
5216          * replace tree blocks in the fs tree with tree blocks in
5217          * the reloc tree.
5218          */
5219         ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
5220         BUG_ON(ret < 0);
5221
5222         if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5223                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5224                                         0, 0);
5225                 BUG_ON(ret);
5226                 extent_buffer_get(path->nodes[0]);
5227                 eb = path->nodes[0];
5228                 btrfs_release_path(reloc_root, path);
5229                 ret = invalidate_extent_cache(reloc_root, eb, group, root);
5230                 BUG_ON(ret);
5231                 free_extent_buffer(eb);
5232         }
5233
5234         mutex_unlock(&root->fs_info->tree_reloc_mutex);
5235         path->lowest_level = 0;
5236         return 0;
5237 }
5238
5239 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
5240                                         struct btrfs_root *root,
5241                                         struct btrfs_path *path,
5242                                         struct btrfs_key *first_key,
5243                                         struct btrfs_ref_path *ref_path)
5244 {
5245         int ret;
5246
5247         ret = relocate_one_path(trans, root, path, first_key,
5248                                 ref_path, NULL, NULL);
5249         BUG_ON(ret);
5250
5251         return 0;
5252 }
5253
5254 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
5255                                     struct btrfs_root *extent_root,
5256                                     struct btrfs_path *path,
5257                                     struct btrfs_key *extent_key)
5258 {
5259         int ret;
5260
5261         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
5262         if (ret)
5263                 goto out;
5264         ret = btrfs_del_item(trans, extent_root, path);
5265 out:
5266         btrfs_release_path(extent_root, path);
5267         return ret;
5268 }
5269
5270 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
5271                                                 struct btrfs_ref_path *ref_path)
5272 {
5273         struct btrfs_key root_key;
5274
5275         root_key.objectid = ref_path->root_objectid;
5276         root_key.type = BTRFS_ROOT_ITEM_KEY;
5277         if (is_cowonly_root(ref_path->root_objectid))
5278                 root_key.offset = 0;
5279         else
5280                 root_key.offset = (u64)-1;
5281
5282         return btrfs_read_fs_root_no_name(fs_info, &root_key);
5283 }
5284
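/*
 * relocate all references to the given extent: walk every chain of
 * back references with btrfs_first_ref_path()/btrfs_next_ref_path()
 * and, depending on the pass, either copy the data into reloc_inode
 * or replay the path through the owning root's reloc tree.
 */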
5285 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
5286                                         struct btrfs_path *path,
5287                                         struct btrfs_key *extent_key,
5288                                         struct btrfs_block_group_cache *group,
5289                                         struct inode *reloc_inode, int pass)
5290 {
5291         struct btrfs_trans_handle *trans;
5292         struct btrfs_root *found_root;
5293         struct btrfs_ref_path *ref_path = NULL;
5294         struct disk_extent *new_extents = NULL;
5295         int nr_extents = 0;
5296         int loops;
5297         int ret;
5298         int level;
5299         struct btrfs_key first_key;
        u64 prev_block = 0;

5303         trans = btrfs_start_transaction(extent_root, 1);
5304         BUG_ON(!trans);
5305
5306         if (extent_key->objectid == 0) {
5307                 ret = del_extent_zero(trans, extent_root, path, extent_key);
5308                 goto out;
5309         }
5310
5311         ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5312         if (!ref_path) {
5313                 ret = -ENOMEM;
5314                 goto out;
5315         }
5316
5317         for (loops = 0; ; loops++) {
5318                 if (loops == 0) {
5319                         ret = btrfs_first_ref_path(trans, extent_root, ref_path,
5320                                                    extent_key->objectid);
5321                 } else {
5322                         ret = btrfs_next_ref_path(trans, extent_root, ref_path);
5323                 }
5324                 if (ret < 0)
5325                         goto out;
5326                 if (ret > 0)
5327                         break;
5328
5329                 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5330                     ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
5331                         continue;
5332
5333                 found_root = read_ref_root(extent_root->fs_info, ref_path);
5334                 BUG_ON(!found_root);
5335                 /*
5336                  * for reference counted trees, only process reference paths
5337                  * rooted at the latest committed root.
5338                  */
5339                 if (found_root->ref_cows &&
5340                     ref_path->root_generation != found_root->root_key.offset)
5341                         continue;
5342
5343                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5344                         if (pass == 0) {
5345                                 /*
5346                                  * copy data extents to new locations
5347                                  */
5348                                 u64 group_start = group->key.objectid;
5349                                 ret = relocate_data_extent(reloc_inode,
5350                                                            extent_key,
5351                                                            group_start);
5352                                 if (ret < 0)
5353                                         goto out;
5354                                 break;
5355                         }
5356                         level = 0;
5357                 } else {
5358                         level = ref_path->owner_objectid;
5359                 }
5360
5361                 if (prev_block != ref_path->nodes[level]) {
5362                         struct extent_buffer *eb;
5363                         u64 block_start = ref_path->nodes[level];
5364                         u64 block_size = btrfs_level_size(found_root, level);
5365
5366                         eb = read_tree_block(found_root, block_start,
5367                                              block_size, 0);
                             BUG_ON(!eb);
5368                         btrfs_tree_lock(eb);
5369                         BUG_ON(level != btrfs_header_level(eb));
5370
5371                         if (level == 0)
5372                                 btrfs_item_key_to_cpu(eb, &first_key, 0);
5373                         else
5374                                 btrfs_node_key_to_cpu(eb, &first_key, 0);
5375
5376                         btrfs_tree_unlock(eb);
5377                         free_extent_buffer(eb);
5378                         prev_block = block_start;
5379                 }
5380
5381                 mutex_lock(&extent_root->fs_info->trans_mutex);
5382                 btrfs_record_root_in_trans(found_root);
5383                 mutex_unlock(&extent_root->fs_info->trans_mutex);
5384                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5385                         /*
5386                          * try to update data extent references while
5387                          * keeping metadata shared between snapshots.
5388                          */
5389                         if (pass == 1) {
5390                                 ret = relocate_one_path(trans, found_root,
5391                                                 path, &first_key, ref_path,
5392                                                 group, reloc_inode);
5393                                 if (ret < 0)
5394                                         goto out;
5395                                 continue;
5396                         }
5397                         /*
5398                          * use fallback method to process the remaining
5399                          * references.
5400                          */
5401                         if (!new_extents) {
5402                                 u64 group_start = group->key.objectid;
5403                                 new_extents = kmalloc(sizeof(*new_extents),
5404                                                       GFP_NOFS);
                                     if (!new_extents) {
                                             ret = -ENOMEM;
                                             goto out;
                                     }
5405                                 nr_extents = 1;
5406                                 ret = get_new_locations(reloc_inode,
5407                                                         extent_key,
5408                                                         group_start, 1,
5409                                                         &new_extents,
5410                                                         &nr_extents);
5411                                 if (ret)
5412                                         goto out;
5413                         }
5414                         ret = replace_one_extent(trans, found_root,
5415                                                 path, extent_key,
5416                                                 &first_key, ref_path,
5417                                                 new_extents, nr_extents);
5418                 } else {
5419                         ret = relocate_tree_block(trans, found_root, path,
5420                                                   &first_key, ref_path);
5421                 }
5422                 if (ret < 0)
5423                         goto out;
5424         }
5425         ret = 0;
5426 out:
5427         btrfs_end_transaction(trans, extent_root);
5428         kfree(new_extents);
5429         kfree(ref_path);
5430         return ret;
5431 }
5432
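/*
 * pick the raid profile a block group should be rewritten with, based
 * on how many rw devices are available.  for example (worked out from
 * the masks below): on a one-device fs, RAID1/RAID10 data becomes DUP
 * and RAID0 becomes a plain single-device profile, while on a
 * multi-device fs DUP becomes RAID1 and single-device chunks become
 * RAID0.
 */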
5433 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5434 {
5435         u64 num_devices;
5436         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
5437                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
5438
5439         num_devices = root->fs_info->fs_devices->rw_devices;
5440         if (num_devices == 1) {
5441                 stripped |= BTRFS_BLOCK_GROUP_DUP;
5442                 stripped = flags & ~stripped;
5443
5444                 /* turn raid0 into single device chunks */
5445                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
5446                         return stripped;
5447
5448                 /* turn mirroring into duplication */
5449                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
5450                              BTRFS_BLOCK_GROUP_RAID10))
5451                         return stripped | BTRFS_BLOCK_GROUP_DUP;
5452                 return flags;
5453         } else {
5454                 /* they already had raid on here, just return */
5455                 if (flags & stripped)
5456                         return flags;
5457
5458                 stripped |= BTRFS_BLOCK_GROUP_DUP;
5459                 stripped = flags & ~stripped;
5460
5461                 /* turn duplication into raid1 */
5462                 if (flags & BTRFS_BLOCK_GROUP_DUP)
5463                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
5464
5465                 /* turn single device chunks into raid0 */
5466                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
5467         }
5468         return flags;
5469 }
5470
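/*
 * before extents are copied out of a block group being shrunk, make
 * sure a chunk with the target profile exists to receive them.  if the
 * group still has bytes in use, allocate at least that much space
 * (plus 2MB of slack) in the profile chosen by
 * update_block_group_flags().
 */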
5471 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5472                      struct btrfs_block_group_cache *shrink_block_group,
5473                      int force)
5474 {
5475         struct btrfs_trans_handle *trans;
5476         u64 new_alloc_flags;
5477         u64 calc;
5478
5479         spin_lock(&shrink_block_group->lock);
5480         if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
5481                 spin_unlock(&shrink_block_group->lock);
5482
5483                 trans = btrfs_start_transaction(root, 1);
5484                 spin_lock(&shrink_block_group->lock);
5485
5486                 new_alloc_flags = update_block_group_flags(root,
5487                                                    shrink_block_group->flags);
5488                 if (new_alloc_flags != shrink_block_group->flags) {
5489                         calc =
5490                              btrfs_block_group_used(&shrink_block_group->item);
5491                 } else {
5492                         calc = shrink_block_group->key.offset;
5493                 }
5494                 spin_unlock(&shrink_block_group->lock);
5495
5496                 do_chunk_alloc(trans, root->fs_info->extent_root,
5497                                calc + 2 * 1024 * 1024, new_alloc_flags, force);
5498
5499                 btrfs_end_transaction(trans, root);
5500         } else
5501                 spin_unlock(&shrink_block_group->lock);
5502         return 0;
5503 }
5504
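/*
 * insert the inode item backing the relocation inode: a zeroed regular
 * file (mode 0600) of the given size.  BTRFS_INODE_NOCOMPRESS is set,
 * presumably so the copied data keeps a predictable on-disk layout.
 */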
5505 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5506                                  struct btrfs_root *root,
5507                                  u64 objectid, u64 size)
5508 {
5509         struct btrfs_path *path;
5510         struct btrfs_inode_item *item;
5511         struct extent_buffer *leaf;
5512         int ret;
5513
5514         path = btrfs_alloc_path();
5515         if (!path)
5516                 return -ENOMEM;
5517
5518         path->leave_spinning = 1;
5519         ret = btrfs_insert_empty_inode(trans, root, path, objectid);
5520         if (ret)
5521                 goto out;
5522
5523         leaf = path->nodes[0];
5524         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
5525         memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
5526         btrfs_set_inode_generation(leaf, item, 1);
5527         btrfs_set_inode_size(leaf, item, size);
5528         btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
5529         btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
5530         btrfs_mark_buffer_dirty(leaf);
5531         btrfs_release_path(root, path);
5532 out:
5533         btrfs_free_path(path);
5534         return ret;
5535 }
5536
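/*
 * create the private inode used to relocate data extents.  it lives in
 * the data reloc tree, is added to the orphan list so it disappears
 * after a crash, and starts with a file extent item covering the whole
 * block group.  index_cnt is borrowed to remember the block group
 * start; btrfs_reloc_clone_csums() relies on this to turn file offsets
 * back into disk bytenrs.
 */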
5537 static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
5538                                         struct btrfs_block_group_cache *group)
5539 {
5540         struct inode *inode = NULL;
5541         struct btrfs_trans_handle *trans;
5542         struct btrfs_root *root;
5543         struct btrfs_key root_key;
5544         u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
5545         int err = 0;
5546
5547         root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5548         root_key.type = BTRFS_ROOT_ITEM_KEY;
5549         root_key.offset = (u64)-1;
5550         root = btrfs_read_fs_root_no_name(fs_info, &root_key);
5551         if (IS_ERR(root))
5552                 return ERR_CAST(root);
5553
5554         trans = btrfs_start_transaction(root, 1);
5555         BUG_ON(!trans);
5556
5557         err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
5558         if (err)
5559                 goto out;
5560
5561         err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
5562         BUG_ON(err);
5563
5564         err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
5565                                        group->key.offset, 0, group->key.offset,
5566                                        0, 0, 0);
5567         BUG_ON(err);
5568
5569         inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
             BUG_ON(!inode);
5570         if (inode->i_state & I_NEW) {
5571                 BTRFS_I(inode)->root = root;
5572                 BTRFS_I(inode)->location.objectid = objectid;
5573                 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5574                 BTRFS_I(inode)->location.offset = 0;
5575                 btrfs_read_locked_inode(inode);
5576                 unlock_new_inode(inode);
5577                 BUG_ON(is_bad_inode(inode));
5578         } else {
5579                 BUG_ON(1);
5580         }
5581         BTRFS_I(inode)->index_cnt = group->key.objectid;
5582
5583         err = btrfs_orphan_add(trans, inode);
5584 out:
5585         btrfs_end_transaction(trans, root);
5586         if (err) {
5587                 if (inode)
5588                         iput(inode);
5589                 inode = ERR_PTR(err);
5590         }
5591         return inode;
5592 }
5593
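/*
 * clone the checksums of a relocated data extent to its new location.
 * file offsets in the reloc inode map 1:1 onto the old block group
 * (index_cnt holds the group start), so the old disk bytenr is
 * file_pos + index_cnt; every csum found in that range is rebased by
 * (ordered->start - disk_bytenr).  hypothetical example: group start
 * 1G, file_pos 4K, new extent at 5G -> the csum recorded for old
 * bytenr 1G+4K is re-added at bytenr 5G.
 */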
5594 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
5595 {
5597         struct btrfs_ordered_sum *sums;
5598         struct btrfs_sector_sum *sector_sum;
5599         struct btrfs_ordered_extent *ordered;
5600         struct btrfs_root *root = BTRFS_I(inode)->root;
5601         struct list_head list;
5602         size_t offset;
5603         int ret;
5604         u64 disk_bytenr;
5605
5606         INIT_LIST_HEAD(&list);
5607
5608         ordered = btrfs_lookup_ordered_extent(inode, file_pos);
5609         ordered = btrfs_lookup_ordered_extent(inode, file_pos);
             BUG_ON(!ordered);
5610
5611         disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
5612         ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
5613                                        disk_bytenr + len - 1, &list);
             BUG_ON(ret);
5614
5615         while (!list_empty(&list)) {
5616                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
5617                 list_del_init(&sums->list);
5618
5619                 sector_sum = sums->sums;
5620                 sums->bytenr = ordered->start;
5621
5622                 offset = 0;
5623                 while (offset < sums->len) {
5624                         sector_sum->bytenr += ordered->start - disk_bytenr;
5625                         sector_sum++;
5626                         offset += root->sectorsize;
5627                 }
5628
5629                 btrfs_add_ordered_sum(inode, ordered, sums);
5630         }
5631         btrfs_put_ordered_extent(ordered);
5632         return 0;
5633 }
5634
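/*
 * empty out a block group so its chunk can be removed.  the group is
 * made read-only, delalloc and ordered io are flushed, old snapshots
 * are cleaned, and then the extent tree range of the group is scanned
 * in passes: pass 0 copies data extents into the relocation inode,
 * later passes update or rewrite the remaining references.  if after
 * pass 2 only skipped extents remain, the reloc inode is recreated and
 * the pass counter restarts.  the loop ends once a scan finds no
 * extents at all.
 */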
5635 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5636 {
5637         struct btrfs_trans_handle *trans;
5638         struct btrfs_path *path;
5639         struct btrfs_fs_info *info = root->fs_info;
5640         struct extent_buffer *leaf;
5641         struct inode *reloc_inode;
5642         struct btrfs_block_group_cache *block_group;
5643         struct btrfs_key key;
5644         u64 skipped;
5645         u64 cur_byte;
5646         u64 total_found;
5647         u32 nritems;
5648         int ret;
5649         int progress;
5650         int pass = 0;
5651
5652         root = root->fs_info->extent_root;
5653
5654         block_group = btrfs_lookup_block_group(info, group_start);
5655         BUG_ON(!block_group);
5656
5657         printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5658                (unsigned long long)block_group->key.objectid,
5659                (unsigned long long)block_group->flags);
5660
5661         path = btrfs_alloc_path();
5662         BUG_ON(!path);
5663
5664         reloc_inode = create_reloc_inode(info, block_group);
5665         BUG_ON(IS_ERR(reloc_inode));
5666
5667         __alloc_chunk_for_shrink(root, block_group, 1);
5668         set_block_group_readonly(block_group);
5669
5670         btrfs_start_delalloc_inodes(info->tree_root);
5671         btrfs_wait_ordered_extents(info->tree_root, 0);
5672 again:
5673         skipped = 0;
5674         total_found = 0;
5675         progress = 0;
5676         key.objectid = block_group->key.objectid;
5677         key.offset = 0;
5678         key.type = 0;
5679         cur_byte = key.objectid;
5680
5681         trans = btrfs_start_transaction(info->tree_root, 1);
5682         btrfs_commit_transaction(trans, info->tree_root);
5683
5684         mutex_lock(&root->fs_info->cleaner_mutex);
5685         btrfs_clean_old_snapshots(info->tree_root);
5686         btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5687         mutex_unlock(&root->fs_info->cleaner_mutex);
5688
5689         trans = btrfs_start_transaction(info->tree_root, 1);
5690         btrfs_commit_transaction(trans, info->tree_root);
5691
5692         while (1) {
5693                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5694                 if (ret < 0)
5695                         goto out;
5696 next:
5697                 leaf = path->nodes[0];
5698                 nritems = btrfs_header_nritems(leaf);
5699                 if (path->slots[0] >= nritems) {
5700                         ret = btrfs_next_leaf(root, path);
5701                         if (ret < 0)
5702                                 goto out;
5703                         if (ret == 1) {
5704                                 ret = 0;
5705                                 break;
5706                         }
5707                         leaf = path->nodes[0];
5708                         nritems = btrfs_header_nritems(leaf);
5709                 }
5710
5711                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5712
5713                 if (key.objectid >= block_group->key.objectid +
5714                     block_group->key.offset)
5715                         break;
5716
5717                 if (progress && need_resched()) {
5718                         btrfs_release_path(root, path);
5719                         cond_resched();
5720                         progress = 0;
5721                         continue;
5722                 }
5723                 progress = 1;
5724
5725                 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
5726                     key.objectid + key.offset <= cur_byte) {
5727                         path->slots[0]++;
5728                         goto next;
5729                 }
5730
5731                 total_found++;
5732                 cur_byte = key.objectid + key.offset;
5733                 btrfs_release_path(root, path);
5734
5735                 __alloc_chunk_for_shrink(root, block_group, 0);
5736                 ret = relocate_one_extent(root, path, &key, block_group,
5737                                           reloc_inode, pass);
5738                 BUG_ON(ret < 0);
5739                 if (ret > 0)
5740                         skipped++;
5741
5742                 key.objectid = cur_byte;
5743                 key.type = 0;
5744                 key.offset = 0;
5745         }
5746
5747         btrfs_release_path(root, path);
5748
5749         if (pass == 0) {
5750                 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
5751                 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
5752         }
5753
5754         if (total_found > 0) {
5755                 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
5756                        (unsigned long long)total_found, pass);
5757                 pass++;
5758                 if (total_found == skipped && pass > 2) {
5759                         iput(reloc_inode);
5760                         reloc_inode = create_reloc_inode(info, block_group);
5761                         pass = 0;
5762                 }
5763                 goto again;
5764         }
5765
5766         /* delete reloc_inode */
5767         iput(reloc_inode);
5768
5769         /* unpin extents in this range */
5770         trans = btrfs_start_transaction(info->tree_root, 1);
5771         btrfs_commit_transaction(trans, info->tree_root);
5772
5773         spin_lock(&block_group->lock);
5774         WARN_ON(block_group->pinned > 0);
5775         WARN_ON(block_group->reserved > 0);
5776         WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
5777         spin_unlock(&block_group->lock);
5778         btrfs_put_block_group(block_group);
5779         ret = 0;
5780 out:
5781         btrfs_free_path(path);
5782         return ret;
5783 }
5784
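/*
 * find the first block group item with objectid >= key->objectid.
 * returns 0 with the path pointing at the item, a positive value when
 * the extent tree holds no further block group items, or a negative
 * error from the tree search.
 */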
5785 static int find_first_block_group(struct btrfs_root *root,
5786                 struct btrfs_path *path, struct btrfs_key *key)
5787 {
5788         int ret = 0;
5789         struct btrfs_key found_key;
5790         struct extent_buffer *leaf;
5791         int slot;
5792
5793         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
5794         if (ret < 0)
5795                 goto out;
5796
5797         while (1) {
5798                 slot = path->slots[0];
5799                 leaf = path->nodes[0];
5800                 if (slot >= btrfs_header_nritems(leaf)) {
5801                         ret = btrfs_next_leaf(root, path);
5802                         if (ret == 0)
5803                                 continue;
5804                         if (ret < 0)
5805                                 goto out;
5806                         break;
5807                 }
5808                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5809
5810                 if (found_key.objectid >= key->objectid &&
5811                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5812                         ret = 0;
5813                         goto out;
5814                 }
5815                 path->slots[0]++;
5816         }
5818 out:
5819         return ret;
5820 }
5821
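/*
 * tear down all in-memory block group and space_info structures at
 * unmount.  the cache lock is dropped while each group's free space
 * cache is freed and retaken to pick the next node off the rb tree.
 */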
5822 int btrfs_free_block_groups(struct btrfs_fs_info *info)
5823 {
5824         struct btrfs_block_group_cache *block_group;
5825         struct btrfs_space_info *space_info;
5826         struct rb_node *n;
5827
5828         spin_lock(&info->block_group_cache_lock);
5829         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
5830                 block_group = rb_entry(n, struct btrfs_block_group_cache,
5831                                        cache_node);
5832                 rb_erase(&block_group->cache_node,
5833                          &info->block_group_cache_tree);
5834                 spin_unlock(&info->block_group_cache_lock);
5835
5836                 btrfs_remove_free_space_cache(block_group);
5837                 down_write(&block_group->space_info->groups_sem);
5838                 list_del(&block_group->list);
5839                 up_write(&block_group->space_info->groups_sem);
5840
5841                 WARN_ON(atomic_read(&block_group->count) != 1);
5842                 kfree(block_group);
5843
5844                 spin_lock(&info->block_group_cache_lock);
5845         }
5846         spin_unlock(&info->block_group_cache_lock);
5847
5848         /* now that all the block groups are freed, go through and
5849          * free all the space_info structs.  This is only called during
5850          * the final stages of unmount, and so we know nobody is
5851          * using them.  We call synchronize_rcu() once before we start,
5852          * just to be on the safe side.
5853          */
5854         synchronize_rcu();
5855
5856         while (!list_empty(&info->space_info)) {
5857                 space_info = list_entry(info->space_info.next,
5858                                         struct btrfs_space_info,
5859                                         list);
5860
5861                 list_del(&space_info->list);
5862                 kfree(space_info);
5863         }
5864         return 0;
5865 }
5866
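/*
 * load every block group item from the extent tree at mount time:
 * build the in-memory cache entry, account the space in the matching
 * space_info, add the group to the fs-wide rb tree, and mark groups
 * whose chunk is read-only.
 */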
5867 int btrfs_read_block_groups(struct btrfs_root *root)
5868 {
5869         struct btrfs_path *path;
5870         int ret;
5871         struct btrfs_block_group_cache *cache;
5872         struct btrfs_fs_info *info = root->fs_info;
5873         struct btrfs_space_info *space_info;
5874         struct btrfs_key key;
5875         struct btrfs_key found_key;
5876         struct extent_buffer *leaf;
5877
5878         root = info->extent_root;
5879         key.objectid = 0;
5880         key.offset = 0;
5881         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5882         path = btrfs_alloc_path();
5883         if (!path)
5884                 return -ENOMEM;
5885
5886         while (1) {
5887                 ret = find_first_block_group(root, path, &key);
5888                 if (ret > 0) {
5889                         ret = 0;
5890                         goto error;
5891                 }
5892                 if (ret != 0)
5893                         goto error;
5894
5895                 leaf = path->nodes[0];
5896                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5897                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5898                 if (!cache) {
5899                         ret = -ENOMEM;
5900                         goto error;
5901                 }
5902
5903                 atomic_set(&cache->count, 1);
5904                 spin_lock_init(&cache->lock);
5905                 spin_lock_init(&cache->tree_lock);
5906                 mutex_init(&cache->cache_mutex);
5907                 INIT_LIST_HEAD(&cache->list);
5908                 INIT_LIST_HEAD(&cache->cluster_list);
5909                 read_extent_buffer(leaf, &cache->item,
5910                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
5911                                    sizeof(cache->item));
5912                 memcpy(&cache->key, &found_key, sizeof(found_key));
5913
5914                 key.objectid = found_key.objectid + found_key.offset;
5915                 btrfs_release_path(root, path);
5916                 cache->flags = btrfs_block_group_flags(&cache->item);
5917
5918                 ret = update_space_info(info, cache->flags, found_key.offset,
5919                                         btrfs_block_group_used(&cache->item),
5920                                         &space_info);
5921                 BUG_ON(ret);
5922                 cache->space_info = space_info;
5923                 down_write(&space_info->groups_sem);
5924                 list_add_tail(&cache->list, &space_info->block_groups);
5925                 up_write(&space_info->groups_sem);
5926
5927                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5928                 BUG_ON(ret);
5929
5930                 set_avail_alloc_bits(root->fs_info, cache->flags);
5931                 if (btrfs_chunk_readonly(root, cache->key.objectid))
5932                         set_block_group_readonly(cache);
5933         }
5934         ret = 0;
5935 error:
5936         btrfs_free_path(path);
5937         return ret;
5938 }
5939
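/*
 * create the block group for a newly allocated chunk and insert its
 * item into the extent tree.  bumping last_trans_log_full_commit
 * forces the next log sync into a full transaction commit, presumably
 * because the tree log cannot describe the new block group.
 */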
5940 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5941                            struct btrfs_root *root, u64 bytes_used,
5942                            u64 type, u64 chunk_objectid, u64 chunk_offset,
5943                            u64 size)
5944 {
5945         int ret;
5946         struct btrfs_root *extent_root;
5947         struct btrfs_block_group_cache *cache;
5948
5949         extent_root = root->fs_info->extent_root;
5950
5951         root->fs_info->last_trans_log_full_commit = trans->transid;
5952
5953         cache = kzalloc(sizeof(*cache), GFP_NOFS);
5954         if (!cache)
5955                 return -ENOMEM;
5956
5957         cache->key.objectid = chunk_offset;
5958         cache->key.offset = size;
5959         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
5960         atomic_set(&cache->count, 1);
5961         spin_lock_init(&cache->lock);
5962         spin_lock_init(&cache->tree_lock);
5963         mutex_init(&cache->cache_mutex);
5964         INIT_LIST_HEAD(&cache->list);
5965         INIT_LIST_HEAD(&cache->cluster_list);
5966
5967         btrfs_set_block_group_used(&cache->item, bytes_used);
5968         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
5969         cache->flags = type;
5970         btrfs_set_block_group_flags(&cache->item, type);
5971
5972         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
5973                                 &cache->space_info);
5974         BUG_ON(ret);
5975         down_write(&cache->space_info->groups_sem);
5976         list_add_tail(&cache->list, &cache->space_info->block_groups);
5977         up_write(&cache->space_info->groups_sem);
5978
5979         ret = btrfs_add_block_group_cache(root->fs_info, cache);
5980         BUG_ON(ret);
5981
5982         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
5983                                 sizeof(cache->item));
5984         BUG_ON(ret);
5985
5986         set_avail_alloc_bits(extent_root->fs_info, type);
5987
5988         return 0;
5989 }
5990
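/*
 * remove an empty, read-only block group: detach it from both
 * allocation clusters, the block group rb tree, the free space cache
 * and its space_info, then delete the block group item from the
 * extent tree.
 */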
5991 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5992                              struct btrfs_root *root, u64 group_start)
5993 {
5994         struct btrfs_path *path;
5995         struct btrfs_block_group_cache *block_group;
5996         struct btrfs_free_cluster *cluster;
5997         struct btrfs_key key;
5998         int ret;
5999
6000         root = root->fs_info->extent_root;
6001
6002         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
6003         BUG_ON(!block_group);
6004         BUG_ON(!block_group->ro);
6005
6006         memcpy(&key, &block_group->key, sizeof(key));
6007
6008         /* make sure this block group isn't part of an allocation cluster */
6009         cluster = &root->fs_info->data_alloc_cluster;
6010         spin_lock(&cluster->refill_lock);
6011         btrfs_return_cluster_to_free_space(block_group, cluster);
6012         spin_unlock(&cluster->refill_lock);
6013
6014         /*
6015          * make sure this block group isn't part of a metadata
6016          * allocation cluster
6017          */
6018         cluster = &root->fs_info->meta_alloc_cluster;
6019         spin_lock(&cluster->refill_lock);
6020         btrfs_return_cluster_to_free_space(block_group, cluster);
6021         spin_unlock(&cluster->refill_lock);
6022
6023         path = btrfs_alloc_path();
6024         BUG_ON(!path);
6025
6026         spin_lock(&root->fs_info->block_group_cache_lock);
6027         rb_erase(&block_group->cache_node,
6028                  &root->fs_info->block_group_cache_tree);
6029         spin_unlock(&root->fs_info->block_group_cache_lock);
6030         btrfs_remove_free_space_cache(block_group);
6031         down_write(&block_group->space_info->groups_sem);
6032         /*
6033          * we must use list_del_init so people can check to see if they
6034          * are still on the list after taking the semaphore
6035          */
6036         list_del_init(&block_group->list);
6037         up_write(&block_group->space_info->groups_sem);
6038
6039         spin_lock(&block_group->space_info->lock);
6040         block_group->space_info->total_bytes -= block_group->key.offset;
6041         block_group->space_info->bytes_readonly -= block_group->key.offset;
6042         spin_unlock(&block_group->space_info->lock);
6043         block_group->space_info->full = 0;
6044
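             /*
              * two references go away here: one held by the rb tree we
              * erased above and one taken by btrfs_lookup_block_group()
              */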
6045         btrfs_put_block_group(block_group);
6046         btrfs_put_block_group(block_group);
6047
6048         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
6049         if (ret > 0)
6050                 ret = -EIO;
6051         if (ret < 0)
6052                 goto out;
6053
6054         ret = btrfs_del_item(trans, root, path);
6055 out:
6056         btrfs_free_path(path);
6057         return ret;
6058 }