fs/btrfs/extent_io.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/writeback.h>
13 #include <linux/pagevec.h>
14 #include <linux/prefetch.h>
15 #include <linux/cleancache.h>
16 #include "extent_io.h"
17 #include "extent_map.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20 #include "volumes.h"
21 #include "check-integrity.h"
22 #include "locking.h"
23 #include "rcu-string.h"
24 #include "backref.h"
25 #include "disk-io.h"
26
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
29 static struct bio_set btrfs_bioset;
30
31 static inline bool extent_state_in_tree(const struct extent_state *state)
32 {
33         return !RB_EMPTY_NODE(&state->rb_node);
34 }
35
36 #ifdef CONFIG_BTRFS_DEBUG
37 static LIST_HEAD(buffers);
38 static LIST_HEAD(states);
39
40 static DEFINE_SPINLOCK(leak_lock);
41
42 static inline
43 void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
44 {
45         unsigned long flags;
46
47         spin_lock_irqsave(&leak_lock, flags);
48         list_add(new, head);
49         spin_unlock_irqrestore(&leak_lock, flags);
50 }
51
52 static inline
53 void btrfs_leak_debug_del(struct list_head *entry)
54 {
55         unsigned long flags;
56
57         spin_lock_irqsave(&leak_lock, flags);
58         list_del(entry);
59         spin_unlock_irqrestore(&leak_lock, flags);
60 }
61
62 static inline
63 void btrfs_leak_debug_check(void)
64 {
65         struct extent_state *state;
66         struct extent_buffer *eb;
67
68         while (!list_empty(&states)) {
69                 state = list_entry(states.next, struct extent_state, leak_list);
70                 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
71                        state->start, state->end, state->state,
72                        extent_state_in_tree(state),
73                        refcount_read(&state->refs));
74                 list_del(&state->leak_list);
75                 kmem_cache_free(extent_state_cache, state);
76         }
77
78         while (!list_empty(&buffers)) {
79                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
80                 pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
81                        eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
82                 list_del(&eb->leak_list);
83                 kmem_cache_free(extent_buffer_cache, eb);
84         }
85 }
86
87 #define btrfs_debug_check_extent_io_range(tree, start, end)             \
88         __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
89 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
90                 struct extent_io_tree *tree, u64 start, u64 end)
91 {
92         struct inode *inode = tree->private_data;
93         u64 isize;
94
95         if (!inode || !is_data_inode(inode))
96                 return;
97
98         isize = i_size_read(inode);
99         if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
100                 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
101                     "%s: ino %llu isize %llu odd range [%llu,%llu]",
102                         caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
103         }
104 }
105 #else
106 #define btrfs_leak_debug_add(new, head) do {} while (0)
107 #define btrfs_leak_debug_del(entry)     do {} while (0)
108 #define btrfs_leak_debug_check()        do {} while (0)
109 #define btrfs_debug_check_extent_io_range(c, s, e)      do {} while (0)
110 #endif
111
112 struct tree_entry {
113         u64 start;
114         u64 end;
115         struct rb_node rb_node;
116 };
117
118 struct extent_page_data {
119         struct bio *bio;
120         struct extent_io_tree *tree;
121         /* tells writepage not to lock the state bits for this range;
122          * it still does the unlocking
123          */
124         unsigned int extent_locked:1;
125
126         /* tells the submit_bio code to use REQ_SYNC */
127         unsigned int sync_io:1;
128 };
129
130 static int add_extent_changeset(struct extent_state *state, unsigned bits,
131                                  struct extent_changeset *changeset,
132                                  int set)
133 {
134         int ret;
135
136         if (!changeset)
137                 return 0;
138         if (set && (state->state & bits) == bits)
139                 return 0;
140         if (!set && (state->state & bits) == 0)
141                 return 0;
142         changeset->bytes_changed += state->end - state->start + 1;
143         ret = ulist_add(&changeset->range_changed, state->start, state->end,
144                         GFP_ATOMIC);
145         return ret;
146 }
147
148 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
149                                        unsigned long bio_flags)
150 {
151         blk_status_t ret = 0;
152         struct extent_io_tree *tree = bio->bi_private;
153
154         bio->bi_private = NULL;
155
156         if (tree->ops)
157                 ret = tree->ops->submit_bio_hook(tree->private_data, bio,
158                                                  mirror_num, bio_flags);
159         else
160                 btrfsic_submit_bio(bio);
161
162         return blk_status_to_errno(ret);
163 }
164
165 /* Cleanup unsubmitted bios */
166 static void end_write_bio(struct extent_page_data *epd, int ret)
167 {
168         if (epd->bio) {
169                 epd->bio->bi_status = errno_to_blk_status(ret);
170                 bio_endio(epd->bio);
171                 epd->bio = NULL;
172         }
173 }
174
175 /*
176  * Submit bio from extent page data via submit_one_bio
177  *
178  * Return 0 if everything is OK.
179  * Return <0 for error.
180  */
181 static int __must_check flush_write_bio(struct extent_page_data *epd)
182 {
183         int ret = 0;
184
185         if (epd->bio) {
186                 ret = submit_one_bio(epd->bio, 0, 0);
187                 /*
188                  * Clean up of epd->bio is handled by its endio function.
189                  * And endio is either triggered by successful bio execution
190                  * or the error handler of submit bio hook.
191                  * So at this point, no matter what happened, we don't need
192                  * to clean up epd->bio.
193                  */
194                 epd->bio = NULL;
195         }
196         return ret;
197 }
198
199 int __init extent_io_init(void)
200 {
201         extent_state_cache = kmem_cache_create("btrfs_extent_state",
202                         sizeof(struct extent_state), 0,
203                         SLAB_MEM_SPREAD, NULL);
204         if (!extent_state_cache)
205                 return -ENOMEM;
206
207         extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
208                         sizeof(struct extent_buffer), 0,
209                         SLAB_MEM_SPREAD, NULL);
210         if (!extent_buffer_cache)
211                 goto free_state_cache;
212
213         if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
214                         offsetof(struct btrfs_io_bio, bio),
215                         BIOSET_NEED_BVECS))
216                 goto free_buffer_cache;
217
218         if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
219                 goto free_bioset;
220
221         return 0;
222
223 free_bioset:
224         bioset_exit(&btrfs_bioset);
225
226 free_buffer_cache:
227         kmem_cache_destroy(extent_buffer_cache);
228         extent_buffer_cache = NULL;
229
230 free_state_cache:
231         kmem_cache_destroy(extent_state_cache);
232         extent_state_cache = NULL;
233         return -ENOMEM;
234 }
235
236 void __cold extent_io_exit(void)
237 {
238         btrfs_leak_debug_check();
239
240         /*
241          * Make sure all delayed rcu free are flushed before we
242          * destroy caches.
243          */
244         rcu_barrier();
245         kmem_cache_destroy(extent_state_cache);
246         kmem_cache_destroy(extent_buffer_cache);
247         bioset_exit(&btrfs_bioset);
248 }
249
250 void extent_io_tree_init(struct btrfs_fs_info *fs_info,
251                          struct extent_io_tree *tree, unsigned int owner,
252                          void *private_data)
253 {
254         tree->fs_info = fs_info;
255         tree->state = RB_ROOT;
256         tree->ops = NULL;
257         tree->dirty_bytes = 0;
258         spin_lock_init(&tree->lock);
259         tree->private_data = private_data;
260         tree->owner = owner;
261 }
262
263 void extent_io_tree_release(struct extent_io_tree *tree)
264 {
265         spin_lock(&tree->lock);
266         /*
267          * Do a single barrier for the waitqueue_active check here; the state
268          * of the waitqueue should not change once extent_io_tree_release is
269          * called.
270          */
271         smp_mb();
272         while (!RB_EMPTY_ROOT(&tree->state)) {
273                 struct rb_node *node;
274                 struct extent_state *state;
275
276                 node = rb_first(&tree->state);
277                 state = rb_entry(node, struct extent_state, rb_node);
278                 rb_erase(&state->rb_node, &tree->state);
279                 RB_CLEAR_NODE(&state->rb_node);
280                 /*
281                  * btree io trees aren't supposed to have tasks waiting for
282                  * changes in the flags of extent states ever.
283                  */
284                 ASSERT(!waitqueue_active(&state->wq));
285                 free_extent_state(state);
286
287                 cond_resched_lock(&tree->lock);
288         }
289         spin_unlock(&tree->lock);
290 }
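/*
 * Usage sketch (illustrative): how a caller typically pairs the two
 * functions above over the lifetime of a tree.  The helper is hypothetical;
 * the owner value (IO_TREE_SELFTEST here) and the fs_info/private_data
 * arguments are assumed to come from the caller's context.
 */
static void __maybe_unused example_io_tree_lifetime(struct btrfs_fs_info *fs_info,
						    struct inode *inode)
{
	struct extent_io_tree tree;

	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST, inode);

	/* ... set and clear bits on byte ranges of the tree ... */

	/* Drop any extent states still tracked before the tree goes away. */
	extent_io_tree_release(&tree);
}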
291
292 static struct extent_state *alloc_extent_state(gfp_t mask)
293 {
294         struct extent_state *state;
295
296         /*
297          * The given mask might not be appropriate for the slab allocator,
298          * drop the unsupported bits
299          */
300         mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
301         state = kmem_cache_alloc(extent_state_cache, mask);
302         if (!state)
303                 return state;
304         state->state = 0;
305         state->failrec = NULL;
306         RB_CLEAR_NODE(&state->rb_node);
307         btrfs_leak_debug_add(&state->leak_list, &states);
308         refcount_set(&state->refs, 1);
309         init_waitqueue_head(&state->wq);
310         trace_alloc_extent_state(state, mask, _RET_IP_);
311         return state;
312 }
313
314 void free_extent_state(struct extent_state *state)
315 {
316         if (!state)
317                 return;
318         if (refcount_dec_and_test(&state->refs)) {
319                 WARN_ON(extent_state_in_tree(state));
320                 btrfs_leak_debug_del(&state->leak_list);
321                 trace_free_extent_state(state, _RET_IP_);
322                 kmem_cache_free(extent_state_cache, state);
323         }
324 }
325
326 static struct rb_node *tree_insert(struct rb_root *root,
327                                    struct rb_node *search_start,
328                                    u64 offset,
329                                    struct rb_node *node,
330                                    struct rb_node ***p_in,
331                                    struct rb_node **parent_in)
332 {
333         struct rb_node **p;
334         struct rb_node *parent = NULL;
335         struct tree_entry *entry;
336
337         if (p_in && parent_in) {
338                 p = *p_in;
339                 parent = *parent_in;
340                 goto do_insert;
341         }
342
343         p = search_start ? &search_start : &root->rb_node;
344         while (*p) {
345                 parent = *p;
346                 entry = rb_entry(parent, struct tree_entry, rb_node);
347
348                 if (offset < entry->start)
349                         p = &(*p)->rb_left;
350                 else if (offset > entry->end)
351                         p = &(*p)->rb_right;
352                 else
353                         return parent;
354         }
355
356 do_insert:
357         rb_link_node(node, parent, p);
358         rb_insert_color(node, root);
359         return NULL;
360 }
361
362 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
363                                       struct rb_node **next_ret,
364                                       struct rb_node **prev_ret,
365                                       struct rb_node ***p_ret,
366                                       struct rb_node **parent_ret)
367 {
368         struct rb_root *root = &tree->state;
369         struct rb_node **n = &root->rb_node;
370         struct rb_node *prev = NULL;
371         struct rb_node *orig_prev = NULL;
372         struct tree_entry *entry;
373         struct tree_entry *prev_entry = NULL;
374
375         while (*n) {
376                 prev = *n;
377                 entry = rb_entry(prev, struct tree_entry, rb_node);
378                 prev_entry = entry;
379
380                 if (offset < entry->start)
381                         n = &(*n)->rb_left;
382                 else if (offset > entry->end)
383                         n = &(*n)->rb_right;
384                 else
385                         return *n;
386         }
387
388         if (p_ret)
389                 *p_ret = n;
390         if (parent_ret)
391                 *parent_ret = prev;
392
393         if (next_ret) {
394                 orig_prev = prev;
395                 while (prev && offset > prev_entry->end) {
396                         prev = rb_next(prev);
397                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
398                 }
399                 *next_ret = prev;
400                 prev = orig_prev;
401         }
402
403         if (prev_ret) {
404                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
405                 while (prev && offset < prev_entry->start) {
406                         prev = rb_prev(prev);
407                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
408                 }
409                 *prev_ret = prev;
410         }
411         return NULL;
412 }
413
414 static inline struct rb_node *
415 tree_search_for_insert(struct extent_io_tree *tree,
416                        u64 offset,
417                        struct rb_node ***p_ret,
418                        struct rb_node **parent_ret)
419 {
420         struct rb_node *next = NULL;
421         struct rb_node *ret;
422
423         ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
424         if (!ret)
425                 return next;
426         return ret;
427 }
428
429 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
430                                           u64 offset)
431 {
432         return tree_search_for_insert(tree, offset, NULL, NULL);
433 }
434
435 /*
436  * utility function to look for merge candidates inside a given range.
437  * Any extents with matching state are merged together into a single
438  * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY set
439  * are not merged because the end_io handlers need to be able to do
440  * operations on them without sleeping (or doing allocations/splits).
441  *
442  * This should be called with the tree lock held.
443  */
444 static void merge_state(struct extent_io_tree *tree,
445                         struct extent_state *state)
446 {
447         struct extent_state *other;
448         struct rb_node *other_node;
449
450         if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
451                 return;
452
453         other_node = rb_prev(&state->rb_node);
454         if (other_node) {
455                 other = rb_entry(other_node, struct extent_state, rb_node);
456                 if (other->end == state->start - 1 &&
457                     other->state == state->state) {
458                         if (tree->private_data &&
459                             is_data_inode(tree->private_data))
460                                 btrfs_merge_delalloc_extent(tree->private_data,
461                                                             state, other);
462                         state->start = other->start;
463                         rb_erase(&other->rb_node, &tree->state);
464                         RB_CLEAR_NODE(&other->rb_node);
465                         free_extent_state(other);
466                 }
467         }
468         other_node = rb_next(&state->rb_node);
469         if (other_node) {
470                 other = rb_entry(other_node, struct extent_state, rb_node);
471                 if (other->start == state->end + 1 &&
472                     other->state == state->state) {
473                         if (tree->private_data &&
474                             is_data_inode(tree->private_data))
475                                 btrfs_merge_delalloc_extent(tree->private_data,
476                                                             state, other);
477                         state->end = other->end;
478                         rb_erase(&other->rb_node, &tree->state);
479                         RB_CLEAR_NODE(&other->rb_node);
480                         free_extent_state(other);
481                 }
482         }
483 }
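/*
 * Usage sketch (illustrative; the helper is hypothetical): setting the same
 * bit on two adjacent byte ranges leaves a single extent state behind,
 * because merge_state() collapses neighbours whose state fields match
 * exactly.  Error handling is elided.
 */
static void __maybe_unused example_adjacent_ranges_merge(struct extent_io_tree *tree)
{
	/* [0, 4095] and [4096, 8191] end up with identical state bits ... */
	set_extent_bit(tree, 0, 4095, EXTENT_DIRTY, NULL, NULL, GFP_NOFS);
	set_extent_bit(tree, 4096, 8191, EXTENT_DIRTY, NULL, NULL, GFP_NOFS);
	/* ... so the tree now holds one state covering [0, 8191]. */
}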
484
485 static void set_state_bits(struct extent_io_tree *tree,
486                            struct extent_state *state, unsigned *bits,
487                            struct extent_changeset *changeset);
488
489 /*
490  * insert an extent_state struct into the tree.  'bits' are set on the
491  * struct before it is inserted.
492  *
493  * This may return -EEXIST if the extent is already there, in which case the
494  * state struct is freed.
495  *
496  * The tree lock is not taken internally.  This is a utility function and
497  * probably isn't what you want to call (see set/clear_extent_bit).
498  */
499 static int insert_state(struct extent_io_tree *tree,
500                         struct extent_state *state, u64 start, u64 end,
501                         struct rb_node ***p,
502                         struct rb_node **parent,
503                         unsigned *bits, struct extent_changeset *changeset)
504 {
505         struct rb_node *node;
506
507         if (end < start)
508                 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
509                        end, start);
510         state->start = start;
511         state->end = end;
512
513         set_state_bits(tree, state, bits, changeset);
514
515         node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
516         if (node) {
517                 struct extent_state *found;
518                 found = rb_entry(node, struct extent_state, rb_node);
519                 pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
520                        found->start, found->end, start, end);
521                 return -EEXIST;
522         }
523         merge_state(tree, state);
524         return 0;
525 }
526
527 /*
528  * split a given extent state struct in two, inserting the preallocated
529  * struct 'prealloc' as the newly created second half.  'split' indicates an
530  * offset inside 'orig' where it should be split.
531  *
532  * Before calling,
533  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
534  * are two extent state structs in the tree:
535  * prealloc: [orig->start, split - 1]
536  * orig: [ split, orig->end ]
537  *
538  * The tree locks are not taken by this function. They need to be held
539  * by the caller.
540  */
541 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
542                        struct extent_state *prealloc, u64 split)
543 {
544         struct rb_node *node;
545
546         if (tree->private_data && is_data_inode(tree->private_data))
547                 btrfs_split_delalloc_extent(tree->private_data, orig, split);
548
549         prealloc->start = orig->start;
550         prealloc->end = split - 1;
551         prealloc->state = orig->state;
552         orig->start = split;
553
554         node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
555                            &prealloc->rb_node, NULL, NULL);
556         if (node) {
557                 free_extent_state(prealloc);
558                 return -EEXIST;
559         }
560         return 0;
561 }
562
563 static struct extent_state *next_state(struct extent_state *state)
564 {
565         struct rb_node *next = rb_next(&state->rb_node);
566         if (next)
567                 return rb_entry(next, struct extent_state, rb_node);
568         else
569                 return NULL;
570 }
571
572 /*
573  * utility function to clear some bits in an extent state struct.
574  * it will optionally wake up anyone waiting on this state (wake == 1).
575  *
576  * If no bits are set on the state struct after clearing things, the
577  * struct is freed and removed from the tree
578  */
579 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
580                                             struct extent_state *state,
581                                             unsigned *bits, int wake,
582                                             struct extent_changeset *changeset)
583 {
584         struct extent_state *next;
585         unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
586         int ret;
587
588         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
589                 u64 range = state->end - state->start + 1;
590                 WARN_ON(range > tree->dirty_bytes);
591                 tree->dirty_bytes -= range;
592         }
593
594         if (tree->private_data && is_data_inode(tree->private_data))
595                 btrfs_clear_delalloc_extent(tree->private_data, state, bits);
596
597         ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
598         BUG_ON(ret < 0);
599         state->state &= ~bits_to_clear;
600         if (wake)
601                 wake_up(&state->wq);
602         if (state->state == 0) {
603                 next = next_state(state);
604                 if (extent_state_in_tree(state)) {
605                         rb_erase(&state->rb_node, &tree->state);
606                         RB_CLEAR_NODE(&state->rb_node);
607                         free_extent_state(state);
608                 } else {
609                         WARN_ON(1);
610                 }
611         } else {
612                 merge_state(tree, state);
613                 next = next_state(state);
614         }
615         return next;
616 }
617
618 static struct extent_state *
619 alloc_extent_state_atomic(struct extent_state *prealloc)
620 {
621         if (!prealloc)
622                 prealloc = alloc_extent_state(GFP_ATOMIC);
623
624         return prealloc;
625 }
626
627 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
628 {
629         struct inode *inode = tree->private_data;
630
631         btrfs_panic(btrfs_sb(inode->i_sb), err,
632         "locking error: extent tree was modified by another thread while locked");
633 }
634
635 /*
636  * clear some bits on a range in the tree.  This may require splitting
637  * or inserting elements in the tree, so the gfp mask is used to
638  * indicate which allocations or sleeping are allowed.
639  *
640  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
641  * the given range from the tree regardless of state (i.e. for truncate).
642  *
643  * the range [start, end] is inclusive.
644  *
645  * This takes the tree lock, and returns 0 on success and < 0 on error.
646  */
647 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
648                               unsigned bits, int wake, int delete,
649                               struct extent_state **cached_state,
650                               gfp_t mask, struct extent_changeset *changeset)
651 {
652         struct extent_state *state;
653         struct extent_state *cached;
654         struct extent_state *prealloc = NULL;
655         struct rb_node *node;
656         u64 last_end;
657         int err;
658         int clear = 0;
659
660         btrfs_debug_check_extent_io_range(tree, start, end);
661         trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
662
663         if (bits & EXTENT_DELALLOC)
664                 bits |= EXTENT_NORESERVE;
665
666         if (delete)
667                 bits |= ~EXTENT_CTLBITS;
668
669         if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
670                 clear = 1;
671 again:
672         if (!prealloc && gfpflags_allow_blocking(mask)) {
673                 /*
674                  * Don't care about allocation failure here because we might
675                  * end up not needing the pre-allocated extent state at all,
676                  * which is the case if the extent states already in the tree
677                  * only cover our input range and do not extend beyond it.
678                  * If we end up needing a new extent state we allocate it later.
679                  */
680                 prealloc = alloc_extent_state(mask);
681         }
682
683         spin_lock(&tree->lock);
684         if (cached_state) {
685                 cached = *cached_state;
686
687                 if (clear) {
688                         *cached_state = NULL;
689                         cached_state = NULL;
690                 }
691
692                 if (cached && extent_state_in_tree(cached) &&
693                     cached->start <= start && cached->end > start) {
694                         if (clear)
695                                 refcount_dec(&cached->refs);
696                         state = cached;
697                         goto hit_next;
698                 }
699                 if (clear)
700                         free_extent_state(cached);
701         }
702         /*
703          * this search will find the extents that end after
704          * our range starts
705          */
706         node = tree_search(tree, start);
707         if (!node)
708                 goto out;
709         state = rb_entry(node, struct extent_state, rb_node);
710 hit_next:
711         if (state->start > end)
712                 goto out;
713         WARN_ON(state->end < start);
714         last_end = state->end;
715
716         /* the state doesn't have the wanted bits, go ahead */
717         if (!(state->state & bits)) {
718                 state = next_state(state);
719                 goto next;
720         }
721
722         /*
723          *     | ---- desired range ---- |
724          *  | state | or
725          *  | ------------- state -------------- |
726          *
727          * We need to split the extent we found, and may flip
728          * bits on second half.
729          *
730          * If the extent we found extends past our range, we
731          * just split and search again.  It'll get split again
732          * the next time though.
733          *
734          * If the extent we found is inside our range, we clear
735          * the desired bit on it.
736          */
737
738         if (state->start < start) {
739                 prealloc = alloc_extent_state_atomic(prealloc);
740                 BUG_ON(!prealloc);
741                 err = split_state(tree, state, prealloc, start);
742                 if (err)
743                         extent_io_tree_panic(tree, err);
744
745                 prealloc = NULL;
746                 if (err)
747                         goto out;
748                 if (state->end <= end) {
749                         state = clear_state_bit(tree, state, &bits, wake,
750                                                 changeset);
751                         goto next;
752                 }
753                 goto search_again;
754         }
755         /*
756          * | ---- desired range ---- |
757          *                        | state |
758          * We need to split the extent, and clear the bit
759          * on the first half
760          */
761         if (state->start <= end && state->end > end) {
762                 prealloc = alloc_extent_state_atomic(prealloc);
763                 BUG_ON(!prealloc);
764                 err = split_state(tree, state, prealloc, end + 1);
765                 if (err)
766                         extent_io_tree_panic(tree, err);
767
768                 if (wake)
769                         wake_up(&state->wq);
770
771                 clear_state_bit(tree, prealloc, &bits, wake, changeset);
772
773                 prealloc = NULL;
774                 goto out;
775         }
776
777         state = clear_state_bit(tree, state, &bits, wake, changeset);
778 next:
779         if (last_end == (u64)-1)
780                 goto out;
781         start = last_end + 1;
782         if (start <= end && state && !need_resched())
783                 goto hit_next;
784
785 search_again:
786         if (start > end)
787                 goto out;
788         spin_unlock(&tree->lock);
789         if (gfpflags_allow_blocking(mask))
790                 cond_resched();
791         goto again;
792
793 out:
794         spin_unlock(&tree->lock);
795         if (prealloc)
796                 free_extent_state(prealloc);
797
798         return 0;
799
800 }
801
802 static void wait_on_state(struct extent_io_tree *tree,
803                           struct extent_state *state)
804                 __releases(tree->lock)
805                 __acquires(tree->lock)
806 {
807         DEFINE_WAIT(wait);
808         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
809         spin_unlock(&tree->lock);
810         schedule();
811         spin_lock(&tree->lock);
812         finish_wait(&state->wq, &wait);
813 }
814
815 /*
816  * waits for one or more bits to clear on a range in the state tree.
817  * The range [start, end] is inclusive.
818  * The tree lock is taken by this function
819  */
820 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
821                             unsigned long bits)
822 {
823         struct extent_state *state;
824         struct rb_node *node;
825
826         btrfs_debug_check_extent_io_range(tree, start, end);
827
828         spin_lock(&tree->lock);
829 again:
830         while (1) {
831                 /*
832                  * this search will find all the extents that end after
833                  * our range starts
834                  */
835                 node = tree_search(tree, start);
836 process_node:
837                 if (!node)
838                         break;
839
840                 state = rb_entry(node, struct extent_state, rb_node);
841
842                 if (state->start > end)
843                         goto out;
844
845                 if (state->state & bits) {
846                         start = state->start;
847                         refcount_inc(&state->refs);
848                         wait_on_state(tree, state);
849                         free_extent_state(state);
850                         goto again;
851                 }
852                 start = state->end + 1;
853
854                 if (start > end)
855                         break;
856
857                 if (!cond_resched_lock(&tree->lock)) {
858                         node = rb_next(node);
859                         goto process_node;
860                 }
861         }
862 out:
863         spin_unlock(&tree->lock);
864 }
865
866 static void set_state_bits(struct extent_io_tree *tree,
867                            struct extent_state *state,
868                            unsigned *bits, struct extent_changeset *changeset)
869 {
870         unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
871         int ret;
872
873         if (tree->private_data && is_data_inode(tree->private_data))
874                 btrfs_set_delalloc_extent(tree->private_data, state, bits);
875
876         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
877                 u64 range = state->end - state->start + 1;
878                 tree->dirty_bytes += range;
879         }
880         ret = add_extent_changeset(state, bits_to_set, changeset, 1);
881         BUG_ON(ret < 0);
882         state->state |= bits_to_set;
883 }
884
885 static void cache_state_if_flags(struct extent_state *state,
886                                  struct extent_state **cached_ptr,
887                                  unsigned flags)
888 {
889         if (cached_ptr && !(*cached_ptr)) {
890                 if (!flags || (state->state & flags)) {
891                         *cached_ptr = state;
892                         refcount_inc(&state->refs);
893                 }
894         }
895 }
896
897 static void cache_state(struct extent_state *state,
898                         struct extent_state **cached_ptr)
899 {
900         return cache_state_if_flags(state, cached_ptr,
901                                     EXTENT_LOCKED | EXTENT_BOUNDARY);
902 }
903
904 /*
905  * set some bits on a range in the tree.  This may require allocations or
906  * sleeping, so the gfp mask is used to indicate what is allowed.
907  *
908  * If any of the exclusive bits are set, this will fail with -EEXIST if some
909  * part of the range already has the desired bits set.  The start of the
910  * existing range is returned in failed_start in this case.
911  *
912  * [start, end] is inclusive.  This takes the tree lock.
913  */
914
915 static int __must_check
916 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
917                  unsigned bits, unsigned exclusive_bits,
918                  u64 *failed_start, struct extent_state **cached_state,
919                  gfp_t mask, struct extent_changeset *changeset)
920 {
921         struct extent_state *state;
922         struct extent_state *prealloc = NULL;
923         struct rb_node *node;
924         struct rb_node **p;
925         struct rb_node *parent;
926         int err = 0;
927         u64 last_start;
928         u64 last_end;
929
930         btrfs_debug_check_extent_io_range(tree, start, end);
931         trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
932
933 again:
934         if (!prealloc && gfpflags_allow_blocking(mask)) {
935                 /*
936                  * Don't care about allocation failure here because we might
937                  * end up not needing the pre-allocated extent state at all,
938                  * which is the case if the extent states already in the tree
939                  * only cover our input range and do not extend beyond it.
940                  * If we end up needing a new extent state we allocate it later.
941                  */
942                 prealloc = alloc_extent_state(mask);
943         }
944
945         spin_lock(&tree->lock);
946         if (cached_state && *cached_state) {
947                 state = *cached_state;
948                 if (state->start <= start && state->end > start &&
949                     extent_state_in_tree(state)) {
950                         node = &state->rb_node;
951                         goto hit_next;
952                 }
953         }
954         /*
955          * this search will find all the extents that end after
956          * our range starts.
957          */
958         node = tree_search_for_insert(tree, start, &p, &parent);
959         if (!node) {
960                 prealloc = alloc_extent_state_atomic(prealloc);
961                 BUG_ON(!prealloc);
962                 err = insert_state(tree, prealloc, start, end,
963                                    &p, &parent, &bits, changeset);
964                 if (err)
965                         extent_io_tree_panic(tree, err);
966
967                 cache_state(prealloc, cached_state);
968                 prealloc = NULL;
969                 goto out;
970         }
971         state = rb_entry(node, struct extent_state, rb_node);
972 hit_next:
973         last_start = state->start;
974         last_end = state->end;
975
976         /*
977          * | ---- desired range ---- |
978          * | state |
979          *
980          * Just lock what we found and keep going
981          */
982         if (state->start == start && state->end <= end) {
983                 if (state->state & exclusive_bits) {
984                         *failed_start = state->start;
985                         err = -EEXIST;
986                         goto out;
987                 }
988
989                 set_state_bits(tree, state, &bits, changeset);
990                 cache_state(state, cached_state);
991                 merge_state(tree, state);
992                 if (last_end == (u64)-1)
993                         goto out;
994                 start = last_end + 1;
995                 state = next_state(state);
996                 if (start < end && state && state->start == start &&
997                     !need_resched())
998                         goto hit_next;
999                 goto search_again;
1000         }
1001
1002         /*
1003          *     | ---- desired range ---- |
1004          * | state |
1005          *   or
1006          * | ------------- state -------------- |
1007          *
1008          * We need to split the extent we found, and may flip bits on
1009          * second half.
1010          *
1011          * If the extent we found extends past our
1012          * range, we just split and search again.  It'll get split
1013          * again the next time though.
1014          *
1015          * If the extent we found is inside our range, we set the
1016          * desired bit on it.
1017          */
1018         if (state->start < start) {
1019                 if (state->state & exclusive_bits) {
1020                         *failed_start = start;
1021                         err = -EEXIST;
1022                         goto out;
1023                 }
1024
1025                 prealloc = alloc_extent_state_atomic(prealloc);
1026                 BUG_ON(!prealloc);
1027                 err = split_state(tree, state, prealloc, start);
1028                 if (err)
1029                         extent_io_tree_panic(tree, err);
1030
1031                 prealloc = NULL;
1032                 if (err)
1033                         goto out;
1034                 if (state->end <= end) {
1035                         set_state_bits(tree, state, &bits, changeset);
1036                         cache_state(state, cached_state);
1037                         merge_state(tree, state);
1038                         if (last_end == (u64)-1)
1039                                 goto out;
1040                         start = last_end + 1;
1041                         state = next_state(state);
1042                         if (start < end && state && state->start == start &&
1043                             !need_resched())
1044                                 goto hit_next;
1045                 }
1046                 goto search_again;
1047         }
1048         /*
1049          * | ---- desired range ---- |
1050          *     | state | or               | state |
1051          *
1052          * There's a hole, we need to insert something in it and
1053          * ignore the extent we found.
1054          */
1055         if (state->start > start) {
1056                 u64 this_end;
1057                 if (end < last_start)
1058                         this_end = end;
1059                 else
1060                         this_end = last_start - 1;
1061
1062                 prealloc = alloc_extent_state_atomic(prealloc);
1063                 BUG_ON(!prealloc);
1064
1065                 /*
1066                  * Avoid freeing 'prealloc' if it can be merged with
1067                  * the later extent.
1068                  */
1069                 err = insert_state(tree, prealloc, start, this_end,
1070                                    NULL, NULL, &bits, changeset);
1071                 if (err)
1072                         extent_io_tree_panic(tree, err);
1073
1074                 cache_state(prealloc, cached_state);
1075                 prealloc = NULL;
1076                 start = this_end + 1;
1077                 goto search_again;
1078         }
1079         /*
1080          * | ---- desired range ---- |
1081          *                        | state |
1082          * We need to split the extent, and set the bit
1083          * on the first half
1084          */
1085         if (state->start <= end && state->end > end) {
1086                 if (state->state & exclusive_bits) {
1087                         *failed_start = start;
1088                         err = -EEXIST;
1089                         goto out;
1090                 }
1091
1092                 prealloc = alloc_extent_state_atomic(prealloc);
1093                 BUG_ON(!prealloc);
1094                 err = split_state(tree, state, prealloc, end + 1);
1095                 if (err)
1096                         extent_io_tree_panic(tree, err);
1097
1098                 set_state_bits(tree, prealloc, &bits, changeset);
1099                 cache_state(prealloc, cached_state);
1100                 merge_state(tree, prealloc);
1101                 prealloc = NULL;
1102                 goto out;
1103         }
1104
1105 search_again:
1106         if (start > end)
1107                 goto out;
1108         spin_unlock(&tree->lock);
1109         if (gfpflags_allow_blocking(mask))
1110                 cond_resched();
1111         goto again;
1112
1113 out:
1114         spin_unlock(&tree->lock);
1115         if (prealloc)
1116                 free_extent_state(prealloc);
1117
1118         return err;
1119
1120 }
1121
1122 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1123                    unsigned bits, u64 *failed_start,
1124                    struct extent_state **cached_state, gfp_t mask)
1125 {
1126         return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1127                                 cached_state, mask, NULL);
1128 }
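/*
 * Usage sketch (illustrative; the helper and the chosen bit are arbitrary):
 * setting a bit creates or extends an extent state covering the byte range,
 * and clearing the last bit set on that range removes the state from the
 * tree again, since clear_state_bit() frees fully cleared states.
 */
static void __maybe_unused example_set_then_clear(struct extent_io_tree *tree,
						  u64 start, u64 end)
{
	set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, NULL, GFP_NOFS);

	/* Dropping the only bit set on [start, end] frees its extent state. */
	clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, NULL);
}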
1129
1130
1131 /**
1132  * convert_extent_bit - convert all bits in a given range from one bit to
1133  *                      another
1134  * @tree:       the io tree to search
1135  * @start:      the start offset in bytes
1136  * @end:        the end offset in bytes (inclusive)
1137  * @bits:       the bits to set in this range
1138  * @clear_bits: the bits to clear in this range
1139  * @cached_state:       state that we're going to cache
1140  *
1141  * This will go through and set bits for the given range.  If any states exist
1142  * already in this range they are set with the given bit and cleared of the
1143  * clear_bits.  This is only meant to be used by things that are mergeable, ie
1144  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
1145  * boundary bits like LOCK.
1146  *
1147  * All allocations are done with GFP_NOFS.
1148  */
1149 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1150                        unsigned bits, unsigned clear_bits,
1151                        struct extent_state **cached_state)
1152 {
1153         struct extent_state *state;
1154         struct extent_state *prealloc = NULL;
1155         struct rb_node *node;
1156         struct rb_node **p;
1157         struct rb_node *parent;
1158         int err = 0;
1159         u64 last_start;
1160         u64 last_end;
1161         bool first_iteration = true;
1162
1163         btrfs_debug_check_extent_io_range(tree, start, end);
1164         trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1165                                        clear_bits);
1166
1167 again:
1168         if (!prealloc) {
1169                 /*
1170                  * Best effort, don't worry if extent state allocation fails
1171                  * here for the first iteration. We might have a cached state
1172                  * that matches exactly the target range, in which case no
1173                  * extent state allocations are needed. We'll only know this
1174                  * after locking the tree.
1175                  */
1176                 prealloc = alloc_extent_state(GFP_NOFS);
1177                 if (!prealloc && !first_iteration)
1178                         return -ENOMEM;
1179         }
1180
1181         spin_lock(&tree->lock);
1182         if (cached_state && *cached_state) {
1183                 state = *cached_state;
1184                 if (state->start <= start && state->end > start &&
1185                     extent_state_in_tree(state)) {
1186                         node = &state->rb_node;
1187                         goto hit_next;
1188                 }
1189         }
1190
1191         /*
1192          * this search will find all the extents that end after
1193          * our range starts.
1194          */
1195         node = tree_search_for_insert(tree, start, &p, &parent);
1196         if (!node) {
1197                 prealloc = alloc_extent_state_atomic(prealloc);
1198                 if (!prealloc) {
1199                         err = -ENOMEM;
1200                         goto out;
1201                 }
1202                 err = insert_state(tree, prealloc, start, end,
1203                                    &p, &parent, &bits, NULL);
1204                 if (err)
1205                         extent_io_tree_panic(tree, err);
1206                 cache_state(prealloc, cached_state);
1207                 prealloc = NULL;
1208                 goto out;
1209         }
1210         state = rb_entry(node, struct extent_state, rb_node);
1211 hit_next:
1212         last_start = state->start;
1213         last_end = state->end;
1214
1215         /*
1216          * | ---- desired range ---- |
1217          * | state |
1218          *
1219          * Just lock what we found and keep going
1220          */
1221         if (state->start == start && state->end <= end) {
1222                 set_state_bits(tree, state, &bits, NULL);
1223                 cache_state(state, cached_state);
1224                 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1225                 if (last_end == (u64)-1)
1226                         goto out;
1227                 start = last_end + 1;
1228                 if (start < end && state && state->start == start &&
1229                     !need_resched())
1230                         goto hit_next;
1231                 goto search_again;
1232         }
1233
1234         /*
1235          *     | ---- desired range ---- |
1236          * | state |
1237          *   or
1238          * | ------------- state -------------- |
1239          *
1240          * We need to split the extent we found, and may flip bits on
1241          * second half.
1242          *
1243          * If the extent we found extends past our
1244          * range, we just split and search again.  It'll get split
1245          * again the next time though.
1246          *
1247          * If the extent we found is inside our range, we set the
1248          * desired bit on it.
1249          */
1250         if (state->start < start) {
1251                 prealloc = alloc_extent_state_atomic(prealloc);
1252                 if (!prealloc) {
1253                         err = -ENOMEM;
1254                         goto out;
1255                 }
1256                 err = split_state(tree, state, prealloc, start);
1257                 if (err)
1258                         extent_io_tree_panic(tree, err);
1259                 prealloc = NULL;
1260                 if (err)
1261                         goto out;
1262                 if (state->end <= end) {
1263                         set_state_bits(tree, state, &bits, NULL);
1264                         cache_state(state, cached_state);
1265                         state = clear_state_bit(tree, state, &clear_bits, 0,
1266                                                 NULL);
1267                         if (last_end == (u64)-1)
1268                                 goto out;
1269                         start = last_end + 1;
1270                         if (start < end && state && state->start == start &&
1271                             !need_resched())
1272                                 goto hit_next;
1273                 }
1274                 goto search_again;
1275         }
1276         /*
1277          * | ---- desired range ---- |
1278          *     | state | or               | state |
1279          *
1280          * There's a hole, we need to insert something in it and
1281          * ignore the extent we found.
1282          */
1283         if (state->start > start) {
1284                 u64 this_end;
1285                 if (end < last_start)
1286                         this_end = end;
1287                 else
1288                         this_end = last_start - 1;
1289
1290                 prealloc = alloc_extent_state_atomic(prealloc);
1291                 if (!prealloc) {
1292                         err = -ENOMEM;
1293                         goto out;
1294                 }
1295
1296                 /*
1297                  * Avoid freeing 'prealloc' if it can be merged with
1298                  * the later extent.
1299                  */
1300                 err = insert_state(tree, prealloc, start, this_end,
1301                                    NULL, NULL, &bits, NULL);
1302                 if (err)
1303                         extent_io_tree_panic(tree, err);
1304                 cache_state(prealloc, cached_state);
1305                 prealloc = NULL;
1306                 start = this_end + 1;
1307                 goto search_again;
1308         }
1309         /*
1310          * | ---- desired range ---- |
1311          *                        | state |
1312          * We need to split the extent, and set the bit
1313          * on the first half
1314          */
1315         if (state->start <= end && state->end > end) {
1316                 prealloc = alloc_extent_state_atomic(prealloc);
1317                 if (!prealloc) {
1318                         err = -ENOMEM;
1319                         goto out;
1320                 }
1321
1322                 err = split_state(tree, state, prealloc, end + 1);
1323                 if (err)
1324                         extent_io_tree_panic(tree, err);
1325
1326                 set_state_bits(tree, prealloc, &bits, NULL);
1327                 cache_state(prealloc, cached_state);
1328                 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1329                 prealloc = NULL;
1330                 goto out;
1331         }
1332
1333 search_again:
1334         if (start > end)
1335                 goto out;
1336         spin_unlock(&tree->lock);
1337         cond_resched();
1338         first_iteration = false;
1339         goto again;
1340
1341 out:
1342         spin_unlock(&tree->lock);
1343         if (prealloc)
1344                 free_extent_state(prealloc);
1345
1346         return err;
1347 }
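/*
 * Usage sketch (illustrative; the helper is hypothetical): replace one
 * mergeable bit with another over a range in a single pass, in the style of
 * the DELALLOC-to-DIRTY conversion described above.  A cached state pointer
 * is optional and may stay NULL.
 */
static void __maybe_unused example_convert_bits(struct extent_io_tree *tree,
						u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	/* Set EXTENT_NEED_WAIT and clear EXTENT_DIRTY on [start, end]. */
	convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT, EXTENT_DIRTY,
			   &cached);
	free_extent_state(cached);
}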
1348
1349 /* wrappers around set/clear extent bit */
1350 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1351                            unsigned bits, struct extent_changeset *changeset)
1352 {
1353         /*
1354          * We don't support EXTENT_LOCKED yet, as the changeset records any
1355          * bits that changed, so for the EXTENT_LOCKED case it would either
1356          * fail with -EEXIST or the changeset would record the whole
1357          * range.
1358          */
1359         BUG_ON(bits & EXTENT_LOCKED);
1360
1361         return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1362                                 changeset);
1363 }
1364
1365 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1366                            unsigned bits)
1367 {
1368         return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1369                                 GFP_NOWAIT, NULL);
1370 }
1371
1372 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1373                      unsigned bits, int wake, int delete,
1374                      struct extent_state **cached)
1375 {
1376         return __clear_extent_bit(tree, start, end, bits, wake, delete,
1377                                   cached, GFP_NOFS, NULL);
1378 }
1379
1380 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1381                 unsigned bits, struct extent_changeset *changeset)
1382 {
1383         /*
1384          * Don't support EXTENT_LOCKED case, same reason as
1385          * set_record_extent_bits().
1386          */
1387         BUG_ON(bits & EXTENT_LOCKED);
1388
1389         return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1390                                   changeset);
1391 }
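/*
 * Usage sketch (illustrative; the helper is hypothetical): the *_record_*
 * variants fill an extent_changeset with the byte count and ranges that
 * actually changed, which is what qgroup reservation tracking relies on.
 * The extent_changeset_alloc()/extent_changeset_free() helpers are assumed
 * to be the ones declared in extent_io.h.
 */
static int __maybe_unused example_record_bits(struct extent_io_tree *tree,
					      u64 start, u64 end)
{
	struct extent_changeset *changeset;
	int ret;

	changeset = extent_changeset_alloc();
	if (!changeset)
		return -ENOMEM;

	ret = set_record_extent_bits(tree, start, end, EXTENT_QGROUP_RESERVED,
				     changeset);
	/* changeset->bytes_changed now counts only the newly set bytes. */

	extent_changeset_free(changeset);
	return ret;
}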
1392
1393 /*
1394  * Either insert or lock the state struct between start and end.  Waits for
1395  * any conflicting EXTENT_LOCKED range to be cleared and then retries.
1396  */
1397 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1398                      struct extent_state **cached_state)
1399 {
1400         int err;
1401         u64 failed_start;
1402
1403         while (1) {
1404                 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1405                                        EXTENT_LOCKED, &failed_start,
1406                                        cached_state, GFP_NOFS, NULL);
1407                 if (err == -EEXIST) {
1408                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1409                         start = failed_start;
1410                 } else
1411                         break;
1412                 WARN_ON(start > end);
1413         }
1414         return err;
1415 }
1416
1417 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1418 {
1419         int err;
1420         u64 failed_start;
1421
1422         err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1423                                &failed_start, NULL, GFP_NOFS, NULL);
1424         if (err == -EEXIST) {
1425                 if (failed_start > start)
1426                         clear_extent_bit(tree, start, failed_start - 1,
1427                                          EXTENT_LOCKED, 1, 0, NULL);
1428                 return 0;
1429         }
1430         return 1;
1431 }
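/*
 * Usage sketch (illustrative; the helper is hypothetical): the common
 * pattern around an I/O range.  lock_extent_bits() sleeps until it owns
 * [start, end]; clearing EXTENT_LOCKED afterwards is what the
 * unlock_extent_cached() helper in extent_io.h boils down to.
 */
static void __maybe_unused example_lock_range(struct extent_io_tree *tree,
					      u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent_bits(tree, start, end, &cached);

	/* ... operate on the pages backing [start, end] ... */

	/* wake == 1 so that anyone blocked in lock_extent_bits() retries. */
	clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, &cached);
}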
1432
1433 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1434 {
1435         unsigned long index = start >> PAGE_SHIFT;
1436         unsigned long end_index = end >> PAGE_SHIFT;
1437         struct page *page;
1438
1439         while (index <= end_index) {
1440                 page = find_get_page(inode->i_mapping, index);
1441                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1442                 clear_page_dirty_for_io(page);
1443                 put_page(page);
1444                 index++;
1445         }
1446 }
1447
1448 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1449 {
1450         unsigned long index = start >> PAGE_SHIFT;
1451         unsigned long end_index = end >> PAGE_SHIFT;
1452         struct page *page;
1453
1454         while (index <= end_index) {
1455                 page = find_get_page(inode->i_mapping, index);
1456                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1457                 __set_page_dirty_nobuffers(page);
1458                 account_page_redirty(page);
1459                 put_page(page);
1460                 index++;
1461         }
1462 }
1463
1464 /* find the first state struct with 'bits' set after 'start', and
1465  * return it.  tree->lock must be held.  NULL will be returned if
1466  * nothing was found after 'start'
1467  */
1468 static struct extent_state *
1469 find_first_extent_bit_state(struct extent_io_tree *tree,
1470                             u64 start, unsigned bits)
1471 {
1472         struct rb_node *node;
1473         struct extent_state *state;
1474
1475         /*
1476          * this search will find all the extents that end after
1477          * our range starts.
1478          */
1479         node = tree_search(tree, start);
1480         if (!node)
1481                 goto out;
1482
1483         while (1) {
1484                 state = rb_entry(node, struct extent_state, rb_node);
1485                 if (state->end >= start && (state->state & bits))
1486                         return state;
1487
1488                 node = rb_next(node);
1489                 if (!node)
1490                         break;
1491         }
1492 out:
1493         return NULL;
1494 }
1495
1496 /*
1497  * find the first offset in the io tree with 'bits' set. zero is
1498  * returned if we find something, and *start_ret and *end_ret are
1499  * set to reflect the state struct that was found.
1500  *
1501  * If nothing was found, 1 is returned.
1502  */
1503 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1504                           u64 *start_ret, u64 *end_ret, unsigned bits,
1505                           struct extent_state **cached_state)
1506 {
1507         struct extent_state *state;
1508         int ret = 1;
1509
1510         spin_lock(&tree->lock);
1511         if (cached_state && *cached_state) {
1512                 state = *cached_state;
1513                 if (state->end == start - 1 && extent_state_in_tree(state)) {
1514                         while ((state = next_state(state)) != NULL) {
1515                                 if (state->state & bits)
1516                                         goto got_it;
1517                         }
1518                         free_extent_state(*cached_state);
1519                         *cached_state = NULL;
1520                         goto out;
1521                 }
1522                 free_extent_state(*cached_state);
1523                 *cached_state = NULL;
1524         }
1525
1526         state = find_first_extent_bit_state(tree, start, bits);
1527 got_it:
1528         if (state) {
1529                 cache_state_if_flags(state, cached_state, 0);
1530                 *start_ret = state->start;
1531                 *end_ret = state->end;
1532                 ret = 0;
1533         }
1534 out:
1535         spin_unlock(&tree->lock);
1536         return ret;
1537 }
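
/*
 * Illustrative sketch, not part of the original file: walking every range
 * that has a given bit set by repeatedly calling find_first_extent_bit()
 * and advancing past the returned range.  The tree and the bit used here
 * are hypothetical.
 *
 *	u64 cur = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */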
1538
1539 /**
1540  * find_first_clear_extent_bit - finds the first range that has @bits not set
1541  *                               and that starts after @start
1542  *
1543  * @tree:      the tree to search
1544  * @start:     the offset at/after which the found extent should start
1545  * @start_ret: records the beginning of the range
1546  * @end_ret:   records the end of the range (inclusive)
1547  * @bits:      the set of bits which must be unset
1548  *
1549  * Since an unallocated range is also considered one which doesn't have the
1550  * bits set, it's possible that @end_ret contains -1; this happens in case the
1551  * range spans (last_range_end, end of device]. In this case it's up to the
1552  * caller to trim @end_ret to the appropriate size.
1553  */
1554 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1555                                  u64 *start_ret, u64 *end_ret, unsigned bits)
1556 {
1557         struct extent_state *state;
1558         struct rb_node *node, *prev = NULL, *next;
1559
1560         spin_lock(&tree->lock);
1561
1562         /* Find first extent with bits cleared */
1563         while (1) {
1564                 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1565                 if (!node) {
1566                         node = next;
1567                         if (!node) {
1568                                 /*
1569                                  * We are past the last allocated chunk,
1570                                  * set start at the end of the last extent. The
1571                                  * device alloc tree should never be empty so
1572                                  * prev is always set.
1573                                  */
1574                                 ASSERT(prev);
1575                                 state = rb_entry(prev, struct extent_state, rb_node);
1576                                 *start_ret = state->end + 1;
1577                                 *end_ret = -1;
1578                                 goto out;
1579                         }
1580                 }
1581                 state = rb_entry(node, struct extent_state, rb_node);
1582                 if (in_range(start, state->start, state->end - state->start + 1) &&
1583                         (state->state & bits)) {
1584                         start = state->end + 1;
1585                 } else {
1586                         *start_ret = start;
1587                         break;
1588                 }
1589         }
1590
1591         /*
1592          * Find the longest stretch from start until an entry which has the
1593          * bits set
1594          */
1595         while (1) {
1596                 state = rb_entry(node, struct extent_state, rb_node);
1597                 if (state->end >= start && !(state->state & bits)) {
1598                         *end_ret = state->end;
1599                 } else {
1600                         *end_ret = state->start - 1;
1601                         break;
1602                 }
1603
1604                 node = rb_next(node);
1605                 if (!node)
1606                         break;
1607         }
1608 out:
1609         spin_unlock(&tree->lock);
1610 }
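
/*
 * Illustrative sketch, not part of the original file: as described above,
 * @end_ret may come back as -1 when the clear range runs past the last
 * allocated extent, so a caller bounded by 'total_bytes' (a hypothetical
 * variable, as are 'tree', 'start' and 'bits') would trim it:
 *
 *	u64 hole_start, hole_end;
 *
 *	find_first_clear_extent_bit(tree, start, &hole_start, &hole_end, bits);
 *	if (hole_end == (u64)-1)
 *		hole_end = total_bytes - 1;
 */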
1611
1612 /*
1613  * find a contiguous range of bytes in the file marked as delalloc, not
1614  * more than 'max_bytes'.  start and end are used to return the range.
1615  *
1616  * true is returned if we find something, false if nothing was in the tree.
1617  */
1618 static noinline bool find_delalloc_range(struct extent_io_tree *tree,
1619                                         u64 *start, u64 *end, u64 max_bytes,
1620                                         struct extent_state **cached_state)
1621 {
1622         struct rb_node *node;
1623         struct extent_state *state;
1624         u64 cur_start = *start;
1625         bool found = false;
1626         u64 total_bytes = 0;
1627
1628         spin_lock(&tree->lock);
1629
1630         /*
1631          * this search will find all the extents that end after
1632          * our range starts.
1633          */
1634         node = tree_search(tree, cur_start);
1635         if (!node) {
1636                 *end = (u64)-1;
1637                 goto out;
1638         }
1639
1640         while (1) {
1641                 state = rb_entry(node, struct extent_state, rb_node);
1642                 if (found && (state->start != cur_start ||
1643                               (state->state & EXTENT_BOUNDARY))) {
1644                         goto out;
1645                 }
1646                 if (!(state->state & EXTENT_DELALLOC)) {
1647                         if (!found)
1648                                 *end = state->end;
1649                         goto out;
1650                 }
1651                 if (!found) {
1652                         *start = state->start;
1653                         *cached_state = state;
1654                         refcount_inc(&state->refs);
1655                 }
1656                 found = true;
1657                 *end = state->end;
1658                 cur_start = state->end + 1;
1659                 node = rb_next(node);
1660                 total_bytes += state->end - state->start + 1;
1661                 if (total_bytes >= max_bytes)
1662                         break;
1663                 if (!node)
1664                         break;
1665         }
1666 out:
1667         spin_unlock(&tree->lock);
1668         return found;
1669 }
1670
1671 static int __process_pages_contig(struct address_space *mapping,
1672                                   struct page *locked_page,
1673                                   pgoff_t start_index, pgoff_t end_index,
1674                                   unsigned long page_ops, pgoff_t *index_ret);
1675
1676 static noinline void __unlock_for_delalloc(struct inode *inode,
1677                                            struct page *locked_page,
1678                                            u64 start, u64 end)
1679 {
1680         unsigned long index = start >> PAGE_SHIFT;
1681         unsigned long end_index = end >> PAGE_SHIFT;
1682
1683         ASSERT(locked_page);
1684         if (index == locked_page->index && end_index == index)
1685                 return;
1686
1687         __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1688                                PAGE_UNLOCK, NULL);
1689 }
1690
1691 static noinline int lock_delalloc_pages(struct inode *inode,
1692                                         struct page *locked_page,
1693                                         u64 delalloc_start,
1694                                         u64 delalloc_end)
1695 {
1696         unsigned long index = delalloc_start >> PAGE_SHIFT;
1697         unsigned long index_ret = index;
1698         unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1699         int ret;
1700
1701         ASSERT(locked_page);
1702         if (index == locked_page->index && index == end_index)
1703                 return 0;
1704
1705         ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1706                                      end_index, PAGE_LOCK, &index_ret);
1707         if (ret == -EAGAIN)
1708                 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1709                                       (u64)index_ret << PAGE_SHIFT);
1710         return ret;
1711 }
1712
1713 /*
1714  * Find and lock a contiguous range of bytes in the file marked as delalloc,
1715  * no more than @max_bytes.  @start and @end are used to return the range.
1716  *
1717  * Return: true if we find something
1718  *         false if nothing was in the tree
1719  */
1720 EXPORT_FOR_TESTS
1721 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1722                                     struct extent_io_tree *tree,
1723                                     struct page *locked_page, u64 *start,
1724                                     u64 *end)
1725 {
1726         u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1727         u64 delalloc_start;
1728         u64 delalloc_end;
1729         bool found;
1730         struct extent_state *cached_state = NULL;
1731         int ret;
1732         int loops = 0;
1733
1734 again:
1735         /* step one, find a bunch of delalloc bytes starting at start */
1736         delalloc_start = *start;
1737         delalloc_end = 0;
1738         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1739                                     max_bytes, &cached_state);
1740         if (!found || delalloc_end <= *start) {
1741                 *start = delalloc_start;
1742                 *end = delalloc_end;
1743                 free_extent_state(cached_state);
1744                 return false;
1745         }
1746
1747         /*
1748          * start comes from the offset of locked_page.  We have to lock
1749          * pages in order, so we can't process delalloc bytes before
1750          * locked_page
1751          */
1752         if (delalloc_start < *start)
1753                 delalloc_start = *start;
1754
1755         /*
1756          * make sure to limit the number of pages we try to lock down
1757          */
1758         if (delalloc_end + 1 - delalloc_start > max_bytes)
1759                 delalloc_end = delalloc_start + max_bytes - 1;
1760
1761         /* step two, lock all the pages after the page that has start */
1762         ret = lock_delalloc_pages(inode, locked_page,
1763                                   delalloc_start, delalloc_end);
1764         ASSERT(!ret || ret == -EAGAIN);
1765         if (ret == -EAGAIN) {
1766                 /* some of the pages are gone, let's avoid looping by
1767                  * shortening the size of the delalloc range we're searching
1768                  */
1769                 free_extent_state(cached_state);
1770                 cached_state = NULL;
1771                 if (!loops) {
1772                         max_bytes = PAGE_SIZE;
1773                         loops = 1;
1774                         goto again;
1775                 } else {
1776                         found = false;
1777                         goto out_failed;
1778                 }
1779         }
1780
1781         /* step three, lock the state bits for the whole range */
1782         lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1783
1784         /* then test to make sure it is all still delalloc */
1785         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1786                              EXTENT_DELALLOC, 1, cached_state);
1787         if (!ret) {
1788                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1789                                      &cached_state);
1790                 __unlock_for_delalloc(inode, locked_page,
1791                               delalloc_start, delalloc_end);
1792                 cond_resched();
1793                 goto again;
1794         }
1795         free_extent_state(cached_state);
1796         *start = delalloc_start;
1797         *end = delalloc_end;
1798 out_failed:
1799         return found;
1800 }
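
/*
 * Illustrative sketch, not part of the original file: the writepage path
 * conceptually calls find_lock_delalloc_range() in a loop over the byte
 * range covered by the locked page; 'page_start' and 'page_end' are
 * hypothetical bounds of that page.
 *
 *	u64 delalloc_start = page_start;
 *	u64 delalloc_end = 0;
 *
 *	while (delalloc_start < page_end) {
 *		if (!find_lock_delalloc_range(inode, tree, locked_page,
 *					      &delalloc_start, &delalloc_end)) {
 *			delalloc_start = delalloc_end + 1;
 *			continue;
 *		}
 *		... run delalloc for [delalloc_start, delalloc_end] ...
 *		delalloc_start = delalloc_end + 1;
 *	}
 */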
1801
1802 static int __process_pages_contig(struct address_space *mapping,
1803                                   struct page *locked_page,
1804                                   pgoff_t start_index, pgoff_t end_index,
1805                                   unsigned long page_ops, pgoff_t *index_ret)
1806 {
1807         unsigned long nr_pages = end_index - start_index + 1;
1808         unsigned long pages_locked = 0;
1809         pgoff_t index = start_index;
1810         struct page *pages[16];
1811         unsigned ret;
1812         int err = 0;
1813         int i;
1814
1815         if (page_ops & PAGE_LOCK) {
1816                 ASSERT(page_ops == PAGE_LOCK);
1817                 ASSERT(index_ret && *index_ret == start_index);
1818         }
1819
1820         if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1821                 mapping_set_error(mapping, -EIO);
1822
1823         while (nr_pages > 0) {
1824                 ret = find_get_pages_contig(mapping, index,
1825                                      min_t(unsigned long,
1826                                      nr_pages, ARRAY_SIZE(pages)), pages);
1827                 if (ret == 0) {
1828                         /*
1829                          * Only if we're going to lock these pages,
1830                          * can we find nothing at @index.
1831                          */
1832                         ASSERT(page_ops & PAGE_LOCK);
1833                         err = -EAGAIN;
1834                         goto out;
1835                 }
1836
1837                 for (i = 0; i < ret; i++) {
1838                         if (page_ops & PAGE_SET_PRIVATE2)
1839                                 SetPagePrivate2(pages[i]);
1840
1841                         if (pages[i] == locked_page) {
1842                                 put_page(pages[i]);
1843                                 pages_locked++;
1844                                 continue;
1845                         }
1846                         if (page_ops & PAGE_CLEAR_DIRTY)
1847                                 clear_page_dirty_for_io(pages[i]);
1848                         if (page_ops & PAGE_SET_WRITEBACK)
1849                                 set_page_writeback(pages[i]);
1850                         if (page_ops & PAGE_SET_ERROR)
1851                                 SetPageError(pages[i]);
1852                         if (page_ops & PAGE_END_WRITEBACK)
1853                                 end_page_writeback(pages[i]);
1854                         if (page_ops & PAGE_UNLOCK)
1855                                 unlock_page(pages[i]);
1856                         if (page_ops & PAGE_LOCK) {
1857                                 lock_page(pages[i]);
1858                                 if (!PageDirty(pages[i]) ||
1859                                     pages[i]->mapping != mapping) {
1860                                         unlock_page(pages[i]);
1861                                         put_page(pages[i]);
1862                                         err = -EAGAIN;
1863                                         goto out;
1864                                 }
1865                         }
1866                         put_page(pages[i]);
1867                         pages_locked++;
1868                 }
1869                 nr_pages -= ret;
1870                 index += ret;
1871                 cond_resched();
1872         }
1873 out:
1874         if (err && index_ret)
1875                 *index_ret = start_index + pages_locked - 1;
1876         return err;
1877 }
1878
1879 void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1880                                  u64 delalloc_end, struct page *locked_page,
1881                                  unsigned clear_bits,
1882                                  unsigned long page_ops)
1883 {
1884         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
1885                          NULL);
1886
1887         __process_pages_contig(inode->i_mapping, locked_page,
1888                                start >> PAGE_SHIFT, end >> PAGE_SHIFT,
1889                                page_ops, NULL);
1890 }
1891
1892 /*
1893  * count the number of bytes in the tree that have a given bit(s)
1894  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1895  * cached.  The total number found is returned.
1896  */
1897 u64 count_range_bits(struct extent_io_tree *tree,
1898                      u64 *start, u64 search_end, u64 max_bytes,
1899                      unsigned bits, int contig)
1900 {
1901         struct rb_node *node;
1902         struct extent_state *state;
1903         u64 cur_start = *start;
1904         u64 total_bytes = 0;
1905         u64 last = 0;
1906         int found = 0;
1907
1908         if (WARN_ON(search_end <= cur_start))
1909                 return 0;
1910
1911         spin_lock(&tree->lock);
1912         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1913                 total_bytes = tree->dirty_bytes;
1914                 goto out;
1915         }
1916         /*
1917          * this search will find all the extents that end after
1918          * our range starts.
1919          */
1920         node = tree_search(tree, cur_start);
1921         if (!node)
1922                 goto out;
1923
1924         while (1) {
1925                 state = rb_entry(node, struct extent_state, rb_node);
1926                 if (state->start > search_end)
1927                         break;
1928                 if (contig && found && state->start > last + 1)
1929                         break;
1930                 if (state->end >= cur_start && (state->state & bits) == bits) {
1931                         total_bytes += min(search_end, state->end) + 1 -
1932                                        max(cur_start, state->start);
1933                         if (total_bytes >= max_bytes)
1934                                 break;
1935                         if (!found) {
1936                                 *start = max(cur_start, state->start);
1937                                 found = 1;
1938                         }
1939                         last = state->end;
1940                 } else if (contig && found) {
1941                         break;
1942                 }
1943                 node = rb_next(node);
1944                 if (!node)
1945                         break;
1946         }
1947 out:
1948         spin_unlock(&tree->lock);
1949         return total_bytes;
1950 }
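
/*
 * Illustrative sketch, not part of the original file: counting how many
 * delalloc bytes sit inside a file range.  'inode', 'start' and 'end' are
 * hypothetical; with contig == 1 the count stops at the first gap.
 *
 *	u64 search_start = start;
 *	u64 delalloc_bytes;
 *
 *	delalloc_bytes = count_range_bits(&BTRFS_I(inode)->io_tree,
 *					  &search_start, end, end - start + 1,
 *					  EXTENT_DELALLOC, 1);
 */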
1951
1952 /*
1953  * set the failrec pointer for a given byte offset in the tree.  If there isn't
1954  * an extent_state starting at that offset, -ENOENT is returned.
1955  */
1956 static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
1957                 struct io_failure_record *failrec)
1958 {
1959         struct rb_node *node;
1960         struct extent_state *state;
1961         int ret = 0;
1962
1963         spin_lock(&tree->lock);
1964         /*
1965          * this search will find all the extents that end after
1966          * our range starts.
1967          */
1968         node = tree_search(tree, start);
1969         if (!node) {
1970                 ret = -ENOENT;
1971                 goto out;
1972         }
1973         state = rb_entry(node, struct extent_state, rb_node);
1974         if (state->start != start) {
1975                 ret = -ENOENT;
1976                 goto out;
1977         }
1978         state->failrec = failrec;
1979 out:
1980         spin_unlock(&tree->lock);
1981         return ret;
1982 }
1983
1984 static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
1985                 struct io_failure_record **failrec)
1986 {
1987         struct rb_node *node;
1988         struct extent_state *state;
1989         int ret = 0;
1990
1991         spin_lock(&tree->lock);
1992         /*
1993          * this search will find all the extents that end after
1994          * our range starts.
1995          */
1996         node = tree_search(tree, start);
1997         if (!node) {
1998                 ret = -ENOENT;
1999                 goto out;
2000         }
2001         state = rb_entry(node, struct extent_state, rb_node);
2002         if (state->start != start) {
2003                 ret = -ENOENT;
2004                 goto out;
2005         }
2006         *failrec = state->failrec;
2007 out:
2008         spin_unlock(&tree->lock);
2009         return ret;
2010 }
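
/*
 * Illustrative sketch, not part of the original file: set_state_failrec()
 * and get_state_failrec() are used as a pair, keyed by the record's start
 * offset.  'failure_tree' and 'failrec' are hypothetical here.
 *
 *	struct io_failure_record *found = NULL;
 *
 *	ret = set_state_failrec(failure_tree, failrec->start, failrec);
 *	...
 *	ret = get_state_failrec(failure_tree, failrec->start, &found);
 *	if (!ret)
 *		... 'found' now points at the record stored above ...
 */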
2011
2012 /*
2013  * searches a range in the state tree for given bits.
2014  * If 'filled' == 1, this returns 1 only if every extent in the range
2015  * has the bits set.  Otherwise, 1 is returned if any bit in the
2016  * range is found set.
2017  */
2018 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2019                    unsigned bits, int filled, struct extent_state *cached)
2020 {
2021         struct extent_state *state = NULL;
2022         struct rb_node *node;
2023         int bitset = 0;
2024
2025         spin_lock(&tree->lock);
2026         if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2027             cached->end > start)
2028                 node = &cached->rb_node;
2029         else
2030                 node = tree_search(tree, start);
2031         while (node && start <= end) {
2032                 state = rb_entry(node, struct extent_state, rb_node);
2033
2034                 if (filled && state->start > start) {
2035                         bitset = 0;
2036                         break;
2037                 }
2038
2039                 if (state->start > end)
2040                         break;
2041
2042                 if (state->state & bits) {
2043                         bitset = 1;
2044                         if (!filled)
2045                                 break;
2046                 } else if (filled) {
2047                         bitset = 0;
2048                         break;
2049                 }
2050
2051                 if (state->end == (u64)-1)
2052                         break;
2053
2054                 start = state->end + 1;
2055                 if (start > end)
2056                         break;
2057                 node = rb_next(node);
2058                 if (!node) {
2059                         if (filled)
2060                                 bitset = 0;
2061                         break;
2062                 }
2063         }
2064         spin_unlock(&tree->lock);
2065         return bitset;
2066 }
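
/*
 * Illustrative sketch, not part of the original file, of the 'filled'
 * semantics described above; 'tree', 'start' and 'end' are hypothetical.
 *
 *	int all, any;
 *
 *	all = test_range_bit(tree, start, end, EXTENT_DELALLOC, 1, NULL);
 *	any = test_range_bit(tree, start, end, EXTENT_DELALLOC, 0, NULL);
 *
 * 'all' is 1 only if every byte of [start, end] is delalloc, while 'any'
 * is 1 as soon as any part of the range has the bit set.
 */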
2067
2068 /*
2069  * helper function to set a given page up to date if all the
2070  * extents in the tree for that page are up to date
2071  */
2072 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2073 {
2074         u64 start = page_offset(page);
2075         u64 end = start + PAGE_SIZE - 1;
2076         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2077                 SetPageUptodate(page);
2078 }
2079
2080 int free_io_failure(struct extent_io_tree *failure_tree,
2081                     struct extent_io_tree *io_tree,
2082                     struct io_failure_record *rec)
2083 {
2084         int ret;
2085         int err = 0;
2086
2087         set_state_failrec(failure_tree, rec->start, NULL);
2088         ret = clear_extent_bits(failure_tree, rec->start,
2089                                 rec->start + rec->len - 1,
2090                                 EXTENT_LOCKED | EXTENT_DIRTY);
2091         if (ret)
2092                 err = ret;
2093
2094         ret = clear_extent_bits(io_tree, rec->start,
2095                                 rec->start + rec->len - 1,
2096                                 EXTENT_DAMAGED);
2097         if (ret && !err)
2098                 err = ret;
2099
2100         kfree(rec);
2101         return err;
2102 }
2103
2104 /*
2105  * this bypasses the standard btrfs submit functions deliberately, as
2106  * the standard behavior is to write all copies in a raid setup. here we only
2107  * want to write the one bad copy. so we do the mapping for ourselves and issue
2108  * submit_bio directly.
2109  * to avoid any synchronization issues, wait for the data after writing, which
2110  * actually prevents the read that triggered the error from finishing.
2111  * currently, there can be no more than two copies of every data bit. thus,
2112  * exactly one rewrite is required.
2113  */
2114 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2115                       u64 length, u64 logical, struct page *page,
2116                       unsigned int pg_offset, int mirror_num)
2117 {
2118         struct bio *bio;
2119         struct btrfs_device *dev;
2120         u64 map_length = 0;
2121         u64 sector;
2122         struct btrfs_bio *bbio = NULL;
2123         int ret;
2124
2125         ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2126         BUG_ON(!mirror_num);
2127
2128         bio = btrfs_io_bio_alloc(1);
2129         bio->bi_iter.bi_size = 0;
2130         map_length = length;
2131
2132         /*
2133          * Avoid races with device replace and make sure our bbio has devices
2134          * associated to its stripes that don't go away while we are doing the
2135          * read repair operation.
2136          */
2137         btrfs_bio_counter_inc_blocked(fs_info);
2138         if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2139                 /*
2140                  * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2141                  * to update all raid stripes, but here we just want to correct
2142                  * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2143                  * stripe's dev and sector.
2144                  */
2145                 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2146                                       &map_length, &bbio, 0);
2147                 if (ret) {
2148                         btrfs_bio_counter_dec(fs_info);
2149                         bio_put(bio);
2150                         return -EIO;
2151                 }
2152                 ASSERT(bbio->mirror_num == 1);
2153         } else {
2154                 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2155                                       &map_length, &bbio, mirror_num);
2156                 if (ret) {
2157                         btrfs_bio_counter_dec(fs_info);
2158                         bio_put(bio);
2159                         return -EIO;
2160                 }
2161                 BUG_ON(mirror_num != bbio->mirror_num);
2162         }
2163
2164         sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2165         bio->bi_iter.bi_sector = sector;
2166         dev = bbio->stripes[bbio->mirror_num - 1].dev;
2167         btrfs_put_bbio(bbio);
2168         if (!dev || !dev->bdev ||
2169             !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2170                 btrfs_bio_counter_dec(fs_info);
2171                 bio_put(bio);
2172                 return -EIO;
2173         }
2174         bio_set_dev(bio, dev->bdev);
2175         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2176         bio_add_page(bio, page, length, pg_offset);
2177
2178         if (btrfsic_submit_bio_wait(bio)) {
2179                 /* try to remap that extent elsewhere? */
2180                 btrfs_bio_counter_dec(fs_info);
2181                 bio_put(bio);
2182                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2183                 return -EIO;
2184         }
2185
2186         btrfs_info_rl_in_rcu(fs_info,
2187                 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
2188                                   ino, start,
2189                                   rcu_str_deref(dev->name), sector);
2190         btrfs_bio_counter_dec(fs_info);
2191         bio_put(bio);
2192         return 0;
2193 }
2194
2195 int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num)
2196 {
2197         struct btrfs_fs_info *fs_info = eb->fs_info;
2198         u64 start = eb->start;
2199         int i, num_pages = num_extent_pages(eb);
2200         int ret = 0;
2201
2202         if (sb_rdonly(fs_info->sb))
2203                 return -EROFS;
2204
2205         for (i = 0; i < num_pages; i++) {
2206                 struct page *p = eb->pages[i];
2207
2208                 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2209                                         start - page_offset(p), mirror_num);
2210                 if (ret)
2211                         break;
2212                 start += PAGE_SIZE;
2213         }
2214
2215         return ret;
2216 }
2217
2218 /*
2219  * each time an IO finishes, we do a fast check in the IO failure tree
2220  * to see if we need to process or clean up an io_failure_record
2221  */
2222 int clean_io_failure(struct btrfs_fs_info *fs_info,
2223                      struct extent_io_tree *failure_tree,
2224                      struct extent_io_tree *io_tree, u64 start,
2225                      struct page *page, u64 ino, unsigned int pg_offset)
2226 {
2227         u64 private;
2228         struct io_failure_record *failrec;
2229         struct extent_state *state;
2230         int num_copies;
2231         int ret;
2232
2233         private = 0;
2234         ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2235                                EXTENT_DIRTY, 0);
2236         if (!ret)
2237                 return 0;
2238
2239         ret = get_state_failrec(failure_tree, start, &failrec);
2240         if (ret)
2241                 return 0;
2242
2243         BUG_ON(!failrec->this_mirror);
2244
2245         if (failrec->in_validation) {
2246                 /* there was no real error, just free the record */
2247                 btrfs_debug(fs_info,
2248                         "clean_io_failure: freeing dummy error at %llu",
2249                         failrec->start);
2250                 goto out;
2251         }
2252         if (sb_rdonly(fs_info->sb))
2253                 goto out;
2254
2255         spin_lock(&io_tree->lock);
2256         state = find_first_extent_bit_state(io_tree,
2257                                             failrec->start,
2258                                             EXTENT_LOCKED);
2259         spin_unlock(&io_tree->lock);
2260
2261         if (state && state->start <= failrec->start &&
2262             state->end >= failrec->start + failrec->len - 1) {
2263                 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2264                                               failrec->len);
2265                 if (num_copies > 1)  {
2266                         repair_io_failure(fs_info, ino, start, failrec->len,
2267                                           failrec->logical, page, pg_offset,
2268                                           failrec->failed_mirror);
2269                 }
2270         }
2271
2272 out:
2273         free_io_failure(failure_tree, io_tree, failrec);
2274
2275         return 0;
2276 }
2277
2278 /*
2279  * Can be called when
2280  * - the extent lock is held
2281  * - we are under an ordered extent
2282  * - the inode is being freed
2283  */
2284 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2285 {
2286         struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2287         struct io_failure_record *failrec;
2288         struct extent_state *state, *next;
2289
2290         if (RB_EMPTY_ROOT(&failure_tree->state))
2291                 return;
2292
2293         spin_lock(&failure_tree->lock);
2294         state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2295         while (state) {
2296                 if (state->start > end)
2297                         break;
2298
2299                 ASSERT(state->end <= end);
2300
2301                 next = next_state(state);
2302
2303                 failrec = state->failrec;
2304                 free_extent_state(state);
2305                 kfree(failrec);
2306
2307                 state = next;
2308         }
2309         spin_unlock(&failure_tree->lock);
2310 }
2311
2312 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2313                 struct io_failure_record **failrec_ret)
2314 {
2315         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2316         struct io_failure_record *failrec;
2317         struct extent_map *em;
2318         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2319         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2320         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2321         int ret;
2322         u64 logical;
2323
2324         ret = get_state_failrec(failure_tree, start, &failrec);
2325         if (ret) {
2326                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2327                 if (!failrec)
2328                         return -ENOMEM;
2329
2330                 failrec->start = start;
2331                 failrec->len = end - start + 1;
2332                 failrec->this_mirror = 0;
2333                 failrec->bio_flags = 0;
2334                 failrec->in_validation = 0;
2335
2336                 read_lock(&em_tree->lock);
2337                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2338                 if (!em) {
2339                         read_unlock(&em_tree->lock);
2340                         kfree(failrec);
2341                         return -EIO;
2342                 }
2343
2344                 if (em->start > start || em->start + em->len <= start) {
2345                         free_extent_map(em);
2346                         em = NULL;
2347                 }
2348                 read_unlock(&em_tree->lock);
2349                 if (!em) {
2350                         kfree(failrec);
2351                         return -EIO;
2352                 }
2353
2354                 logical = start - em->start;
2355                 logical = em->block_start + logical;
2356                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2357                         logical = em->block_start;
2358                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2359                         extent_set_compress_type(&failrec->bio_flags,
2360                                                  em->compress_type);
2361                 }
2362
2363                 btrfs_debug(fs_info,
2364                         "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2365                         logical, start, failrec->len);
2366
2367                 failrec->logical = logical;
2368                 free_extent_map(em);
2369
2370                 /* set the bits in the private failure tree */
2371                 ret = set_extent_bits(failure_tree, start, end,
2372                                         EXTENT_LOCKED | EXTENT_DIRTY);
2373                 if (ret >= 0)
2374                         ret = set_state_failrec(failure_tree, start, failrec);
2375                 /* set the bits in the inode's tree */
2376                 if (ret >= 0)
2377                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2378                 if (ret < 0) {
2379                         kfree(failrec);
2380                         return ret;
2381                 }
2382         } else {
2383                 btrfs_debug(fs_info,
2384                         "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2385                         failrec->logical, failrec->start, failrec->len,
2386                         failrec->in_validation);
2387                 /*
2388                  * when data can be on disk more than twice, add to failrec here
2389                  * (e.g. with a list for failed_mirror) to make
2390                  * clean_io_failure() clean all those errors at once.
2391                  */
2392         }
2393
2394         *failrec_ret = failrec;
2395
2396         return 0;
2397 }
2398
2399 bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
2400                            struct io_failure_record *failrec, int failed_mirror)
2401 {
2402         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2403         int num_copies;
2404
2405         num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2406         if (num_copies == 1) {
2407                 /*
2408                  * we only have a single copy of the data, so don't bother with
2409                  * all the retry and error correction code that follows. no
2410                  * matter what the error is, it is very likely to persist.
2411                  */
2412                 btrfs_debug(fs_info,
2413                         "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2414                         num_copies, failrec->this_mirror, failed_mirror);
2415                 return false;
2416         }
2417
2418         /*
2419          * there are two premises:
2420          *      a) deliver good data to the caller
2421          *      b) correct the bad sectors on disk
2422          */
2423         if (failed_bio_pages > 1) {
2424                 /*
2425                  * to fulfill b), we need to know the exact failing sectors, as
2426                  * we don't want to rewrite any more than the failed ones. thus,
2427                  * we need separate read requests for the failed bio
2428                  *
2429                  * if the following BUG_ON triggers, our validation request got
2430                  * merged. we need separate requests for our algorithm to work.
2431                  */
2432                 BUG_ON(failrec->in_validation);
2433                 failrec->in_validation = 1;
2434                 failrec->this_mirror = failed_mirror;
2435         } else {
2436                 /*
2437                  * we're ready to fulfill a) and b) alongside. get a good copy
2438                  * of the failed sector and if we succeed, we have setup
2439                  * everything for repair_io_failure to do the rest for us.
2440                  */
2441                 if (failrec->in_validation) {
2442                         BUG_ON(failrec->this_mirror != failed_mirror);
2443                         failrec->in_validation = 0;
2444                         failrec->this_mirror = 0;
2445                 }
2446                 failrec->failed_mirror = failed_mirror;
2447                 failrec->this_mirror++;
2448                 if (failrec->this_mirror == failed_mirror)
2449                         failrec->this_mirror++;
2450         }
2451
2452         if (failrec->this_mirror > num_copies) {
2453                 btrfs_debug(fs_info,
2454                         "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2455                         num_copies, failrec->this_mirror, failed_mirror);
2456                 return false;
2457         }
2458
2459         return true;
2460 }
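
/*
 * Illustrative walk-through, not part of the original file: with
 * num_copies == 2 and failed_mirror == 1, a single-page failed bio takes
 * the else branch above, so this_mirror becomes 2 and the retry reads the
 * second copy.  If that read also fails, the next call advances
 * this_mirror to 3, which exceeds num_copies, and the function returns
 * false.
 */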
2461
2462
2463 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2464                                     struct io_failure_record *failrec,
2465                                     struct page *page, int pg_offset, int icsum,
2466                                     bio_end_io_t *endio_func, void *data)
2467 {
2468         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2469         struct bio *bio;
2470         struct btrfs_io_bio *btrfs_failed_bio;
2471         struct btrfs_io_bio *btrfs_bio;
2472
2473         bio = btrfs_io_bio_alloc(1);
2474         bio->bi_end_io = endio_func;
2475         bio->bi_iter.bi_sector = failrec->logical >> 9;
2476         bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
2477         bio->bi_iter.bi_size = 0;
2478         bio->bi_private = data;
2479
2480         btrfs_failed_bio = btrfs_io_bio(failed_bio);
2481         if (btrfs_failed_bio->csum) {
2482                 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2483
2484                 btrfs_bio = btrfs_io_bio(bio);
2485                 btrfs_bio->csum = btrfs_bio->csum_inline;
2486                 icsum *= csum_size;
2487                 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2488                        csum_size);
2489         }
2490
2491         bio_add_page(bio, page, failrec->len, pg_offset);
2492
2493         return bio;
2494 }
2495
2496 /*
2497  * This is a generic handler for readpage errors. If other copies exist, read
2498  * those and write back good data to the failed position. It does not attempt
2499  * to remap the failed extent elsewhere, hoping the device will be smart
2500  * enough to do this as needed.
2501  */
2502 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2503                               struct page *page, u64 start, u64 end,
2504                               int failed_mirror)
2505 {
2506         struct io_failure_record *failrec;
2507         struct inode *inode = page->mapping->host;
2508         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2509         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2510         struct bio *bio;
2511         int read_mode = 0;
2512         blk_status_t status;
2513         int ret;
2514         unsigned failed_bio_pages = failed_bio->bi_iter.bi_size >> PAGE_SHIFT;
2515
2516         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2517
2518         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2519         if (ret)
2520                 return ret;
2521
2522         if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
2523                                     failed_mirror)) {
2524                 free_io_failure(failure_tree, tree, failrec);
2525                 return -EIO;
2526         }
2527
2528         if (failed_bio_pages > 1)
2529                 read_mode |= REQ_FAILFAST_DEV;
2530
2531         phy_offset >>= inode->i_sb->s_blocksize_bits;
2532         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2533                                       start - page_offset(page),
2534                                       (int)phy_offset, failed_bio->bi_end_io,
2535                                       NULL);
2536         bio->bi_opf = REQ_OP_READ | read_mode;
2537
2538         btrfs_debug(btrfs_sb(inode->i_sb),
2539                 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2540                 read_mode, failrec->this_mirror, failrec->in_validation);
2541
2542         status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
2543                                          failrec->bio_flags);
2544         if (status) {
2545                 free_io_failure(failure_tree, tree, failrec);
2546                 bio_put(bio);
2547                 ret = blk_status_to_errno(status);
2548         }
2549
2550         return ret;
2551 }
2552
2553 /* lots and lots of room for performance fixes in the end_bio funcs */
2554
2555 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2556 {
2557         int uptodate = (err == 0);
2558         int ret = 0;
2559
2560         btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2561
2562         if (!uptodate) {
2563                 ClearPageUptodate(page);
2564                 SetPageError(page);
2565                 ret = err < 0 ? err : -EIO;
2566                 mapping_set_error(page->mapping, ret);
2567         }
2568 }
2569
2570 /*
2571  * after a writepage IO is done, we need to:
2572  * clear the uptodate bits on error
2573  * clear the writeback bits in the extent tree for this IO
2574  * end_page_writeback if the page has no more pending IO
2575  *
2576  * Scheduling is not allowed, so the extent state tree is expected
2577  * to have one and only one object corresponding to this IO.
2578  */
2579 static void end_bio_extent_writepage(struct bio *bio)
2580 {
2581         int error = blk_status_to_errno(bio->bi_status);
2582         struct bio_vec *bvec;
2583         u64 start;
2584         u64 end;
2585         struct bvec_iter_all iter_all;
2586
2587         ASSERT(!bio_flagged(bio, BIO_CLONED));
2588         bio_for_each_segment_all(bvec, bio, iter_all) {
2589                 struct page *page = bvec->bv_page;
2590                 struct inode *inode = page->mapping->host;
2591                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2592
2593                 /* We always issue full-page writes, but if some block
2594                  * in a page fails to write, blk_update_request() will
2595                  * advance bv_offset and adjust bv_len to compensate.
2596                  * Print a warning for nonzero offsets, and an error
2597                  * if they don't add up to a full page.  */
2598                 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2599                         if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2600                                 btrfs_err(fs_info,
2601                                    "partial page write in btrfs with offset %u and length %u",
2602                                         bvec->bv_offset, bvec->bv_len);
2603                         else
2604                                 btrfs_info(fs_info,
2605                                    "incomplete page write in btrfs with offset %u and length %u",
2606                                         bvec->bv_offset, bvec->bv_len);
2607                 }
2608
2609                 start = page_offset(page);
2610                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2611
2612                 end_extent_writepage(page, error, start, end);
2613                 end_page_writeback(page);
2614         }
2615
2616         bio_put(bio);
2617 }
2618
2619 static void
2620 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2621                               int uptodate)
2622 {
2623         struct extent_state *cached = NULL;
2624         u64 end = start + len - 1;
2625
2626         if (uptodate && tree->track_uptodate)
2627                 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2628         unlock_extent_cached_atomic(tree, start, end, &cached);
2629 }
2630
2631 /*
2632  * after a readpage IO is done, we need to:
2633  * clear the uptodate bits on error
2634  * set the uptodate bits if things worked
2635  * set the page up to date if all extents in the tree are uptodate
2636  * clear the lock bit in the extent tree
2637  * unlock the page if there are no other extents locked for it
2638  *
2639  * Scheduling is not allowed, so the extent state tree is expected
2640  * to have one and only one object corresponding to this IO.
2641  */
2642 static void end_bio_extent_readpage(struct bio *bio)
2643 {
2644         struct bio_vec *bvec;
2645         int uptodate = !bio->bi_status;
2646         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2647         struct extent_io_tree *tree, *failure_tree;
2648         u64 offset = 0;
2649         u64 start;
2650         u64 end;
2651         u64 len;
2652         u64 extent_start = 0;
2653         u64 extent_len = 0;
2654         int mirror;
2655         int ret;
2656         struct bvec_iter_all iter_all;
2657
2658         ASSERT(!bio_flagged(bio, BIO_CLONED));
2659         bio_for_each_segment_all(bvec, bio, iter_all) {
2660                 struct page *page = bvec->bv_page;
2661                 struct inode *inode = page->mapping->host;
2662                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2663                 bool data_inode = btrfs_ino(BTRFS_I(inode))
2664                         != BTRFS_BTREE_INODE_OBJECTID;
2665
2666                 btrfs_debug(fs_info,
2667                         "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2668                         (u64)bio->bi_iter.bi_sector, bio->bi_status,
2669                         io_bio->mirror_num);
2670                 tree = &BTRFS_I(inode)->io_tree;
2671                 failure_tree = &BTRFS_I(inode)->io_failure_tree;
2672
2673                 /* We always issue full-page reads, but if some block
2674                  * in a page fails to read, blk_update_request() will
2675                  * advance bv_offset and adjust bv_len to compensate.
2676                  * Print a warning for nonzero offsets, and an error
2677                  * if they don't add up to a full page.  */
2678                 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2679                         if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2680                                 btrfs_err(fs_info,
2681                                         "partial page read in btrfs with offset %u and length %u",
2682                                         bvec->bv_offset, bvec->bv_len);
2683                         else
2684                                 btrfs_info(fs_info,
2685                                         "incomplete page read in btrfs with offset %u and length %u",
2686                                         bvec->bv_offset, bvec->bv_len);
2687                 }
2688
2689                 start = page_offset(page);
2690                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2691                 len = bvec->bv_len;
2692
2693                 mirror = io_bio->mirror_num;
2694                 if (likely(uptodate)) {
2695                         ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2696                                                               page, start, end,
2697                                                               mirror);
2698                         if (ret)
2699                                 uptodate = 0;
2700                         else
2701                                 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2702                                                  failure_tree, tree, start,
2703                                                  page,
2704                                                  btrfs_ino(BTRFS_I(inode)), 0);
2705                 }
2706
2707                 if (likely(uptodate))
2708                         goto readpage_ok;
2709
2710                 if (data_inode) {
2711
2712                         /*
2713                          * The generic bio_readpage_error handles errors the
2714                          * following way: If possible, new read requests are
2715                          * created and submitted and will end up in
2716                          * end_bio_extent_readpage as well (if we're lucky,
2717                          * not in the !uptodate case). In that case it returns
2718                          * 0 and we just go on with the next page in our bio.
2719                          * If it can't handle the error it will return -EIO and
2720                          * we remain responsible for that page.
2721                          */
2722                         ret = bio_readpage_error(bio, offset, page, start, end,
2723                                                  mirror);
2724                         if (ret == 0) {
2725                                 uptodate = !bio->bi_status;
2726                                 offset += len;
2727                                 continue;
2728                         }
2729                 } else {
2730                         struct extent_buffer *eb;
2731
2732                         eb = (struct extent_buffer *)page->private;
2733                         set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2734                         eb->read_mirror = mirror;
2735                         atomic_dec(&eb->io_pages);
2736                         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2737                                                &eb->bflags))
2738                                 btree_readahead_hook(eb, -EIO);
2739                 }
2740 readpage_ok:
2741                 if (likely(uptodate)) {
2742                         loff_t i_size = i_size_read(inode);
2743                         pgoff_t end_index = i_size >> PAGE_SHIFT;
2744                         unsigned off;
2745
2746                         /* Zero out the end if this page straddles i_size */
2747                         off = offset_in_page(i_size);
2748                         if (page->index == end_index && off)
2749                                 zero_user_segment(page, off, PAGE_SIZE);
2750                         SetPageUptodate(page);
2751                 } else {
2752                         ClearPageUptodate(page);
2753                         SetPageError(page);
2754                 }
2755                 unlock_page(page);
2756                 offset += len;
2757
2758                 if (unlikely(!uptodate)) {
2759                         if (extent_len) {
2760                                 endio_readpage_release_extent(tree,
2761                                                               extent_start,
2762                                                               extent_len, 1);
2763                                 extent_start = 0;
2764                                 extent_len = 0;
2765                         }
2766                         endio_readpage_release_extent(tree, start,
2767                                                       end - start + 1, 0);
2768                 } else if (!extent_len) {
2769                         extent_start = start;
2770                         extent_len = end + 1 - start;
2771                 } else if (extent_start + extent_len == start) {
2772                         extent_len += end + 1 - start;
2773                 } else {
2774                         endio_readpage_release_extent(tree, extent_start,
2775                                                       extent_len, uptodate);
2776                         extent_start = start;
2777                         extent_len = end + 1 - start;
2778                 }
2779         }
2780
2781         if (extent_len)
2782                 endio_readpage_release_extent(tree, extent_start, extent_len,
2783                                               uptodate);
2784         btrfs_io_bio_free_csum(io_bio);
2785         bio_put(bio);
2786 }
2787
2788 /*
2789  * Initialize the members up to but not including 'bio'. Use after allocating a
2790  * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
2791  * 'bio' because use of __GFP_ZERO is not supported.
2792  */
2793 static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
2794 {
2795         memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2796 }
2797
2798 /*
2799  * The following helpers allocate a bio. As they're backed by a bioset, they
2800  * never fail.  They return a plain struct bio; call btrfs_io_bio() on the
2801  * result for the appropriate container_of magic to get the btrfs_io_bio.
2802  */
2803 struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
2804 {
2805         struct bio *bio;
2806
2807         bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
2808         bio_set_dev(bio, bdev);
2809         bio->bi_iter.bi_sector = first_byte >> 9;
2810         btrfs_io_bio_init(btrfs_io_bio(bio));
2811         return bio;
2812 }
2813
2814 struct bio *btrfs_bio_clone(struct bio *bio)
2815 {
2816         struct btrfs_io_bio *btrfs_bio;
2817         struct bio *new;
2818
2819         /* Bio allocation backed by a bioset does not fail */
2820         new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
2821         btrfs_bio = btrfs_io_bio(new);
2822         btrfs_io_bio_init(btrfs_bio);
2823         btrfs_bio->iter = bio->bi_iter;
2824         return new;
2825 }
2826
2827 struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
2828 {
2829         struct bio *bio;
2830
2831         /* Bio allocation backed by a bioset does not fail */
2832         bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
2833         btrfs_io_bio_init(btrfs_io_bio(bio));
2834         return bio;
2835 }
2836
2837 struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
2838 {
2839         struct bio *bio;
2840         struct btrfs_io_bio *btrfs_bio;
2841
2842         /* This will never fail as it's backed by a bioset */
2843         bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
2844         ASSERT(bio);
2845
2846         btrfs_bio = btrfs_io_bio(bio);
2847         btrfs_io_bio_init(btrfs_bio);
2848
2849         bio_trim(bio, offset >> 9, size >> 9);
2850         btrfs_bio->iter = bio->bi_iter;
2851         return bio;
2852 }
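
/*
 * Illustrative sketch only (no caller in this file does exactly this): code
 * that needs the btrfs-private part of the bio allocates through one of the
 * helpers above and then uses btrfs_io_bio() to reach the container.  Here
 * 'page' stands for whatever page the caller owns:
 *
 *	struct bio *bio = btrfs_io_bio_alloc(1);
 *	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	io_bio->iter = bio->bi_iter;
 *
 * None of the allocations above need an error check because the bioset
 * guarantees forward progress.
 */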
2853
2854 /*
2855  * @opf:        bio REQ_OP_* and REQ_* flags as one value
2856  * @tree:       tree so we can call our merge_bio hook
2857  * @wbc:        optional writeback control for io accounting
2858  * @page:       page to add to the bio
2859  * @offset:     disk byte offset of the data; it becomes the bio's starting
2860  *              sector and is used for the contiguity check
2861  * @size:       number of bytes of the page to add to the bio
2862  * @pg_offset:  offset of the data within the page
2863  * @bdev:       attach newly created bios to this bdev
2864  * @bio_ret:    must be valid pointer, newly allocated bio will be stored there
2865  * @end_io_func:     end_io callback for new bio
2866  * @mirror_num:      desired mirror to read/write
2867  * @prev_bio_flags:  flags of previous bio to see if we can merge the current one
2868  * @bio_flags:  flags of the current bio to see if we can merge them
2869  */
2870 static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
2871                               struct writeback_control *wbc,
2872                               struct page *page, u64 offset,
2873                               size_t size, unsigned long pg_offset,
2874                               struct block_device *bdev,
2875                               struct bio **bio_ret,
2876                               bio_end_io_t end_io_func,
2877                               int mirror_num,
2878                               unsigned long prev_bio_flags,
2879                               unsigned long bio_flags,
2880                               bool force_bio_submit)
2881 {
2882         int ret = 0;
2883         struct bio *bio;
2884         size_t page_size = min_t(size_t, size, PAGE_SIZE);
2885         sector_t sector = offset >> 9;
2886
2887         ASSERT(bio_ret);
2888
2889         if (*bio_ret) {
2890                 bool contig;
2891                 bool can_merge = true;
2892
2893                 bio = *bio_ret;
2894                 if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
2895                         contig = bio->bi_iter.bi_sector == sector;
2896                 else
2897                         contig = bio_end_sector(bio) == sector;
2898
2899                 ASSERT(tree->ops);
2900                 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
2901                         can_merge = false;
2902
2903                 if (prev_bio_flags != bio_flags || !contig || !can_merge ||
2904                     force_bio_submit ||
2905                     bio_add_page(bio, page, page_size, pg_offset) < page_size) {
2906                         ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
2907                         if (ret < 0) {
2908                                 *bio_ret = NULL;
2909                                 return ret;
2910                         }
2911                         bio = NULL;
2912                 } else {
2913                         if (wbc)
2914                                 wbc_account_io(wbc, page, page_size);
2915                         return 0;
2916                 }
2917         }
2918
2919         bio = btrfs_bio_alloc(bdev, offset);
2920         bio_add_page(bio, page, page_size, pg_offset);
2921         bio->bi_end_io = end_io_func;
2922         bio->bi_private = tree;
2923         bio->bi_write_hint = page->mapping->host->i_write_hint;
2924         bio->bi_opf = opf;
2925         if (wbc) {
2926                 wbc_init_bio(wbc, bio);
2927                 wbc_account_io(wbc, page, page_size);
2928         }
2929
2930         *bio_ret = bio;
2931
2932         return ret;
2933 }
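
/*
 * Callers typically keep one 'struct bio *' alive across many calls and pass
 * its address as @bio_ret so that contiguous ranges keep getting merged into
 * the same bio; whatever is left in *bio_ret once the loop is done must be
 * handed to submit_one_bio().  Roughly (see __do_readpage() and
 * extent_read_full_page() below):
 *
 *	struct bio *bio = NULL;
 *	unsigned long bio_flags = 0;
 *
 *	for each contiguous chunk:
 *		submit_extent_page(..., &bio, ..., bio_flags, ...);
 *	if (bio)
 *		submit_one_bio(bio, mirror_num, bio_flags);
 */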
2934
2935 static void attach_extent_buffer_page(struct extent_buffer *eb,
2936                                       struct page *page)
2937 {
2938         if (!PagePrivate(page)) {
2939                 SetPagePrivate(page);
2940                 get_page(page);
2941                 set_page_private(page, (unsigned long)eb);
2942         } else {
2943                 WARN_ON(page->private != (unsigned long)eb);
2944         }
2945 }
2946
2947 void set_page_extent_mapped(struct page *page)
2948 {
2949         if (!PagePrivate(page)) {
2950                 SetPagePrivate(page);
2951                 get_page(page);
2952                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2953         }
2954 }
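
/*
 * Data pages get the EXTENT_PAGE_PRIVATE cookie in page->private, while btree
 * pages have page->private point at their extent_buffer (see
 * attach_extent_buffer_page() above); in both cases the extra get_page()
 * reference keeps the page pinned while the private state is attached.
 */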
2955
2956 static struct extent_map *
2957 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2958                  u64 start, u64 len, get_extent_t *get_extent,
2959                  struct extent_map **em_cached)
2960 {
2961         struct extent_map *em;
2962
2963         if (em_cached && *em_cached) {
2964                 em = *em_cached;
2965                 if (extent_map_in_tree(em) && start >= em->start &&
2966                     start < extent_map_end(em)) {
2967                         refcount_inc(&em->refs);
2968                         return em;
2969                 }
2970
2971                 free_extent_map(em);
2972                 *em_cached = NULL;
2973         }
2974
2975         em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
2976         if (em_cached && !IS_ERR_OR_NULL(em)) {
2977                 BUG_ON(*em_cached);
2978                 refcount_inc(&em->refs);
2979                 *em_cached = em;
2980         }
2981         return em;
2982 }
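
/*
 * Note on the cache above: *em_cached always owns its own reference (the
 * refcount is bumped both when a cached map is handed back and when a fresh
 * one is stored), so the caller must free_extent_map() the map it got from
 * __get_extent_map() and, eventually, drop the cached one as well.
 */
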
2983 /*
2984  * Basic readpage implementation.  Locked extent state structs are inserted
2985  * into the tree; they are removed when the IO is done (by the end_io
2986  * handlers).
2987  * XXX JDM: This needs looking at to ensure proper page locking
2988  * Return 0 on success, otherwise return an error.
2989  */
2990 static int __do_readpage(struct extent_io_tree *tree,
2991                          struct page *page,
2992                          get_extent_t *get_extent,
2993                          struct extent_map **em_cached,
2994                          struct bio **bio, int mirror_num,
2995                          unsigned long *bio_flags, unsigned int read_flags,
2996                          u64 *prev_em_start)
2997 {
2998         struct inode *inode = page->mapping->host;
2999         u64 start = page_offset(page);
3000         const u64 end = start + PAGE_SIZE - 1;
3001         u64 cur = start;
3002         u64 extent_offset;
3003         u64 last_byte = i_size_read(inode);
3004         u64 block_start;
3005         u64 cur_end;
3006         struct extent_map *em;
3007         struct block_device *bdev;
3008         int ret = 0;
3009         int nr = 0;
3010         size_t pg_offset = 0;
3011         size_t iosize;
3012         size_t disk_io_size;
3013         size_t blocksize = inode->i_sb->s_blocksize;
3014         unsigned long this_bio_flag = 0;
3015
3016         set_page_extent_mapped(page);
3017
3018         if (!PageUptodate(page)) {
3019                 if (cleancache_get_page(page) == 0) {
3020                         BUG_ON(blocksize != PAGE_SIZE);
3021                         unlock_extent(tree, start, end);
3022                         goto out;
3023                 }
3024         }
3025
3026         if (page->index == last_byte >> PAGE_SHIFT) {
3027                 char *userpage;
3028                 size_t zero_offset = offset_in_page(last_byte);
3029
3030                 if (zero_offset) {
3031                         iosize = PAGE_SIZE - zero_offset;
3032                         userpage = kmap_atomic(page);
3033                         memset(userpage + zero_offset, 0, iosize);
3034                         flush_dcache_page(page);
3035                         kunmap_atomic(userpage);
3036                 }
3037         }
3038         while (cur <= end) {
3039                 bool force_bio_submit = false;
3040                 u64 offset;
3041
3042                 if (cur >= last_byte) {
3043                         char *userpage;
3044                         struct extent_state *cached = NULL;
3045
3046                         iosize = PAGE_SIZE - pg_offset;
3047                         userpage = kmap_atomic(page);
3048                         memset(userpage + pg_offset, 0, iosize);
3049                         flush_dcache_page(page);
3050                         kunmap_atomic(userpage);
3051                         set_extent_uptodate(tree, cur, cur + iosize - 1,
3052                                             &cached, GFP_NOFS);
3053                         unlock_extent_cached(tree, cur,
3054                                              cur + iosize - 1, &cached);
3055                         break;
3056                 }
3057                 em = __get_extent_map(inode, page, pg_offset, cur,
3058                                       end - cur + 1, get_extent, em_cached);
3059                 if (IS_ERR_OR_NULL(em)) {
3060                         SetPageError(page);
3061                         unlock_extent(tree, cur, end);
3062                         break;
3063                 }
3064                 extent_offset = cur - em->start;
3065                 BUG_ON(extent_map_end(em) <= cur);
3066                 BUG_ON(end < cur);
3067
3068                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3069                         this_bio_flag |= EXTENT_BIO_COMPRESSED;
3070                         extent_set_compress_type(&this_bio_flag,
3071                                                  em->compress_type);
3072                 }
3073
3074                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3075                 cur_end = min(extent_map_end(em) - 1, end);
3076                 iosize = ALIGN(iosize, blocksize);
3077                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3078                         disk_io_size = em->block_len;
3079                         offset = em->block_start;
3080                 } else {
3081                         offset = em->block_start + extent_offset;
3082                         disk_io_size = iosize;
3083                 }
3084                 bdev = em->bdev;
3085                 block_start = em->block_start;
3086                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3087                         block_start = EXTENT_MAP_HOLE;
3088
3089                 /*
3090                  * If we have a file range that points to a compressed extent
3091                  * and it's followed by a consecutive file range that points
3092                  * to the same compressed extent (possibly with a different
3093                  * offset and/or length, so it either points to the whole extent
3094                  * or only part of it), we must make sure we do not submit a
3095                  * single bio to populate the pages for the 2 ranges because
3096                  * this makes the compressed extent read zero out the pages
3097                  * belonging to the 2nd range. Imagine the following scenario:
3098                  *
3099                  *  File layout
3100                  *  [0 - 8K]                     [8K - 24K]
3101                  *    |                               |
3102                  *    |                               |
3103                  * points to extent X,         points to extent X,
3104                  * offset 4K, length of 8K     offset 0, length 16K
3105                  *
3106                  * [extent X, compressed length = 4K uncompressed length = 16K]
3107                  *
3108                  * If the bio to read the compressed extent covers both ranges,
3109                  * it will decompress extent X into the pages belonging to the
3110                  * first range and then it will stop, zeroing out the remaining
3111                  * pages that belong to the other range that points to extent X.
3112                  * So here we make sure we submit 2 bios, one for the first
3113                  * range and another one for the second range. Both will target
3114                  * the same physical extent from disk, but we can't currently
3115                  * make the compressed bio endio callback populate the pages
3116                  * for both ranges because each compressed bio is tightly
3117                  * coupled with a single extent map, and each range can have
3118                  * an extent map with a different offset value relative to the
3119                  * uncompressed data of our extent and different lengths. This
3120                  * is a corner case so we prioritize correctness over
3121                  * non-optimal behavior (submitting 2 bios for the same extent).
3122                  */
3123                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3124                     prev_em_start && *prev_em_start != (u64)-1 &&
3125                     *prev_em_start != em->start)
3126                         force_bio_submit = true;
3127
3128                 if (prev_em_start)
3129                         *prev_em_start = em->start;
3130
3131                 free_extent_map(em);
3132                 em = NULL;
3133
3134                 /* we've found a hole, just zero and go on */
3135                 if (block_start == EXTENT_MAP_HOLE) {
3136                         char *userpage;
3137                         struct extent_state *cached = NULL;
3138
3139                         userpage = kmap_atomic(page);
3140                         memset(userpage + pg_offset, 0, iosize);
3141                         flush_dcache_page(page);
3142                         kunmap_atomic(userpage);
3143
3144                         set_extent_uptodate(tree, cur, cur + iosize - 1,
3145                                             &cached, GFP_NOFS);
3146                         unlock_extent_cached(tree, cur,
3147                                              cur + iosize - 1, &cached);
3148                         cur = cur + iosize;
3149                         pg_offset += iosize;
3150                         continue;
3151                 }
3152                 /* the get_extent function already copied into the page */
3153                 if (test_range_bit(tree, cur, cur_end,
3154                                    EXTENT_UPTODATE, 1, NULL)) {
3155                         check_page_uptodate(tree, page);
3156                         unlock_extent(tree, cur, cur + iosize - 1);
3157                         cur = cur + iosize;
3158                         pg_offset += iosize;
3159                         continue;
3160                 }
3161                 /* We have an inline extent but it didn't get marked
3162                  * uptodate.  Error out.
3163                  */
3164                 if (block_start == EXTENT_MAP_INLINE) {
3165                         SetPageError(page);
3166                         unlock_extent(tree, cur, cur + iosize - 1);
3167                         cur = cur + iosize;
3168                         pg_offset += iosize;
3169                         continue;
3170                 }
3171
3172                 ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
3173                                          page, offset, disk_io_size,
3174                                          pg_offset, bdev, bio,
3175                                          end_bio_extent_readpage, mirror_num,
3176                                          *bio_flags,
3177                                          this_bio_flag,
3178                                          force_bio_submit);
3179                 if (!ret) {
3180                         nr++;
3181                         *bio_flags = this_bio_flag;
3182                 } else {
3183                         SetPageError(page);
3184                         unlock_extent(tree, cur, cur + iosize - 1);
3185                         goto out;
3186                 }
3187                 cur = cur + iosize;
3188                 pg_offset += iosize;
3189         }
3190 out:
3191         if (!nr) {
3192                 if (!PageError(page))
3193                         SetPageUptodate(page);
3194                 unlock_page(page);
3195         }
3196         return ret;
3197 }
3198
3199 static inline void contiguous_readpages(struct extent_io_tree *tree,
3200                                              struct page *pages[], int nr_pages,
3201                                              u64 start, u64 end,
3202                                              struct extent_map **em_cached,
3203                                              struct bio **bio,
3204                                              unsigned long *bio_flags,
3205                                              u64 *prev_em_start)
3206 {
3207         struct inode *inode;
3208         struct btrfs_ordered_extent *ordered;
3209         int index;
3210
3211         inode = pages[0]->mapping->host;
3212         while (1) {
3213                 lock_extent(tree, start, end);
3214                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
3215                                                      end - start + 1);
3216                 if (!ordered)
3217                         break;
3218                 unlock_extent(tree, start, end);
3219                 btrfs_start_ordered_extent(inode, ordered, 1);
3220                 btrfs_put_ordered_extent(ordered);
3221         }
3222
3223         for (index = 0; index < nr_pages; index++) {
3224                 __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
3225                                 bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
3226                 put_page(pages[index]);
3227         }
3228 }
3229
3230 static int __extent_read_full_page(struct extent_io_tree *tree,
3231                                    struct page *page,
3232                                    get_extent_t *get_extent,
3233                                    struct bio **bio, int mirror_num,
3234                                    unsigned long *bio_flags,
3235                                    unsigned int read_flags)
3236 {
3237         struct inode *inode = page->mapping->host;
3238         struct btrfs_ordered_extent *ordered;
3239         u64 start = page_offset(page);
3240         u64 end = start + PAGE_SIZE - 1;
3241         int ret;
3242
3243         while (1) {
3244                 lock_extent(tree, start, end);
3245                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
3246                                                 PAGE_SIZE);
3247                 if (!ordered)
3248                         break;
3249                 unlock_extent(tree, start, end);
3250                 btrfs_start_ordered_extent(inode, ordered, 1);
3251                 btrfs_put_ordered_extent(ordered);
3252         }
3253
3254         ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3255                             bio_flags, read_flags, NULL);
3256         return ret;
3257 }
3258
3259 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3260                             get_extent_t *get_extent, int mirror_num)
3261 {
3262         struct bio *bio = NULL;
3263         unsigned long bio_flags = 0;
3264         int ret;
3265
3266         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3267                                       &bio_flags, 0);
3268         if (bio)
3269                 ret = submit_one_bio(bio, mirror_num, bio_flags);
3270         return ret;
3271 }
3272
3273 static void update_nr_written(struct writeback_control *wbc,
3274                               unsigned long nr_written)
3275 {
3276         wbc->nr_to_write -= nr_written;
3277 }
3278
3279 /*
3280  * Helper for __extent_writepage, doing all of the delayed allocation setup.
3281  *
3282  * This returns 1 if btrfs_run_delalloc_range did all the work required
3283  * to write the page (copy into an inline extent).  In this case the IO has
3284  * been started and the page is already unlocked.
3285  *
3286  * This returns 0 if all went well (page still locked)
3287  * This returns < 0 if there were errors (page still locked)
3288  */
3289 static noinline_for_stack int writepage_delalloc(struct inode *inode,
3290                 struct page *page, struct writeback_control *wbc,
3291                 u64 delalloc_start, unsigned long *nr_written)
3292 {
3293         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3294         u64 page_end = delalloc_start + PAGE_SIZE - 1;
3295         bool found;
3296         u64 delalloc_to_write = 0;
3297         u64 delalloc_end = 0;
3298         int ret;
3299         int page_started = 0;
3300
3301
3302         while (delalloc_end < page_end) {
3303                 found = find_lock_delalloc_range(inode, tree,
3304                                                page,
3305                                                &delalloc_start,
3306                                                &delalloc_end);
3307                 if (!found) {
3308                         delalloc_start = delalloc_end + 1;
3309                         continue;
3310                 }
3311                 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3312                                 delalloc_end, &page_started, nr_written, wbc);
3313                 /* btrfs_run_delalloc_range failed (e.g. a read-only fs) */
3314                 if (ret) {
3315                         SetPageError(page);
3316                         /*
3317                          * btrfs_run_delalloc_range should return < 0 on error,
3318                          * but just in case it didn't, normalize the value here:
3319                          * for us > 0 means the IO was started, so we must not
3320                          * return > 0 unless things went well.
3321                          */
3322                         ret = ret < 0 ? ret : -EIO;
3323                         goto done;
3324                 }
3325                 /*
3326                  * delalloc_end is already one less than the total length, so
3327                  * we don't subtract one from PAGE_SIZE
3328                  */
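                /*
                 * For example, a range covering exactly one full page has
                 * delalloc_end == delalloc_start + PAGE_SIZE - 1, so the
                 * expression below evaluates to one page.
                 */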
3329                 delalloc_to_write += (delalloc_end - delalloc_start +
3330                                       PAGE_SIZE) >> PAGE_SHIFT;
3331                 delalloc_start = delalloc_end + 1;
3332         }
3333         if (wbc->nr_to_write < delalloc_to_write) {
3334                 int thresh = 8192;
3335
3336                 if (delalloc_to_write < thresh * 2)
3337                         thresh = delalloc_to_write;
3338                 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3339                                          thresh);
3340         }
3341
3342         /* Did btrfs_run_delalloc_range already unlock and start
3343          * the IO?
3344          */
3345         if (page_started) {
3346                 /*
3347                  * we've unlocked the page, so we can't update
3348                  * the mapping's writeback index, just update
3349                  * nr_to_write.
3350                  */
3351                 wbc->nr_to_write -= *nr_written;
3352                 return 1;
3353         }
3354
3355         ret = 0;
3356
3357 done:
3358         return ret;
3359 }
3360
3361 /*
3362  * helper for __extent_writepage.  This calls the writepage start hooks,
3363  * and does the loop to map the page into extents and bios.
3364  *
3365  * We return 1 if the IO is started and the page is unlocked,
3366  * 0 if all went well (page still locked)
3367  * < 0 if there were errors (page still locked)
3368  */
3369 static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3370                                  struct page *page,
3371                                  struct writeback_control *wbc,
3372                                  struct extent_page_data *epd,
3373                                  loff_t i_size,
3374                                  unsigned long nr_written,
3375                                  unsigned int write_flags, int *nr_ret)
3376 {
3377         struct extent_io_tree *tree = epd->tree;
3378         u64 start = page_offset(page);
3379         u64 page_end = start + PAGE_SIZE - 1;
3380         u64 end;
3381         u64 cur = start;
3382         u64 extent_offset;
3383         u64 block_start;
3384         u64 iosize;
3385         struct extent_map *em;
3386         struct block_device *bdev;
3387         size_t pg_offset = 0;
3388         size_t blocksize;
3389         int ret = 0;
3390         int nr = 0;
3391         bool compressed;
3392
3393         ret = btrfs_writepage_cow_fixup(page, start, page_end);
3394         if (ret) {
3395                 /* Fixup worker will requeue */
3396                 if (ret == -EBUSY)
3397                         wbc->pages_skipped++;
3398                 else
3399                         redirty_page_for_writepage(wbc, page);
3400
3401                 update_nr_written(wbc, nr_written);
3402                 unlock_page(page);
3403                 return 1;
3404         }
3405
3406         /*
3407          * We don't want to touch the inode after unlocking the page,
3408          * so update the writeback accounting (wbc->nr_to_write) now.
3409          */
3410         update_nr_written(wbc, nr_written + 1);
3411
3412         end = page_end;
3413         if (i_size <= start) {
3414                 btrfs_writepage_endio_finish_ordered(page, start, page_end, 1);
3415                 goto done;
3416         }
3417
3418         blocksize = inode->i_sb->s_blocksize;
3419
3420         while (cur <= end) {
3421                 u64 em_end;
3422                 u64 offset;
3423
3424                 if (cur >= i_size) {
3425                         btrfs_writepage_endio_finish_ordered(page, cur,
3426                                                              page_end, 1);
3427                         break;
3428                 }
3429                 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
3430                                      end - cur + 1, 1);
3431                 if (IS_ERR_OR_NULL(em)) {
3432                         SetPageError(page);
3433                         ret = PTR_ERR_OR_ZERO(em);
3434                         break;
3435                 }
3436
3437                 extent_offset = cur - em->start;
3438                 em_end = extent_map_end(em);
3439                 BUG_ON(em_end <= cur);
3440                 BUG_ON(end < cur);
3441                 iosize = min(em_end - cur, end - cur + 1);
3442                 iosize = ALIGN(iosize, blocksize);
3443                 offset = em->block_start + extent_offset;
3444                 bdev = em->bdev;
3445                 block_start = em->block_start;
3446                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3447                 free_extent_map(em);
3448                 em = NULL;
3449
3450                 /*
3451                  * compressed and inline extents are written through other
3452                  * paths in the FS
3453                  */
3454                 if (compressed || block_start == EXTENT_MAP_HOLE ||
3455                     block_start == EXTENT_MAP_INLINE) {
3456                         /*
3457                          * end_io notification does not happen here for
3458                          * compressed extents
3459                          */
3460                         if (!compressed)
3461                                 btrfs_writepage_endio_finish_ordered(page, cur,
3462                                                             cur + iosize - 1,
3463                                                             1);
3464                         else {
3465                                 /* we don't want to end_page_writeback on
3466                                  * a compressed extent.  this happens
3467                                  * elsewhere
3468                                  */
3469                                 nr++;
3470                         }
3471
3472                         cur += iosize;
3473                         pg_offset += iosize;
3474                         continue;
3475                 }
3476
3477                 btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
3478                 if (!PageWriteback(page)) {
3479                         btrfs_err(BTRFS_I(inode)->root->fs_info,
3480                                    "page %lu not writeback, cur %llu end %llu",
3481                                page->index, cur, end);
3482                 }
3483
3484                 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3485                                          page, offset, iosize, pg_offset,
3486                                          bdev, &epd->bio,
3487                                          end_bio_extent_writepage,
3488                                          0, 0, 0, false);
3489                 if (ret) {
3490                         SetPageError(page);
3491                         if (PageWriteback(page))
3492                                 end_page_writeback(page);
3493                 }
3494
3495                 cur = cur + iosize;
3496                 pg_offset += iosize;
3497                 nr++;
3498         }
3499 done:
3500         *nr_ret = nr;
3501         return ret;
3502 }
3503
3504 /*
3505  * the writepage semantics are similar to regular writepage.  extent
3506  * records are inserted to lock ranges in the tree, and as dirty areas
3507  * are found, they are marked writeback.  Then the lock bits are removed
3508  * and the end_io handler clears the writeback ranges
3509  *
3510  * Return 0 if everything goes well.
3511  * Return <0 for error.
3512  */
3513 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3514                               struct extent_page_data *epd)
3515 {
3516         struct inode *inode = page->mapping->host;
3517         u64 start = page_offset(page);
3518         u64 page_end = start + PAGE_SIZE - 1;
3519         int ret;
3520         int nr = 0;
3521         size_t pg_offset = 0;
3522         loff_t i_size = i_size_read(inode);
3523         unsigned long end_index = i_size >> PAGE_SHIFT;
3524         unsigned int write_flags = 0;
3525         unsigned long nr_written = 0;
3526
3527         write_flags = wbc_to_write_flags(wbc);
3528
3529         trace___extent_writepage(page, inode, wbc);
3530
3531         WARN_ON(!PageLocked(page));
3532
3533         ClearPageError(page);
3534
3535         pg_offset = offset_in_page(i_size);
3536         if (page->index > end_index ||
3537            (page->index == end_index && !pg_offset)) {
3538                 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3539                 unlock_page(page);
3540                 return 0;
3541         }
3542
3543         if (page->index == end_index) {
3544                 char *userpage;
3545
3546                 userpage = kmap_atomic(page);
3547                 memset(userpage + pg_offset, 0,
3548                        PAGE_SIZE - pg_offset);
3549                 kunmap_atomic(userpage);
3550                 flush_dcache_page(page);
3551         }
3552
3553         pg_offset = 0;
3554
3555         set_page_extent_mapped(page);
3556
3557         if (!epd->extent_locked) {
3558                 ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
3559                 if (ret == 1)
3560                         goto done_unlocked;
3561                 if (ret)
3562                         goto done;
3563         }
3564
3565         ret = __extent_writepage_io(inode, page, wbc, epd,
3566                                     i_size, nr_written, write_flags, &nr);
3567         if (ret == 1)
3568                 goto done_unlocked;
3569
3570 done:
3571         if (nr == 0) {
3572                 /* make sure the mapping tag for page dirty gets cleared */
3573                 set_page_writeback(page);
3574                 end_page_writeback(page);
3575         }
3576         if (PageError(page)) {
3577                 ret = ret < 0 ? ret : -EIO;
3578                 end_extent_writepage(page, ret, start, page_end);
3579         }
3580         unlock_page(page);
3581         ASSERT(ret <= 0);
3582         return ret;
3583
3584 done_unlocked:
3585         return 0;
3586 }
3587
3588 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3589 {
3590         wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3591                        TASK_UNINTERRUPTIBLE);
3592 }
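
/*
 * The bit waited on above is set in lock_extent_buffer_for_io() below and
 * cleared again in end_extent_buffer_writeback(), which also does the
 * wake_up_bit() that lets this sleeper continue.
 */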
3593
3594 /*
3595  * Lock eb pages and flush the bio if we can't get the locks
3596  *
3597  * Return  0 if nothing went wrong
3598  * Return >0 is the same as 0, except the bio is not submitted
3599  * Return <0 if something went wrong, no page is locked
3600  */
3601 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
3602                           struct extent_page_data *epd)
3603 {
3604         struct btrfs_fs_info *fs_info = eb->fs_info;
3605         int i, num_pages, failed_page_nr;
3606         int flush = 0;
3607         int ret = 0;
3608
3609         if (!btrfs_try_tree_write_lock(eb)) {
3610                 ret = flush_write_bio(epd);
3611                 if (ret < 0)
3612                         return ret;
3613                 flush = 1;
3614                 btrfs_tree_lock(eb);
3615         }
3616
3617         if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3618                 btrfs_tree_unlock(eb);
3619                 if (!epd->sync_io)
3620                         return 0;
3621                 if (!flush) {
3622                         ret = flush_write_bio(epd);
3623                         if (ret < 0)
3624                                 return ret;
3625                         flush = 1;
3626                 }
3627                 while (1) {
3628                         wait_on_extent_buffer_writeback(eb);
3629                         btrfs_tree_lock(eb);
3630                         if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3631                                 break;
3632                         btrfs_tree_unlock(eb);
3633                 }
3634         }
3635
3636         /*
3637          * We need to do this to prevent races in people who check if the eb is
3638          * under IO since we can end up having no IO bits set for a short period
3639          * of time.
3640          */
3641         spin_lock(&eb->refs_lock);
3642         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3643                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3644                 spin_unlock(&eb->refs_lock);
3645                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3646                 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3647                                          -eb->len,
3648                                          fs_info->dirty_metadata_batch);
3649                 ret = 1;
3650         } else {
3651                 spin_unlock(&eb->refs_lock);
3652         }
3653
3654         btrfs_tree_unlock(eb);
3655
3656         if (!ret)
3657                 return ret;
3658
3659         num_pages = num_extent_pages(eb);
3660         for (i = 0; i < num_pages; i++) {
3661                 struct page *p = eb->pages[i];
3662
3663                 if (!trylock_page(p)) {
3664                         if (!flush) {
3665                                 ret = flush_write_bio(epd);
3666                                 if (ret < 0) {
3667                                         failed_page_nr = i;
3668                                         goto err_unlock;
3669                                 }
3670                                 flush = 1;
3671                         }
3672                         lock_page(p);
3673                 }
3674         }
3675
3676         return ret;
3677 err_unlock:
3678         /* Unlock already locked pages */
3679         for (i = 0; i < failed_page_nr; i++)
3680                 unlock_page(eb->pages[i]);
3681         return ret;
3682 }
3683
3684 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3685 {
3686         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3687         smp_mb__after_atomic();
3688         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3689 }
3690
3691 static void set_btree_ioerr(struct page *page)
3692 {
3693         struct extent_buffer *eb = (struct extent_buffer *)page->private;
3694
3695         SetPageError(page);
3696         if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3697                 return;
3698
3699         /*
3700          * If writeback for a btree extent that doesn't belong to a log tree
3701          * failed, set the bit BTRFS_FS_BTREE_ERR in fs_info->flags below.
3702          * We do this because while the transaction is running and before it's
3703          * committing (when we call filemap_fdata[write|wait]_range against
3704          * the btree inode), we might have
3705          * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3706          * returns an error or an error happens during writeback, when we're
3707          * committing the transaction we wouldn't know about it, since the pages
3708          * can be no longer dirty nor marked anymore for writeback (if a
3709          * subsequent modification to the extent buffer didn't happen before the
3710          * transaction commit), which makes filemap_fdata[write|wait]_range not
3711          * able to find the pages tagged with SetPageError at transaction
3712          * commit time. So if this happens we must abort the transaction,
3713          * otherwise we commit a super block with btree roots that point to
3714          * btree nodes/leafs whose content on disk is invalid - either garbage
3715          * or the content of some node/leaf from a past generation that got
3716          * cowed or deleted and is no longer valid.
3717          *
3718          * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3719          * not be enough - we need to distinguish between log tree extents vs
3720          * non-log tree extents, and the next filemap_fdatawait_range() call
3721          * will catch and clear such errors in the mapping - and that call might
3722          * be from a log sync and not from a transaction commit. Also, checking
3723          * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3724          * not done and would not be reliable - the eb might have been released
3725          * from memory and reading it back again means that flag would not be
3726          * set (since it's a runtime flag, not persisted on disk).
3727          *
3728          * Using these fs_info flags also makes us achieve our goal in the
3729          * case where writepages() returns success after having started
3730          * writeback for all dirty pages, but before filemap_fdatawait_range()
3731          * is called the writeback for all those pages has already finished
3732          * with errors - because we were not using AS_EIO/AS_ENOSPC,
3733          * filemap_fdatawait_range() would return success, as it could not
3734          * know that writeback errors happened (the pages were no longer
3735          * tagged for writeback).
3736          */
3737         switch (eb->log_index) {
3738         case -1:
3739                 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3740                 break;
3741         case 0:
3742                 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3743                 break;
3744         case 1:
3745                 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3746                 break;
3747         default:
3748                 BUG(); /* unexpected, logic error */
3749         }
3750 }
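
/*
 * As the comment in set_btree_ioerr() explains, the BTRFS_FS_*_ERR bit set
 * there is the only record of the failure that is guaranteed to survive
 * until transaction commit or log sync time; the SetPageError() call is best
 * effort and cannot be relied upon once the pages stop being tagged for
 * writeback.
 */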
3751
3752 static void end_bio_extent_buffer_writepage(struct bio *bio)
3753 {
3754         struct bio_vec *bvec;
3755         struct extent_buffer *eb;
3756         int done;
3757         struct bvec_iter_all iter_all;
3758
3759         ASSERT(!bio_flagged(bio, BIO_CLONED));
3760         bio_for_each_segment_all(bvec, bio, iter_all) {
3761                 struct page *page = bvec->bv_page;
3762
3763                 eb = (struct extent_buffer *)page->private;
3764                 BUG_ON(!eb);
3765                 done = atomic_dec_and_test(&eb->io_pages);
3766
3767                 if (bio->bi_status ||
3768                     test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3769                         ClearPageUptodate(page);
3770                         set_btree_ioerr(page);
3771                 }
3772
3773                 end_page_writeback(page);
3774
3775                 if (!done)
3776                         continue;
3777
3778                 end_extent_buffer_writeback(eb);
3779         }
3780
3781         bio_put(bio);
3782 }
3783
3784 static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3785                         struct writeback_control *wbc,
3786                         struct extent_page_data *epd)
3787 {
3788         struct btrfs_fs_info *fs_info = eb->fs_info;
3789         struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3790         struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3791         u64 offset = eb->start;
3792         u32 nritems;
3793         int i, num_pages;
3794         unsigned long start, end;
3795         unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3796         int ret = 0;
3797
3798         clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3799         num_pages = num_extent_pages(eb);
3800         atomic_set(&eb->io_pages, num_pages);
3801
3802         /* Set btree blocks beyond nritems to 0 to avoid stale content. */
3803         nritems = btrfs_header_nritems(eb);
3804         if (btrfs_header_level(eb) > 0) {
3805                 end = btrfs_node_key_ptr_offset(nritems);
3806
3807                 memzero_extent_buffer(eb, end, eb->len - end);
3808         } else {
3809                 /*
3810                  * leaf:
3811                  * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3812                  */
3813                 start = btrfs_item_nr_offset(nritems);
3814                 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
3815                 memzero_extent_buffer(eb, start, end - start);
3816         }
3817
3818         for (i = 0; i < num_pages; i++) {
3819                 struct page *p = eb->pages[i];
3820
3821                 clear_page_dirty_for_io(p);
3822                 set_page_writeback(p);
3823                 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3824                                          p, offset, PAGE_SIZE, 0, bdev,
3825                                          &epd->bio,
3826                                          end_bio_extent_buffer_writepage,
3827                                          0, 0, 0, false);
3828                 if (ret) {
3829                         set_btree_ioerr(p);
3830                         if (PageWriteback(p))
3831                                 end_page_writeback(p);
3832                         if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3833                                 end_extent_buffer_writeback(eb);
3834                         ret = -EIO;
3835                         break;
3836                 }
3837                 offset += PAGE_SIZE;
3838                 update_nr_written(wbc, 1);
3839                 unlock_page(p);
3840         }
3841
3842         if (unlikely(ret)) {
3843                 for (; i < num_pages; i++) {
3844                         struct page *p = eb->pages[i];
3845                         clear_page_dirty_for_io(p);
3846                         unlock_page(p);
3847                 }
3848         }
3849
3850         return ret;
3851 }
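
/*
 * On a partial failure in write_one_eb(), pages that were already added to a
 * bio are left to the normal end_io path, io_pages is dropped for the current
 * and all remaining pages in one go via atomic_sub_and_test(), and the
 * trailing loop cleans and unlocks the pages that were never submitted.
 */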
3852
3853 int btree_write_cache_pages(struct address_space *mapping,
3854                                    struct writeback_control *wbc)
3855 {
3856         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3857         struct extent_buffer *eb, *prev_eb = NULL;
3858         struct extent_page_data epd = {
3859                 .bio = NULL,
3860                 .tree = tree,
3861                 .extent_locked = 0,
3862                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3863         };
3864         int ret = 0;
3865         int done = 0;
3866         int nr_to_write_done = 0;
3867         struct pagevec pvec;
3868         int nr_pages;
3869         pgoff_t index;
3870         pgoff_t end;            /* Inclusive */
3871         int scanned = 0;
3872         xa_mark_t tag;
3873
3874         pagevec_init(&pvec);
3875         if (wbc->range_cyclic) {
3876                 index = mapping->writeback_index; /* Start from prev offset */
3877                 end = -1;
3878         } else {
3879                 index = wbc->range_start >> PAGE_SHIFT;
3880                 end = wbc->range_end >> PAGE_SHIFT;
3881                 scanned = 1;
3882         }
3883         if (wbc->sync_mode == WB_SYNC_ALL)
3884                 tag = PAGECACHE_TAG_TOWRITE;
3885         else
3886                 tag = PAGECACHE_TAG_DIRTY;
3887 retry:
3888         if (wbc->sync_mode == WB_SYNC_ALL)
3889                 tag_pages_for_writeback(mapping, index, end);
3890         while (!done && !nr_to_write_done && (index <= end) &&
3891                (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3892                         tag))) {
3893                 unsigned i;
3894
3895                 scanned = 1;
3896                 for (i = 0; i < nr_pages; i++) {
3897                         struct page *page = pvec.pages[i];
3898
3899                         if (!PagePrivate(page))
3900                                 continue;
3901
3902                         spin_lock(&mapping->private_lock);
3903                         if (!PagePrivate(page)) {
3904                                 spin_unlock(&mapping->private_lock);
3905                                 continue;
3906                         }
3907
3908                         eb = (struct extent_buffer *)page->private;
3909
3910                         /*
3911                          * Shouldn't happen and normally this would be a BUG_ON
3912                          * but no sense in crashing the user's box for something
3913                          * we can survive anyway.
3914                          */
3915                         if (WARN_ON(!eb)) {
3916                                 spin_unlock(&mapping->private_lock);
3917                                 continue;
3918                         }
3919
3920                         if (eb == prev_eb) {
3921                                 spin_unlock(&mapping->private_lock);
3922                                 continue;
3923                         }
3924
3925                         ret = atomic_inc_not_zero(&eb->refs);
3926                         spin_unlock(&mapping->private_lock);
3927                         if (!ret)
3928                                 continue;
3929
3930                         prev_eb = eb;
3931                         ret = lock_extent_buffer_for_io(eb, &epd);
3932                         if (!ret) {
3933                                 free_extent_buffer(eb);
3934                                 continue;
3935                         }
3936
3937                         ret = write_one_eb(eb, wbc, &epd);
3938                         if (ret) {
3939                                 done = 1;
3940                                 free_extent_buffer(eb);
3941                                 break;
3942                         }
3943                         free_extent_buffer(eb);
3944
3945                         /*
3946                          * the filesystem may choose to bump up nr_to_write.
3947                          * We have to make sure to honor the new nr_to_write
3948                          * at any time
3949                          */
3950                         nr_to_write_done = wbc->nr_to_write <= 0;
3951                 }
3952                 pagevec_release(&pvec);
3953                 cond_resched();
3954         }
3955         if (!scanned && !done) {
3956                 /*
3957                  * We hit the last page and there is more work to be done: wrap
3958                  * back to the start of the file
3959                  */
3960                 scanned = 1;
3961                 index = 0;
3962                 goto retry;
3963         }
3964         ASSERT(ret <= 0);
3965         if (ret < 0) {
3966                 end_write_bio(&epd, ret);
3967                 return ret;
3968         }
3969         ret = flush_write_bio(&epd);
3970         return ret;
3971 }
3972
3973 /**
3974  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3975  * @mapping: address space structure to write
3976  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3977  * @epd: extent_page_data holding the bio being built and the write mode
3978  *
3979  * If a page is already under I/O, extent_write_cache_pages() skips it, even
3980  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3981  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3982  * and msync() need to guarantee that all the data which was dirty at the time
3983  * the call was made get new I/O started against them.  If wbc->sync_mode is
3984  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3985  * existing IO to complete.
3986  */
3987 static int extent_write_cache_pages(struct address_space *mapping,
3988                              struct writeback_control *wbc,
3989                              struct extent_page_data *epd)
3990 {
3991         struct inode *inode = mapping->host;
3992         int ret = 0;
3993         int done = 0;
3994         int nr_to_write_done = 0;
3995         struct pagevec pvec;
3996         int nr_pages;
3997         pgoff_t index;
3998         pgoff_t end;            /* Inclusive */
3999         pgoff_t done_index;
4000         int range_whole = 0;
4001         int scanned = 0;
4002         xa_mark_t tag;
4003
4004         /*
4005          * We have to hold onto the inode so that ordered extents can do their
4006          * work when the IO finishes.  The alternative to this is failing to add
4007          * an ordered extent if the igrab() fails there and that is a huge pain
4008          * to deal with, so instead just hold onto the inode throughout the
4009          * writepages operation.  If it fails here we are freeing up the inode
4010          * anyway and we'd rather not waste our time writing out stuff that is
4011          * going to be truncated anyway.
4012          */
4013         if (!igrab(inode))
4014                 return 0;
4015
4016         pagevec_init(&pvec);
4017         if (wbc->range_cyclic) {
4018                 index = mapping->writeback_index; /* Start from prev offset */
4019                 end = -1;
4020         } else {
4021                 index = wbc->range_start >> PAGE_SHIFT;
4022                 end = wbc->range_end >> PAGE_SHIFT;
4023                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4024                         range_whole = 1;
4025                 scanned = 1;
4026         }
4027
4028         /*
4029          * We do the tagged writepage as long as the snapshot flush bit is set
4030          * and we are the first one to do the filemap_flush() on this inode.
4031          *
4032          * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4033          * not race in and drop the bit.
4034          */
4035         if (range_whole && wbc->nr_to_write == LONG_MAX &&
4036             test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4037                                &BTRFS_I(inode)->runtime_flags))
4038                 wbc->tagged_writepages = 1;
4039
4040         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4041                 tag = PAGECACHE_TAG_TOWRITE;
4042         else
4043                 tag = PAGECACHE_TAG_DIRTY;
4044 retry:
4045         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4046                 tag_pages_for_writeback(mapping, index, end);
4047         done_index = index;
4048         while (!done && !nr_to_write_done && (index <= end) &&
4049                         (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4050                                                 &index, end, tag))) {
4051                 unsigned i;
4052
4053                 scanned = 1;
4054                 for (i = 0; i < nr_pages; i++) {
4055                         struct page *page = pvec.pages[i];
4056
4057                         done_index = page->index;
4058                         /*
4059                          * At this point we hold neither the i_pages lock nor
4060                          * the page lock: the page may be truncated or
4061                          * invalidated (changing page->mapping to NULL),
4062                          * or even swizzled back from swapper_space to
4063                          * tmpfs file mapping
4064                          */
4065                         if (!trylock_page(page)) {
4066                                 ret = flush_write_bio(epd);
4067                                 BUG_ON(ret < 0);
4068                                 lock_page(page);
4069                         }
4070
4071                         if (unlikely(page->mapping != mapping)) {
4072                                 unlock_page(page);
4073                                 continue;
4074                         }
4075
4076                         if (wbc->sync_mode != WB_SYNC_NONE) {
4077                                 if (PageWriteback(page)) {
4078                                         ret = flush_write_bio(epd);
4079                                         BUG_ON(ret < 0);
4080                                 }
4081                                 wait_on_page_writeback(page);
4082                         }
4083
4084                         if (PageWriteback(page) ||
4085                             !clear_page_dirty_for_io(page)) {
4086                                 unlock_page(page);
4087                                 continue;
4088                         }
4089
4090                         ret = __extent_writepage(page, wbc, epd);
4091                         if (ret < 0) {
4092                                 /*
4093                                  * done_index is set past this page,
4094                                  * so media errors will not choke
4095                                  * background writeout for the entire
4096                                  * file. This has consequences for
4097                                  * range_cyclic semantics (ie. it may
4098                                  * not be suitable for data integrity
4099                                  * writeout).
4100                                  */
4101                                 done_index = page->index + 1;
4102                                 done = 1;
4103                                 break;
4104                         }
4105
4106                         /*
4107                          * the filesystem may choose to bump up nr_to_write.
4108                          * We have to make sure to honor the new nr_to_write
4109                          * at any time
4110                          */
4111                         nr_to_write_done = wbc->nr_to_write <= 0;
4112                 }
4113                 pagevec_release(&pvec);
4114                 cond_resched();
4115         }
4116         if (!scanned && !done) {
4117                 /*
4118                  * We hit the last page and there is more work to be done: wrap
4119                  * back to the start of the file
4120                  */
4121                 scanned = 1;
4122                 index = 0;
4123                 goto retry;
4124         }
4125
4126         if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4127                 mapping->writeback_index = done_index;
4128
4129         btrfs_add_delayed_iput(inode);
4130         return ret;
4131 }
4132
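/*
 * Write a single dirty page and submit the resulting bio.
 *
 * Any error from __extent_writepage() ends the pending bio with that error,
 * otherwise the bio is flushed.  Returns 0 or a negative error.
 */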
4133 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4134 {
4135         int ret;
4136         struct extent_page_data epd = {
4137                 .bio = NULL,
4138                 .tree = &BTRFS_I(page->mapping->host)->io_tree,
4139                 .extent_locked = 0,
4140                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4141         };
4142
4143         ret = __extent_writepage(page, wbc, &epd);
4144         ASSERT(ret <= 0);
4145         if (ret < 0) {
4146                 end_write_bio(&epd, ret);
4147                 return ret;
4148         }
4149
4150         ret = flush_write_bio(&epd);
4151         ASSERT(ret <= 0);
4152         return ret;
4153 }
4154
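/*
 * Write out an already locked range of pages.
 *
 * The caller holds the pages and the extent range locked (epd.extent_locked
 * is set); dirty pages go through __extent_writepage(), clean ones only get
 * their ordered extent finished.  The pending bio is flushed at the end.
 */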
4155 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4156                               int mode)
4157 {
4158         int ret = 0;
4159         struct address_space *mapping = inode->i_mapping;
4160         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
4161         struct page *page;
4162         unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4163                 PAGE_SHIFT;
4164
4165         struct extent_page_data epd = {
4166                 .bio = NULL,
4167                 .tree = tree,
4168                 .extent_locked = 1,
4169                 .sync_io = mode == WB_SYNC_ALL,
4170         };
4171         struct writeback_control wbc_writepages = {
4172                 .sync_mode      = mode,
4173                 .nr_to_write    = nr_pages * 2,
4174                 .range_start    = start,
4175                 .range_end      = end + 1,
4176         };
4177
4178         while (start <= end) {
4179                 page = find_get_page(mapping, start >> PAGE_SHIFT);
4180                 if (clear_page_dirty_for_io(page))
4181                         ret = __extent_writepage(page, &wbc_writepages, &epd);
4182                 else {
4183                         btrfs_writepage_endio_finish_ordered(page, start,
4184                                                     start + PAGE_SIZE - 1, 1);
4185                         unlock_page(page);
4186                 }
4187                 put_page(page);
4188                 start += PAGE_SIZE;
4189         }
4190
4191         ASSERT(ret <= 0);
4192         if (ret < 0) {
4193                 end_write_bio(&epd, ret);
4194                 return ret;
4195         }
4196         ret = flush_write_bio(&epd);
4197         return ret;
4198 }
4199
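/*
 * Writeback entry point for data pages: run extent_write_cache_pages() over
 * the mapping and then flush (or, on error, end) the pending bio.
 */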
4200 int extent_writepages(struct address_space *mapping,
4201                       struct writeback_control *wbc)
4202 {
4203         int ret = 0;
4204         struct extent_page_data epd = {
4205                 .bio = NULL,
4206                 .tree = &BTRFS_I(mapping->host)->io_tree,
4207                 .extent_locked = 0,
4208                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4209         };
4210
4211         ret = extent_write_cache_pages(mapping, wbc, &epd);
4212         ASSERT(ret <= 0);
4213         if (ret < 0) {
4214                 end_write_bio(&epd, ret);
4215                 return ret;
4216         }
4217         ret = flush_write_bio(&epd);
4218         return ret;
4219 }
4220
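/*
 * Readahead entry point: batch pages into groups of up to 16 contiguous
 * pages, add them to the page cache and read them together through
 * contiguous_readpages(), reusing a cached extent map and bio across batches.
 */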
4221 int extent_readpages(struct address_space *mapping, struct list_head *pages,
4222                      unsigned nr_pages)
4223 {
4224         struct bio *bio = NULL;
4225         unsigned long bio_flags = 0;
4226         struct page *pagepool[16];
4227         struct extent_map *em_cached = NULL;
4228         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
4229         int nr = 0;
4230         u64 prev_em_start = (u64)-1;
4231
4232         while (!list_empty(pages)) {
4233                 u64 contig_end = 0;
4234
4235                 for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
4236                         struct page *page = lru_to_page(pages);
4237
4238                         prefetchw(&page->flags);
4239                         list_del(&page->lru);
4240                         if (add_to_page_cache_lru(page, mapping, page->index,
4241                                                 readahead_gfp_mask(mapping))) {
4242                                 put_page(page);
4243                                 break;
4244                         }
4245
4246                         pagepool[nr++] = page;
4247                         contig_end = page_offset(page) + PAGE_SIZE - 1;
4248                 }
4249
4250                 if (nr) {
4251                         u64 contig_start = page_offset(pagepool[0]);
4252
4253                         ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
4254
4255                         contiguous_readpages(tree, pagepool, nr, contig_start,
4256                                      contig_end, &em_cached, &bio, &bio_flags,
4257                                      &prev_em_start);
4258                 }
4259         }
4260
4261         if (em_cached)
4262                 free_extent_map(em_cached);
4263
4264         if (bio)
4265                 return submit_one_bio(bio, 0, bio_flags);
4266         return 0;
4267 }
4268
4269 /*
4270  * basic invalidatepage code, this waits on any locked or writeback
4271  * ranges corresponding to the page, and then deletes any extent state
4272  * records from the tree
4273  */
4274 int extent_invalidatepage(struct extent_io_tree *tree,
4275                           struct page *page, unsigned long offset)
4276 {
4277         struct extent_state *cached_state = NULL;
4278         u64 start = page_offset(page);
4279         u64 end = start + PAGE_SIZE - 1;
4280         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4281
4282         start += ALIGN(offset, blocksize);
4283         if (start > end)
4284                 return 0;
4285
4286         lock_extent_bits(tree, start, end, &cached_state);
4287         wait_on_page_writeback(page);
4288         clear_extent_bit(tree, start, end,
4289                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4290                          EXTENT_DO_ACCOUNTING,
4291                          1, 1, &cached_state);
4292         return 0;
4293 }
4294
4295 /*
4296  * a helper for releasepage, this tests for areas of the page that
4297  * are locked or under IO and drops the related state bits if it is safe
4298  * to drop the page.
4299  */
4300 static int try_release_extent_state(struct extent_io_tree *tree,
4301                                     struct page *page, gfp_t mask)
4302 {
4303         u64 start = page_offset(page);
4304         u64 end = start + PAGE_SIZE - 1;
4305         int ret = 1;
4306
4307         if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
4308                 ret = 0;
4309         } else {
4310                 /*
4311                  * at this point we can safely clear everything except the
4312                  * locked bit and the nodatasum bit
4313                  */
4314                 ret = __clear_extent_bit(tree, start, end,
4315                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4316                                  0, 0, NULL, mask, NULL);
4317
4318                 /* If clear_extent_bit failed due to -ENOMEM,
4319                  * we can't allow the release to continue.
4320                  */
4321                 if (ret < 0)
4322                         ret = 0;
4323                 else
4324                         ret = 1;
4325         }
4326         return ret;
4327 }
4328
4329 /*
4330  * a helper for releasepage.  As long as there are no locked extents
4331  * in the range corresponding to the page, both state records and extent
4332  * map records are removed
4333  */
4334 int try_release_extent_mapping(struct page *page, gfp_t mask)
4335 {
4336         struct extent_map *em;
4337         u64 start = page_offset(page);
4338         u64 end = start + PAGE_SIZE - 1;
4339         struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4340         struct extent_io_tree *tree = &btrfs_inode->io_tree;
4341         struct extent_map_tree *map = &btrfs_inode->extent_tree;
4342
4343         if (gfpflags_allow_blocking(mask) &&
4344             page->mapping->host->i_size > SZ_16M) {
4345                 u64 len;
4346                 while (start <= end) {
4347                         len = end - start + 1;
4348                         write_lock(&map->lock);
4349                         em = lookup_extent_mapping(map, start, len);
4350                         if (!em) {
4351                                 write_unlock(&map->lock);
4352                                 break;
4353                         }
4354                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4355                             em->start != start) {
4356                                 write_unlock(&map->lock);
4357                                 free_extent_map(em);
4358                                 break;
4359                         }
4360                         if (!test_range_bit(tree, em->start,
4361                                             extent_map_end(em) - 1,
4362                                             EXTENT_LOCKED, 0, NULL)) {
4363                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4364                                         &btrfs_inode->runtime_flags);
4365                                 remove_extent_mapping(map, em);
4366                                 /* once for the rb tree */
4367                                 free_extent_map(em);
4368                         }
4369                         start = extent_map_end(em);
4370                         write_unlock(&map->lock);
4371
4372                         /* once for us */
4373                         free_extent_map(em);
4374                 }
4375         }
4376         return try_release_extent_state(tree, page, mask);
4377 }
4378
4379 /*
4380  * helper function for fiemap, which doesn't want to see any holes.
4381  * This maps until we find something past 'last'
4382  */
4383 static struct extent_map *get_extent_skip_holes(struct inode *inode,
4384                                                 u64 offset, u64 last)
4385 {
4386         u64 sectorsize = btrfs_inode_sectorsize(inode);
4387         struct extent_map *em;
4388         u64 len;
4389
4390         if (offset >= last)
4391                 return NULL;
4392
4393         while (1) {
4394                 len = last - offset;
4395                 if (len == 0)
4396                         break;
4397                 len = ALIGN(len, sectorsize);
4398                 em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
4399                 if (IS_ERR_OR_NULL(em))
4400                         return em;
4401
4402                 /* if this isn't a hole return it */
4403                 if (em->block_start != EXTENT_MAP_HOLE)
4404                         return em;
4405
4406                 /* this is a hole, advance to the next extent */
4407                 offset = extent_map_end(em);
4408                 free_extent_map(em);
4409                 if (offset >= last)
4410                         break;
4411         }
4412         return NULL;
4413 }
4414
4415 /*
4416  * Cache of the previous fiemap extent.
4417  *
4418  * Used for merging fiemap extents.
4419  */
4420 struct fiemap_cache {
4421         u64 offset;
4422         u64 phys;
4423         u64 len;
4424         u32 flags;
4425         bool cached;
4426 };
4427
4428 /*
4429  * Helper to submit fiemap extent.
4430  *
4431  * Try to merge the current fiemap extent, specified by @offset, @phys,
4432  * @len and @flags, with the cached one.
4433  * Only when the merge fails is the cached extent submitted as a
4434  * fiemap extent.
4435  *
4436  * Return value is the same as fiemap_fill_next_extent().
4437  */
4438 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4439                                 struct fiemap_cache *cache,
4440                                 u64 offset, u64 phys, u64 len, u32 flags)
4441 {
4442         int ret = 0;
4443
4444         if (!cache->cached)
4445                 goto assign;
4446
4447         /*
4448          * Sanity check: extent_fiemap() should have ensured that the new
4449          * fiemap extent won't overlap with the cached one.
4450          * Not recoverable.
4451          *
4452          * NOTE: Physical addresses can overlap due to compression.
4453          */
4454         if (cache->offset + cache->len > offset) {
4455                 WARN_ON(1);
4456                 return -EINVAL;
4457         }
4458
4459         /*
4460          * Only merge fiemap extents if
4461          * 1) Their logical addresses are contiguous
4462          *
4463          * 2) Their physical addresses are contiguous,
4464          *    so truly compressed (physical size smaller than logical size)
4465          *    extents won't get merged with each other
4466          *
4467          * 3) They share the same flags except FIEMAP_EXTENT_LAST,
4468          *    so a regular extent won't get merged with a prealloc extent
4469          */
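        /*
         * For example (illustrative values): a cached extent for logical
         * [0, 4K) at physical 1M merges with a new extent for logical
         * [4K, 8K) at physical 1M + 4K carrying the same flags, producing
         * a single 8K fiemap entry.
         */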
4470         if (cache->offset + cache->len  == offset &&
4471             cache->phys + cache->len == phys  &&
4472             (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4473                         (flags & ~FIEMAP_EXTENT_LAST)) {
4474                 cache->len += len;
4475                 cache->flags |= flags;
4476                 goto try_submit_last;
4477         }
4478
4479         /* Not mergeable, need to submit cached one */
4480         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4481                                       cache->len, cache->flags);
4482         cache->cached = false;
4483         if (ret)
4484                 return ret;
4485 assign:
4486         cache->cached = true;
4487         cache->offset = offset;
4488         cache->phys = phys;
4489         cache->len = len;
4490         cache->flags = flags;
4491 try_submit_last:
4492         if (cache->flags & FIEMAP_EXTENT_LAST) {
4493                 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4494                                 cache->phys, cache->len, cache->flags);
4495                 cache->cached = false;
4496         }
4497         return ret;
4498 }
4499
4500 /*
4501  * Emit last fiemap cache
4502  *
4503  * The last fiemap cache may still be cached in the following case:
4504  * 0                  4k                    8k
4505  * |<- Fiemap range ->|
4506  * |<------------  First extent ----------->|
4507  *
4508  * In this case, the first extent range will be cached but not emitted.
4509  * So we must emit it before ending extent_fiemap().
4510  */
4511 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
4512                                   struct fiemap_cache *cache)
4513 {
4514         int ret;
4515
4516         if (!cache->cached)
4517                 return 0;
4518
4519         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4520                                       cache->len, cache->flags);
4521         cache->cached = false;
4522         if (ret > 0)
4523                 ret = 0;
4524         return ret;
4525 }
4526
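/*
 * FIEMAP implementation: walk the extent maps covering [start, start + len),
 * skipping holes, and report each mapping to user space through the
 * fiemap_cache above so that adjacent extents can be merged before being
 * emitted.
 */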
4527 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4528                 __u64 start, __u64 len)
4529 {
4530         int ret = 0;
4531         u64 off = start;
4532         u64 max = start + len;
4533         u32 flags = 0;
4534         u32 found_type;
4535         u64 last;
4536         u64 last_for_get_extent = 0;
4537         u64 disko = 0;
4538         u64 isize = i_size_read(inode);
4539         struct btrfs_key found_key;
4540         struct extent_map *em = NULL;
4541         struct extent_state *cached_state = NULL;
4542         struct btrfs_path *path;
4543         struct btrfs_root *root = BTRFS_I(inode)->root;
4544         struct fiemap_cache cache = { 0 };
4545         int end = 0;
4546         u64 em_start = 0;
4547         u64 em_len = 0;
4548         u64 em_end = 0;
4549
4550         if (len == 0)
4551                 return -EINVAL;
4552
4553         path = btrfs_alloc_path();
4554         if (!path)
4555                 return -ENOMEM;
4556         path->leave_spinning = 1;
4557
4558         start = round_down(start, btrfs_inode_sectorsize(inode));
4559         len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4560
4561         /*
4562          * lookup the last file extent.  We're not using i_size here
4563          * because there might be preallocation past i_size
4564          */
4565         ret = btrfs_lookup_file_extent(NULL, root, path,
4566                         btrfs_ino(BTRFS_I(inode)), -1, 0);
4567         if (ret < 0) {
4568                 btrfs_free_path(path);
4569                 return ret;
4570         } else {
4571                 WARN_ON(!ret);
4572                 if (ret == 1)
4573                         ret = 0;
4574         }
4575
4576         path->slots[0]--;
4577         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4578         found_type = found_key.type;
4579
4580         /* No extents, but there might be delalloc bits */
4581         if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
4582             found_type != BTRFS_EXTENT_DATA_KEY) {
4583                 /* have to trust i_size as the end */
4584                 last = (u64)-1;
4585                 last_for_get_extent = isize;
4586         } else {
4587                 /*
4588                  * Remember the start of the last extent.  There are a
4589                  * bunch of different factors that go into the length of the
4590                  * extent, so it's much less complex to remember where it started.
4591                  */
4592                 last = found_key.offset;
4593                 last_for_get_extent = last + 1;
4594         }
4595         btrfs_release_path(path);
4596
4597         /*
4598          * We might have some extents allocated but more delalloc past those
4599          * extents, so we trust isize unless the start of the last extent is
4600          * beyond isize.
4601          */
4602         if (last < isize) {
4603                 last = (u64)-1;
4604                 last_for_get_extent = isize;
4605         }
4606
4607         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4608                          &cached_state);
4609
4610         em = get_extent_skip_holes(inode, start, last_for_get_extent);
4611         if (!em)
4612                 goto out;
4613         if (IS_ERR(em)) {
4614                 ret = PTR_ERR(em);
4615                 goto out;
4616         }
4617
4618         while (!end) {
4619                 u64 offset_in_extent = 0;
4620
4621                 /* break if the extent we found is outside the range */
4622                 if (em->start >= max || extent_map_end(em) < off)
4623                         break;
4624
4625                 /*
4626                  * get_extent may return an extent that starts before our
4627                  * requested range.  We have to make sure the ranges
4628                  * we return to fiemap always move forward and don't
4629                  * overlap, so adjust the offsets here
4630                  */
4631                 em_start = max(em->start, off);
4632
4633                 /*
4634                  * Record the offset from the start of the extent
4635                  * for adjusting the disk offset below.  Only do this if the
4636                  * extent isn't compressed, since our in-memory offset may be past
4637                  * what we have actually allocated on disk.
4638                  */
4639                 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4640                         offset_in_extent = em_start - em->start;
4641                 em_end = extent_map_end(em);
4642                 em_len = em_end - em_start;
4643                 flags = 0;
4644                 if (em->block_start < EXTENT_MAP_LAST_BYTE)
4645                         disko = em->block_start + offset_in_extent;
4646                 else
4647                         disko = 0;
4648
4649                 /*
4650                  * bump off for our next call to get_extent
4651                  */
4652                 off = extent_map_end(em);
4653                 if (off >= max)
4654                         end = 1;
4655
4656                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4657                         end = 1;
4658                         flags |= FIEMAP_EXTENT_LAST;
4659                 } else if (em->block_start == EXTENT_MAP_INLINE) {
4660                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
4661                                   FIEMAP_EXTENT_NOT_ALIGNED);
4662                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4663                         flags |= (FIEMAP_EXTENT_DELALLOC |
4664                                   FIEMAP_EXTENT_UNKNOWN);
4665                 } else if (fieinfo->fi_extents_max) {
4666                         u64 bytenr = em->block_start -
4667                                 (em->start - em->orig_start);
4668
4669                         /*
4670                          * As btrfs supports shared space, this information
4671                          * can be exported to userspace tools via
4672                          * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4673                          * then we're just getting a count and we can skip the
4674                          * lookup stuff.
4675                          */
4676                         ret = btrfs_check_shared(root,
4677                                                  btrfs_ino(BTRFS_I(inode)),
4678                                                  bytenr);
4679                         if (ret < 0)
4680                                 goto out_free;
4681                         if (ret)
4682                                 flags |= FIEMAP_EXTENT_SHARED;
4683                         ret = 0;
4684                 }
4685                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4686                         flags |= FIEMAP_EXTENT_ENCODED;
4687                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4688                         flags |= FIEMAP_EXTENT_UNWRITTEN;
4689
4690                 free_extent_map(em);
4691                 em = NULL;
4692                 if ((em_start >= last) || em_len == (u64)-1 ||
4693                    (last == (u64)-1 && isize <= em_end)) {
4694                         flags |= FIEMAP_EXTENT_LAST;
4695                         end = 1;
4696                 }
4697
4698                 /* now scan forward to see if this is really the last extent. */
4699                 em = get_extent_skip_holes(inode, off, last_for_get_extent);
4700                 if (IS_ERR(em)) {
4701                         ret = PTR_ERR(em);
4702                         goto out;
4703                 }
4704                 if (!em) {
4705                         flags |= FIEMAP_EXTENT_LAST;
4706                         end = 1;
4707                 }
4708                 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4709                                            em_len, flags);
4710                 if (ret) {
4711                         if (ret == 1)
4712                                 ret = 0;
4713                         goto out_free;
4714                 }
4715         }
4716 out_free:
4717         if (!ret)
4718                 ret = emit_last_fiemap_cache(fieinfo, &cache);
4719         free_extent_map(em);
4720 out:
4721         btrfs_free_path(path);
4722         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4723                              &cached_state);
4724         return ret;
4725 }
4726
4727 static void __free_extent_buffer(struct extent_buffer *eb)
4728 {
4729         btrfs_leak_debug_del(&eb->leak_list);
4730         kmem_cache_free(extent_buffer_cache, eb);
4731 }
4732
4733 int extent_buffer_under_io(struct extent_buffer *eb)
4734 {
4735         return (atomic_read(&eb->io_pages) ||
4736                 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4737                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4738 }
4739
4740 /*
4741  * Release all pages attached to the extent buffer.
4742  */
4743 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
4744 {
4745         int i;
4746         int num_pages;
4747         int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4748
4749         BUG_ON(extent_buffer_under_io(eb));
4750
4751         num_pages = num_extent_pages(eb);
4752         for (i = 0; i < num_pages; i++) {
4753                 struct page *page = eb->pages[i];
4754
4755                 if (!page)
4756                         continue;
4757                 if (mapped)
4758                         spin_lock(&page->mapping->private_lock);
4759                 /*
4760                  * We do this since we'll remove the pages after we've
4761                  * removed the eb from the radix tree, so we could race
4762                  * and have this page now attached to the new eb.  So
4763                  * only clear page_private if it's still connected to
4764                  * this eb.
4765                  */
4766                 if (PagePrivate(page) &&
4767                     page->private == (unsigned long)eb) {
4768                         BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4769                         BUG_ON(PageDirty(page));
4770                         BUG_ON(PageWriteback(page));
4771                         /*
4772                          * We need to make sure we haven't been attached
4773                          * to a new eb.
4774                          */
4775                         ClearPagePrivate(page);
4776                         set_page_private(page, 0);
4777                         /* One for the page private */
4778                         put_page(page);
4779                 }
4780
4781                 if (mapped)
4782                         spin_unlock(&page->mapping->private_lock);
4783
4784                 /* One for when we allocated the page */
4785                 put_page(page);
4786         }
4787 }
4788
4789 /*
4790  * Helper for releasing the extent buffer.
4791  */
4792 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4793 {
4794         btrfs_release_extent_buffer_pages(eb);
4795         __free_extent_buffer(eb);
4796 }
4797
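/*
 * Allocate and initialize the extent_buffer structure itself, without any
 * backing pages.  The allocation uses __GFP_NOFAIL, so this cannot fail.
 */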
4798 static struct extent_buffer *
4799 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4800                       unsigned long len)
4801 {
4802         struct extent_buffer *eb = NULL;
4803
4804         eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4805         eb->start = start;
4806         eb->len = len;
4807         eb->fs_info = fs_info;
4808         eb->bflags = 0;
4809         rwlock_init(&eb->lock);
4810         atomic_set(&eb->blocking_readers, 0);
4811         atomic_set(&eb->blocking_writers, 0);
4812         eb->lock_nested = false;
4813         init_waitqueue_head(&eb->write_lock_wq);
4814         init_waitqueue_head(&eb->read_lock_wq);
4815
4816         btrfs_leak_debug_add(&eb->leak_list, &buffers);
4817
4818         spin_lock_init(&eb->refs_lock);
4819         atomic_set(&eb->refs, 1);
4820         atomic_set(&eb->io_pages, 0);
4821
4822         /*
4823          * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4824          */
4825         BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4826                 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4827         BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4828
4829 #ifdef CONFIG_BTRFS_DEBUG
4830         atomic_set(&eb->spinning_writers, 0);
4831         atomic_set(&eb->spinning_readers, 0);
4832         atomic_set(&eb->read_locks, 0);
4833         atomic_set(&eb->write_locks, 0);
4834 #endif
4835
4836         return eb;
4837 }
4838
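/*
 * Make an unmapped, uptodate copy of @src: allocate fresh pages, copy the
 * contents page by page and mark the clone EXTENT_BUFFER_UNMAPPED.
 * Returns NULL if a page allocation fails.
 */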
4839 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4840 {
4841         int i;
4842         struct page *p;
4843         struct extent_buffer *new;
4844         int num_pages = num_extent_pages(src);
4845
4846         new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4847         if (new == NULL)
4848                 return NULL;
4849
4850         for (i = 0; i < num_pages; i++) {
4851                 p = alloc_page(GFP_NOFS);
4852                 if (!p) {
4853                         btrfs_release_extent_buffer(new);
4854                         return NULL;
4855                 }
4856                 attach_extent_buffer_page(new, p);
4857                 WARN_ON(PageDirty(p));
4858                 SetPageUptodate(p);
4859                 new->pages[i] = p;
4860                 copy_page(page_address(p), page_address(src->pages[i]));
4861         }
4862
4863         set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4864         set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
4865
4866         return new;
4867 }
4868
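/*
 * Allocate an extent buffer backed by freshly allocated pages that are not
 * attached to the btree inode's page cache, and mark it unmapped and
 * uptodate.
 */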
4869 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4870                                                   u64 start, unsigned long len)
4871 {
4872         struct extent_buffer *eb;
4873         int num_pages;
4874         int i;
4875
4876         eb = __alloc_extent_buffer(fs_info, start, len);
4877         if (!eb)
4878                 return NULL;
4879
4880         num_pages = num_extent_pages(eb);
4881         for (i = 0; i < num_pages; i++) {
4882                 eb->pages[i] = alloc_page(GFP_NOFS);
4883                 if (!eb->pages[i])
4884                         goto err;
4885         }
4886         set_extent_buffer_uptodate(eb);
4887         btrfs_set_header_nritems(eb, 0);
4888         set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4889
4890         return eb;
4891 err:
4892         for (; i > 0; i--)
4893                 __free_page(eb->pages[i - 1]);
4894         __free_extent_buffer(eb);
4895         return NULL;
4896 }
4897
4898 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4899                                                 u64 start)
4900 {
4901         return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
4902 }
4903
4904 static void check_buffer_tree_ref(struct extent_buffer *eb)
4905 {
4906         int refs;
4907         /* The ref bit is tricky.  We have to make sure it is set
4908          * if we have the buffer dirty.  Otherwise the
4909          * code to free a buffer can end up dropping a dirty
4910          * page.
4911          *
4912          * Once the ref bit is set, it won't go away while the
4913          * buffer is dirty or in writeback, and it also won't
4914          * go away while we have the reference count on the
4915          * eb bumped.
4916          *
4917          * We can't just set the ref bit without bumping the
4918          * ref on the eb because free_extent_buffer might
4919          * see the ref bit and try to clear it.  If this happens
4920          * free_extent_buffer might end up dropping our original
4921          * ref by mistake and freeing the page before we are able
4922          * to add one more ref.
4923          *
4924          * So bump the ref count first, then set the bit.  If someone
4925          * beat us to it, drop the ref we added.
4926          */
4927         refs = atomic_read(&eb->refs);
4928         if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4929                 return;
4930
4931         spin_lock(&eb->refs_lock);
4932         if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4933                 atomic_inc(&eb->refs);
4934         spin_unlock(&eb->refs_lock);
4935 }
4936
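/*
 * Take the tree reference if needed and mark the buffer's pages accessed for
 * page reclaim, skipping the page the caller already touched (@accessed).
 */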
4937 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4938                 struct page *accessed)
4939 {
4940         int num_pages, i;
4941
4942         check_buffer_tree_ref(eb);
4943
4944         num_pages = num_extent_pages(eb);
4945         for (i = 0; i < num_pages; i++) {
4946                 struct page *p = eb->pages[i];
4947
4948                 if (p != accessed)
4949                         mark_page_accessed(p);
4950         }
4951 }
4952
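/*
 * Look up an extent buffer in fs_info->buffer_radix by its start offset and
 * grab a reference on it.  Returns NULL if it is not present or is already
 * on its way to being freed.
 */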
4953 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4954                                          u64 start)
4955 {
4956         struct extent_buffer *eb;
4957
4958         rcu_read_lock();
4959         eb = radix_tree_lookup(&fs_info->buffer_radix,
4960                                start >> PAGE_SHIFT);
4961         if (eb && atomic_inc_not_zero(&eb->refs)) {
4962                 rcu_read_unlock();
4963                 /*
4964                  * Lock our eb's refs_lock to avoid races with
4965                  * free_extent_buffer. When we get our eb it might be flagged
4966                  * with EXTENT_BUFFER_STALE and another task running
4967                  * free_extent_buffer might have seen that flag set,
4968                  * eb->refs == 2, that the buffer isn't under IO (dirty and
4969                  * writeback flags not set) and it's still in the tree (flag
4970                  * EXTENT_BUFFER_TREE_REF set), therefore being in the process
4971                  * of decrementing the extent buffer's reference count twice.
4972                  * So here we could race and increment the eb's reference count,
4973                  * clear its stale flag, mark it as dirty and drop our reference
4974                  * before the other task finishes executing free_extent_buffer,
4975                  * which would later result in an attempt to free an extent
4976                  * buffer that is dirty.
4977                  */
4978                 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4979                         spin_lock(&eb->refs_lock);
4980                         spin_unlock(&eb->refs_lock);
4981                 }
4982                 mark_extent_buffer_accessed(eb, NULL);
4983                 return eb;
4984         }
4985         rcu_read_unlock();
4986
4987         return NULL;
4988 }
4989
4990 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4991 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4992                                         u64 start)
4993 {
4994         struct extent_buffer *eb, *exists = NULL;
4995         int ret;
4996
4997         eb = find_extent_buffer(fs_info, start);
4998         if (eb)
4999                 return eb;
5000         eb = alloc_dummy_extent_buffer(fs_info, start);
5001         if (!eb)
5002                 return NULL;
5003         eb->fs_info = fs_info;
5004 again:
5005         ret = radix_tree_preload(GFP_NOFS);
5006         if (ret)
5007                 goto free_eb;
5008         spin_lock(&fs_info->buffer_lock);
5009         ret = radix_tree_insert(&fs_info->buffer_radix,
5010                                 start >> PAGE_SHIFT, eb);
5011         spin_unlock(&fs_info->buffer_lock);
5012         radix_tree_preload_end();
5013         if (ret == -EEXIST) {
5014                 exists = find_extent_buffer(fs_info, start);
5015                 if (exists)
5016                         goto free_eb;
5017                 else
5018                         goto again;
5019         }
5020         check_buffer_tree_ref(eb);
5021         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5022
5023         return eb;
5024 free_eb:
5025         btrfs_release_extent_buffer(eb);
5026         return exists;
5027 }
5028 #endif
5029
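/*
 * Find or create the extent buffer for the tree block at @start.
 *
 * Backing pages come from the btree inode's page cache; the new buffer is
 * then inserted into fs_info->buffer_radix.  If another task races us and
 * inserts one first (-EEXIST), ours is dropped and the existing buffer is
 * returned instead.
 */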
5030 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
5031                                           u64 start)
5032 {
5033         unsigned long len = fs_info->nodesize;
5034         int num_pages;
5035         int i;
5036         unsigned long index = start >> PAGE_SHIFT;
5037         struct extent_buffer *eb;
5038         struct extent_buffer *exists = NULL;
5039         struct page *p;
5040         struct address_space *mapping = fs_info->btree_inode->i_mapping;
5041         int uptodate = 1;
5042         int ret;
5043
5044         if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5045                 btrfs_err(fs_info, "bad tree block start %llu", start);
5046                 return ERR_PTR(-EINVAL);
5047         }
5048
5049         eb = find_extent_buffer(fs_info, start);
5050         if (eb)
5051                 return eb;
5052
5053         eb = __alloc_extent_buffer(fs_info, start, len);
5054         if (!eb)
5055                 return ERR_PTR(-ENOMEM);
5056
5057         num_pages = num_extent_pages(eb);
5058         for (i = 0; i < num_pages; i++, index++) {
5059                 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
5060                 if (!p) {
5061                         exists = ERR_PTR(-ENOMEM);
5062                         goto free_eb;
5063                 }
5064
5065                 spin_lock(&mapping->private_lock);
5066                 if (PagePrivate(p)) {
5067                         /*
5068                          * We could have already allocated an eb for this page and
5069                          * attached one, so let's see if we can get a ref on the
5070                          * existing eb.  If we can, we know it's good and we can
5071                          * just return that one; otherwise we know we can safely
5072                          * overwrite page->private.
5073                          */
5074                         exists = (struct extent_buffer *)p->private;
5075                         if (atomic_inc_not_zero(&exists->refs)) {
5076                                 spin_unlock(&mapping->private_lock);
5077                                 unlock_page(p);
5078                                 put_page(p);
5079                                 mark_extent_buffer_accessed(exists, p);
5080                                 goto free_eb;
5081                         }
5082                         exists = NULL;
5083
5084                         /*
5085                          * Do this so attach doesn't complain, and drop the
5086                          * ref the old eb had on this page.
5087                          */
5088                         ClearPagePrivate(p);
5089                         WARN_ON(PageDirty(p));
5090                         put_page(p);
5091                 }
5092                 attach_extent_buffer_page(eb, p);
5093                 spin_unlock(&mapping->private_lock);
5094                 WARN_ON(PageDirty(p));
5095                 eb->pages[i] = p;
5096                 if (!PageUptodate(p))
5097                         uptodate = 0;
5098
5099                 /*
5100                  * We can't unlock the pages just yet since the extent buffer
5101                  * hasn't been properly inserted into the radix tree.  That
5102                  * opens a race with btree_releasepage, which can free a page
5103                  * while we are still filling in all pages for the buffer and
5104                  * we could crash.
5105                  */
5106         }
5107         if (uptodate)
5108                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5109 again:
5110         ret = radix_tree_preload(GFP_NOFS);
5111         if (ret) {
5112                 exists = ERR_PTR(ret);
5113                 goto free_eb;
5114         }
5115
5116         spin_lock(&fs_info->buffer_lock);
5117         ret = radix_tree_insert(&fs_info->buffer_radix,
5118                                 start >> PAGE_SHIFT, eb);
5119         spin_unlock(&fs_info->buffer_lock);
5120         radix_tree_preload_end();
5121         if (ret == -EEXIST) {
5122                 exists = find_extent_buffer(fs_info, start);
5123                 if (exists)
5124                         goto free_eb;
5125                 else
5126                         goto again;
5127         }
5128         /* add one reference for the tree */
5129         check_buffer_tree_ref(eb);
5130         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5131
5132         /*
5133          * Now it's safe to unlock the pages because any calls to
5134          * btree_releasepage will correctly detect that a page belongs to a
5135          * live buffer and won't free them prematurely.
5136          */
5137         for (i = 0; i < num_pages; i++)
5138                 unlock_page(eb->pages[i]);
5139         return eb;
5140
5141 free_eb:
5142         WARN_ON(!atomic_dec_and_test(&eb->refs));
5143         for (i = 0; i < num_pages; i++) {
5144                 if (eb->pages[i])
5145                         unlock_page(eb->pages[i]);
5146         }
5147
5148         btrfs_release_extent_buffer(eb);
5149         return exists;
5150 }
5151
5152 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5153 {
5154         struct extent_buffer *eb =
5155                         container_of(head, struct extent_buffer, rcu_head);
5156
5157         __free_extent_buffer(eb);
5158 }
5159
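/*
 * Drop one reference with eb->refs_lock held.  When the last reference goes
 * away the buffer is removed from the radix tree, its pages are released and
 * the structure itself is freed.  Returns 1 if the buffer was freed, 0
 * otherwise.
 */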
5160 static int release_extent_buffer(struct extent_buffer *eb)
5161 {
5162         lockdep_assert_held(&eb->refs_lock);
5163
5164         WARN_ON(atomic_read(&eb->refs) == 0);
5165         if (atomic_dec_and_test(&eb->refs)) {
5166                 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
5167                         struct btrfs_fs_info *fs_info = eb->fs_info;
5168
5169                         spin_unlock(&eb->refs_lock);
5170
5171                         spin_lock(&fs_info->buffer_lock);
5172                         radix_tree_delete(&fs_info->buffer_radix,
5173                                           eb->start >> PAGE_SHIFT);
5174                         spin_unlock(&fs_info->buffer_lock);
5175                 } else {
5176                         spin_unlock(&eb->refs_lock);
5177                 }
5178
5179                 /* Should be safe to release our pages at this point */
5180                 btrfs_release_extent_buffer_pages(eb);
5181 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5182                 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
5183                         __free_extent_buffer(eb);
5184                         return 1;
5185                 }
5186 #endif
5187                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5188                 return 1;
5189         }
5190         spin_unlock(&eb->refs_lock);
5191
5192         return 0;
5193 }
5194
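/*
 * Drop a reference on @eb.  While the reference count is high enough this is
 * done locklessly with cmpxchg; at low counts we take refs_lock, drop the
 * tree reference of a stale buffer that is not under IO, and let
 * release_extent_buffer() do the final accounting.
 */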
5195 void free_extent_buffer(struct extent_buffer *eb)
5196 {
5197         int refs;
5198         int old;
5199         if (!eb)
5200                 return;
5201
5202         while (1) {
5203                 refs = atomic_read(&eb->refs);
5204                 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5205                     || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5206                         refs == 1))
5207                         break;
5208                 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5209                 if (old == refs)
5210                         return;
5211         }
5212
5213         spin_lock(&eb->refs_lock);
5214         if (atomic_read(&eb->refs) == 2 &&
5215             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5216             !extent_buffer_under_io(eb) &&
5217             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5218                 atomic_dec(&eb->refs);
5219
5220         /*
5221          * I know this is terrible, but it's temporary until we stop tracking
5222          * the uptodate bits and such for the extent buffers.
5223          */
5224         release_extent_buffer(eb);
5225 }
5226
5227 void free_extent_buffer_stale(struct extent_buffer *eb)
5228 {
5229         if (!eb)
5230                 return;
5231
5232         spin_lock(&eb->refs_lock);
5233         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5234
5235         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5236             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5237                 atomic_dec(&eb->refs);
5238         release_extent_buffer(eb);
5239 }
5240
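/*
 * Clear the dirty state of every page backing @eb, including the
 * PAGECACHE_TAG_DIRTY tag in the mapping, so writeback will not pick the
 * pages up again.
 */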
5241 void clear_extent_buffer_dirty(struct extent_buffer *eb)
5242 {
5243         int i;
5244         int num_pages;
5245         struct page *page;
5246
5247         num_pages = num_extent_pages(eb);
5248
5249         for (i = 0; i < num_pages; i++) {
5250                 page = eb->pages[i];
5251                 if (!PageDirty(page))
5252                         continue;
5253
5254                 lock_page(page);
5255                 WARN_ON(!PagePrivate(page));
5256
5257                 clear_page_dirty_for_io(page);
5258                 xa_lock_irq(&page->mapping->i_pages);
5259                 if (!PageDirty(page))
5260                         __xa_clear_mark(&page->mapping->i_pages,
5261                                         page_index(page), PAGECACHE_TAG_DIRTY);
5262                 xa_unlock_irq(&page->mapping->i_pages);
5263                 ClearPageError(page);
5264                 unlock_page(page);
5265         }
5266         WARN_ON(atomic_read(&eb->refs) == 0);
5267 }
5268
5269 bool set_extent_buffer_dirty(struct extent_buffer *eb)
5270 {
5271         int i;
5272         int num_pages;
5273         bool was_dirty;
5274
5275         check_buffer_tree_ref(eb);
5276
5277         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5278
5279         num_pages = num_extent_pages(eb);
5280         WARN_ON(atomic_read(&eb->refs) == 0);
5281         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5282
5283         if (!was_dirty)
5284                 for (i = 0; i < num_pages; i++)
5285                         set_page_dirty(eb->pages[i]);
5286
5287 #ifdef CONFIG_BTRFS_DEBUG
5288         for (i = 0; i < num_pages; i++)
5289                 ASSERT(PageDirty(eb->pages[i]));
5290 #endif
5291
5292         return was_dirty;
5293 }
5294
5295 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5296 {
5297         int i;
5298         struct page *page;
5299         int num_pages;
5300
5301         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5302         num_pages = num_extent_pages(eb);
5303         for (i = 0; i < num_pages; i++) {
5304                 page = eb->pages[i];
5305                 if (page)
5306                         ClearPageUptodate(page);
5307         }
5308 }
5309
5310 void set_extent_buffer_uptodate(struct extent_buffer *eb)
5311 {
5312         int i;
5313         struct page *page;
5314         int num_pages;
5315
5316         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5317         num_pages = num_extent_pages(eb);
5318         for (i = 0; i < num_pages; i++) {
5319                 page = eb->pages[i];
5320                 SetPageUptodate(page);
5321         }
5322 }
5323
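/*
 * Read the pages of @eb that are not yet uptodate.
 *
 * With WAIT_NONE only trylock the pages and back off on contention; with
 * WAIT_COMPLETE wait for every page and return -EIO if any of them failed to
 * become uptodate.  @mirror_num selects which copy to read from.
 */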
5324 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
5325 {
5326         int i;
5327         struct page *page;
5328         int err;
5329         int ret = 0;
5330         int locked_pages = 0;
5331         int all_uptodate = 1;
5332         int num_pages;
5333         unsigned long num_reads = 0;
5334         struct bio *bio = NULL;
5335         unsigned long bio_flags = 0;
5336         struct extent_io_tree *tree = &BTRFS_I(eb->fs_info->btree_inode)->io_tree;
5337
5338         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5339                 return 0;
5340
5341         num_pages = num_extent_pages(eb);
5342         for (i = 0; i < num_pages; i++) {
5343                 page = eb->pages[i];
5344                 if (wait == WAIT_NONE) {
5345                         if (!trylock_page(page))
5346                                 goto unlock_exit;
5347                 } else {
5348                         lock_page(page);
5349                 }
5350                 locked_pages++;
5351         }
5352         /*
5353          * We need to lock all pages first to make sure that the
5354          * uptodate bit of our pages won't be affected by
5355          * clear_extent_buffer_uptodate().
5356          */
5357         for (i = 0; i < num_pages; i++) {
5358                 page = eb->pages[i];
5359                 if (!PageUptodate(page)) {
5360                         num_reads++;
5361                         all_uptodate = 0;
5362                 }
5363         }
5364
5365         if (all_uptodate) {
5366                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5367                 goto unlock_exit;
5368         }
5369
5370         clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5371         eb->read_mirror = 0;
5372         atomic_set(&eb->io_pages, num_reads);
5373         for (i = 0; i < num_pages; i++) {
5374                 page = eb->pages[i];
5375
5376                 if (!PageUptodate(page)) {
5377                         if (ret) {
5378                                 atomic_dec(&eb->io_pages);
5379                                 unlock_page(page);
5380                                 continue;
5381                         }
5382
5383                         ClearPageError(page);
5384                         err = __extent_read_full_page(tree, page,
5385                                                       btree_get_extent, &bio,
5386                                                       mirror_num, &bio_flags,
5387                                                       REQ_META);
5388                         if (err) {
5389                                 ret = err;
5390                                 /*
5391                                  * We passed &bio to __extent_read_full_page
5392                                  * above, so if it returned an error the current
5393                                  * page failed to be added to the bio and has
5394                                  * already been unlocked.
5395                                  *
5396                                  * We must decrement io_pages ourselves.
5397                                  */
5398                                 atomic_dec(&eb->io_pages);
5399                         }
5400                 } else {
5401                         unlock_page(page);
5402                 }
5403         }
5404
5405         if (bio) {
5406                 err = submit_one_bio(bio, mirror_num, bio_flags);
5407                 if (err)
5408                         return err;
5409         }
5410
5411         if (ret || wait != WAIT_COMPLETE)
5412                 return ret;
5413
5414         for (i = 0; i < num_pages; i++) {
5415                 page = eb->pages[i];
5416                 wait_on_page_locked(page);
5417                 if (!PageUptodate(page))
5418                         ret = -EIO;
5419         }
5420
5421         return ret;
5422
5423 unlock_exit:
5424         while (locked_pages > 0) {
5425                 locked_pages--;
5426                 page = eb->pages[locked_pages];
5427                 unlock_page(page);
5428         }
5429         return ret;
5430 }
5431
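/*
 * Copy @len bytes starting at offset @start of the extent buffer into @dstv,
 * walking the backing pages one at a time.  An out of range request is
 * reported with a warning and @dstv is zeroed instead.
 */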
5432 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5433                         unsigned long start, unsigned long len)
5434 {
5435         size_t cur;
5436         size_t offset;
5437         struct page *page;
5438         char *kaddr;
5439         char *dst = (char *)dstv;
5440         size_t start_offset = offset_in_page(eb->start);
5441         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5442
5443         if (start + len > eb->len) {
5444                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5445                      eb->start, eb->len, start, len);
5446                 memset(dst, 0, len);
5447                 return;
5448         }
5449
5450         offset = offset_in_page(start_offset + start);
5451
5452         while (len > 0) {
5453                 page = eb->pages[i];
5454
5455                 cur = min(len, (PAGE_SIZE - offset));
5456                 kaddr = page_address(page);
5457                 memcpy(dst, kaddr + offset, cur);
5458
5459                 dst += cur;
5460                 len -= cur;
5461                 offset = 0;
5462                 i++;
5463         }
5464 }
5465
5466 int read_extent_buffer_to_user(const struct extent_buffer *eb,
5467                                void __user *dstv,
5468                                unsigned long start, unsigned long len)
5469 {
5470         size_t cur;
5471         size_t offset;
5472         struct page *page;
5473         char *kaddr;
5474         char __user *dst = (char __user *)dstv;
5475         size_t start_offset = offset_in_page(eb->start);
5476         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5477         int ret = 0;
5478
5479         WARN_ON(start > eb->len);
5480         WARN_ON(start + len > eb->start + eb->len);
5481
5482         offset = offset_in_page(start_offset + start);
5483
5484         while (len > 0) {
5485                 page = eb->pages[i];
5486
5487                 cur = min(len, (PAGE_SIZE - offset));
5488                 kaddr = page_address(page);
5489                 if (copy_to_user(dst, kaddr + offset, cur)) {
5490                         ret = -EFAULT;
5491                         break;
5492                 }
5493
5494                 dst += cur;
5495                 len -= cur;
5496                 offset = 0;
5497                 i++;
5498         }
5499
5500         return ret;
5501 }
5502
5503 /*
5504  * Return 0 if the item is found within a page.
5505  * Return 1 if the item spans two pages.
5506  * Return -EINVAL otherwise.
5507  */
5508 int map_private_extent_buffer(const struct extent_buffer *eb,
5509                               unsigned long start, unsigned long min_len,
5510                               char **map, unsigned long *map_start,
5511                               unsigned long *map_len)
5512 {
5513         size_t offset;
5514         char *kaddr;
5515         struct page *p;
5516         size_t start_offset = offset_in_page(eb->start);
5517         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5518         unsigned long end_i = (start_offset + start + min_len - 1) >>
5519                 PAGE_SHIFT;
5520
5521         if (start + min_len > eb->len) {
5522                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5523                        eb->start, eb->len, start, min_len);
5524                 return -EINVAL;
5525         }
5526
5527         if (i != end_i)
5528                 return 1;
5529
5530         if (i == 0) {
5531                 offset = start_offset;
5532                 *map_start = 0;
5533         } else {
5534                 offset = 0;
5535                 *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5536         }
5537
5538         p = eb->pages[i];
5539         kaddr = page_address(p);
5540         *map = kaddr + offset;
5541         *map_len = PAGE_SIZE - offset;
5542         return 0;
5543 }
5544
5545 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5546                          unsigned long start, unsigned long len)
5547 {
5548         size_t cur;
5549         size_t offset;
5550         struct page *page;
5551         char *kaddr;
5552         char *ptr = (char *)ptrv;
5553         size_t start_offset = offset_in_page(eb->start);
5554         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5555         int ret = 0;
5556
5557         WARN_ON(start > eb->len);
5558         WARN_ON(start + len > eb->start + eb->len);
5559
5560         offset = offset_in_page(start_offset + start);
5561
5562         while (len > 0) {
5563                 page = eb->pages[i];
5564
5565                 cur = min(len, (PAGE_SIZE - offset));
5566
5567                 kaddr = page_address(page);
5568                 ret = memcmp(ptr, kaddr + offset, cur);
5569                 if (ret)
5570                         break;
5571
5572                 ptr += cur;
5573                 len -= cur;
5574                 offset = 0;
5575                 i++;
5576         }
5577         return ret;
5578 }
5579
5580 void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
5581                 const void *srcv)
5582 {
5583         char *kaddr;
5584
5585         WARN_ON(!PageUptodate(eb->pages[0]));
5586         kaddr = page_address(eb->pages[0]);
5587         memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5588                         BTRFS_FSID_SIZE);
5589 }
5590
5591 void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
5592 {
5593         char *kaddr;
5594
5595         WARN_ON(!PageUptodate(eb->pages[0]));
5596         kaddr = page_address(eb->pages[0]);
5597         memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5598                         BTRFS_FSID_SIZE);
5599 }
5600
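/*
 * Usage sketch: both helpers above are meant for stamping the header of a
 * freshly allocated tree block.  The exact fs_info field names below are
 * illustrative; they have moved around between kernel versions:
 *
 *	write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
 *	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
 */
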
5601 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5602                          unsigned long start, unsigned long len)
5603 {
5604         size_t cur;
5605         size_t offset;
5606         struct page *page;
5607         char *kaddr;
5608         char *src = (char *)srcv;
5609         size_t start_offset = offset_in_page(eb->start);
5610         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5611
5612         WARN_ON(start > eb->len);
5613         WARN_ON(start + len > eb->len);
5614
5615         offset = offset_in_page(start_offset + start);
5616
5617         while (len > 0) {
5618                 page = eb->pages[i];
5619                 WARN_ON(!PageUptodate(page));
5620
5621                 cur = min(len, PAGE_SIZE - offset);
5622                 kaddr = page_address(page);
5623                 memcpy(kaddr + offset, src, cur);
5624
5625                 src += cur;
5626                 len -= cur;
5627                 offset = 0;
5628                 i++;
5629         }
5630 }
5631
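/*
 * Usage sketch (variable names are illustrative): write_extent_buffer() is
 * the byte-granular store primitive that the typed setters reduce to, e.g.
 * copying an on-disk item body into a leaf slot:
 *
 *	write_extent_buffer(leaf, &disk_item,
 *			    btrfs_item_ptr_offset(leaf, slot),
 *			    sizeof(disk_item));
 *
 * where btrfs_item_ptr_offset() (from ctree.h) turns a slot number into a
 * byte offset inside the leaf.
 */
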
5632 void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
5633                 unsigned long len)
5634 {
5635         size_t cur;
5636         size_t offset;
5637         struct page *page;
5638         char *kaddr;
5639         size_t start_offset = offset_in_page(eb->start);
5640         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5641
5642         WARN_ON(start > eb->len);
5643         WARN_ON(start + len > eb->len);
5644
5645         offset = offset_in_page(start_offset + start);
5646
5647         while (len > 0) {
5648                 page = eb->pages[i];
5649                 WARN_ON(!PageUptodate(page));
5650
5651                 cur = min(len, PAGE_SIZE - offset);
5652                 kaddr = page_address(page);
5653                 memset(kaddr + offset, 0, cur);
5654
5655                 len -= cur;
5656                 offset = 0;
5657                 i++;
5658         }
5659 }
5660
5661 void copy_extent_buffer_full(struct extent_buffer *dst,
5662                              struct extent_buffer *src)
5663 {
5664         int i;
5665         int num_pages;
5666
5667         ASSERT(dst->len == src->len);
5668
5669         num_pages = num_extent_pages(dst);
5670         for (i = 0; i < num_pages; i++)
5671                 copy_page(page_address(dst->pages[i]),
5672                                 page_address(src->pages[i]));
5673 }
5674
5675 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5676                         unsigned long dst_offset, unsigned long src_offset,
5677                         unsigned long len)
5678 {
5679         u64 dst_len = dst->len;
5680         size_t cur;
5681         size_t offset;
5682         struct page *page;
5683         char *kaddr;
5684         size_t start_offset = offset_in_page(dst->start);
5685         unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5686
5687         WARN_ON(src->len != dst_len);
5688
5689         offset = offset_in_page(start_offset + dst_offset);
5690
5691         while (len > 0) {
5692                 page = dst->pages[i];
5693                 WARN_ON(!PageUptodate(page));
5694
5695                 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5696
5697                 kaddr = page_address(page);
5698                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5699
5700                 src_offset += cur;
5701                 len -= cur;
5702                 offset = 0;
5703                 i++;
5704         }
5705 }
5706
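/*
 * Usage sketch (modeled on node splitting in ctree.c; mid/nritems are
 * illustrative): copy_extent_buffer() copies between two different extent
 * buffers, e.g. migrating the upper half of a node's key pointers into a
 * freshly allocated sibling:
 *
 *	copy_extent_buffer(split, node,
 *			   btrfs_node_key_ptr_offset(0),
 *			   btrfs_node_key_ptr_offset(mid),
 *			   (nritems - mid) * sizeof(struct btrfs_key_ptr));
 */
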
5707 /*
5708  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5709  * given bit number
5710  * @eb: the extent buffer
5711  * @start: offset of the bitmap item in the extent buffer
5712  * @nr: bit number
5713  * @page_index: return index of the page in the extent buffer that contains the
5714  * given bit number
5715  * @page_offset: return offset into the page given by page_index
5716  *
5717  * This helper hides the ugliness of finding the byte in an extent buffer which
5718  * contains a given bit.
5719  */
5720 static inline void eb_bitmap_offset(struct extent_buffer *eb,
5721                                     unsigned long start, unsigned long nr,
5722                                     unsigned long *page_index,
5723                                     size_t *page_offset)
5724 {
5725         size_t start_offset = offset_in_page(eb->start);
5726         size_t byte_offset = BIT_BYTE(nr);
5727         size_t offset;
5728
5729         /*
5730          * The byte we want is the offset of the extent buffer + the offset of
5731          * the bitmap item in the extent buffer + the offset of the byte in the
5732          * bitmap item.
5733          */
5734         offset = start_offset + start + byte_offset;
5735
5736         *page_index = offset >> PAGE_SHIFT;
5737         *page_offset = offset_in_page(offset);
5738 }
5739
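/*
 * Worked example (illustrative numbers, PAGE_SIZE == 4096): for an extent
 * buffer that starts 2048 bytes into its first page (start_offset == 2048),
 * a bitmap item at start == 3000 and bit nr == 100 lives at byte
 * 2048 + 3000 + 100 / 8 = 5060 from the first page, so *page_index == 1
 * and *page_offset == 5060 - 4096 == 964.
 */
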
5740 /**
5741  * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5742  * @eb: the extent buffer
5743  * @start: offset of the bitmap item in the extent buffer
5744  * @nr: bit number to test
5745  */
5746 int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5747                            unsigned long nr)
5748 {
5749         u8 *kaddr;
5750         struct page *page;
5751         unsigned long i;
5752         size_t offset;
5753
5754         eb_bitmap_offset(eb, start, nr, &i, &offset);
5755         page = eb->pages[i];
5756         WARN_ON(!PageUptodate(page));
5757         kaddr = page_address(page);
5758         return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5759 }
5760
5761 /**
5762  * extent_buffer_bitmap_set - set an area of a bitmap
5763  * @eb: the extent buffer
5764  * @start: offset of the bitmap item in the extent buffer
5765  * @pos: bit number of the first bit
5766  * @len: number of bits to set
5767  */
5768 void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5769                               unsigned long pos, unsigned long len)
5770 {
5771         u8 *kaddr;
5772         struct page *page;
5773         unsigned long i;
5774         size_t offset;
5775         const unsigned int size = pos + len;
5776         int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5777         u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5778
5779         eb_bitmap_offset(eb, start, pos, &i, &offset);
5780         page = eb->pages[i];
5781         WARN_ON(!PageUptodate(page));
5782         kaddr = page_address(page);
5783
5784         while (len >= bits_to_set) {
5785                 kaddr[offset] |= mask_to_set;
5786                 len -= bits_to_set;
5787                 bits_to_set = BITS_PER_BYTE;
5788                 mask_to_set = ~0;
5789                 if (++offset >= PAGE_SIZE && len > 0) {
5790                         offset = 0;
5791                         page = eb->pages[++i];
5792                         WARN_ON(!PageUptodate(page));
5793                         kaddr = page_address(page);
5794                 }
5795         }
5796         if (len) {
5797                 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5798                 kaddr[offset] |= mask_to_set;
5799         }
5800 }
5801
5803 /**
5804  * extent_buffer_bitmap_clear - clear an area of a bitmap
5805  * @eb: the extent buffer
5806  * @start: offset of the bitmap item in the extent buffer
5807  * @pos: bit number of the first bit
5808  * @len: number of bits to clear
5809  */
5810 void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5811                                 unsigned long pos, unsigned long len)
5812 {
5813         u8 *kaddr;
5814         struct page *page;
5815         unsigned long i;
5816         size_t offset;
5817         const unsigned int size = pos + len;
5818         int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5819         u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5820
5821         eb_bitmap_offset(eb, start, pos, &i, &offset);
5822         page = eb->pages[i];
5823         WARN_ON(!PageUptodate(page));
5824         kaddr = page_address(page);
5825
5826         while (len >= bits_to_clear) {
5827                 kaddr[offset] &= ~mask_to_clear;
5828                 len -= bits_to_clear;
5829                 bits_to_clear = BITS_PER_BYTE;
5830                 mask_to_clear = ~0;
5831                 if (++offset >= PAGE_SIZE && len > 0) {
5832                         offset = 0;
5833                         page = eb->pages[++i];
5834                         WARN_ON(!PageUptodate(page));
5835                         kaddr = page_address(page);
5836                 }
5837         }
5838         if (len) {
5839                 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5840                 kaddr[offset] &= ~mask_to_clear;
5841         }
5842 }
5843
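/*
 * Round-trip sketch for the three bitmap helpers above (in the spirit of
 * the extent-io self tests; the function is illustrative and not called
 * from anywhere):
 */
static inline void example_bitmap_roundtrip(struct extent_buffer *eb,
                                            unsigned long start)
{
        /* set bits [8, 29) of the bitmap item stored at offset @start */
        extent_buffer_bitmap_set(eb, start, 8, 21);
        WARN_ON(!extent_buffer_test_bit(eb, start, 20));

        /* clear the same run again; the bit must now read back as zero */
        extent_buffer_bitmap_clear(eb, start, 8, 21);
        WARN_ON(extent_buffer_test_bit(eb, start, 20));
}
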
5844 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5845 {
5846         unsigned long distance = (src > dst) ? src - dst : dst - src;
5847         return distance < len;
5848 }
5849
5850 static void copy_pages(struct page *dst_page, struct page *src_page,
5851                        unsigned long dst_off, unsigned long src_off,
5852                        unsigned long len)
5853 {
5854         char *dst_kaddr = page_address(dst_page);
5855         char *src_kaddr;
5856         int must_memmove = 0;
5857
5858         if (dst_page != src_page) {
5859                 src_kaddr = page_address(src_page);
5860         } else {
5861                 src_kaddr = dst_kaddr;
5862                 if (areas_overlap(src_off, dst_off, len))
5863                         must_memmove = 1;
5864         }
5865
5866         if (must_memmove)
5867                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5868         else
5869                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5870 }
5871
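/*
 * Worked example for the overlap test used above: with src_off == 100,
 * dst_off == 160 and len == 80 the distance is 60 < 80, so the ranges
 * [100, 180) and [160, 240) overlap within the page and copy_pages() must
 * use memmove(); with len == 50 the distance is not smaller than the
 * length and a plain memcpy() is safe.
 */
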
5872 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5873                            unsigned long src_offset, unsigned long len)
5874 {
5875         struct btrfs_fs_info *fs_info = dst->fs_info;
5876         size_t cur;
5877         size_t dst_off_in_page;
5878         size_t src_off_in_page;
5879         size_t start_offset = offset_in_page(dst->start);
5880         unsigned long dst_i;
5881         unsigned long src_i;
5882
5883         if (src_offset + len > dst->len) {
5884                 btrfs_err(fs_info,
5885                         "memcpy bogus src_offset %lu move len %lu dst len %lu",
5886                          src_offset, len, dst->len);
5887                 BUG();
5888         }
5889         if (dst_offset + len > dst->len) {
5890                 btrfs_err(fs_info,
5891                         "memcpy bogus dst_offset %lu move len %lu dst len %lu",
5892                          dst_offset, len, dst->len);
5893                 BUG();
5894         }
5895
5896         while (len > 0) {
5897                 dst_off_in_page = offset_in_page(start_offset + dst_offset);
5898                 src_off_in_page = offset_in_page(start_offset + src_offset);
5899
5900                 dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5901                 src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5902
5903                 cur = min(len, (unsigned long)(PAGE_SIZE -
5904                                                src_off_in_page));
5905                 cur = min_t(unsigned long, cur,
5906                         (unsigned long)(PAGE_SIZE - dst_off_in_page));
5907
5908                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5909                            dst_off_in_page, src_off_in_page, cur);
5910
5911                 src_offset += cur;
5912                 dst_offset += cur;
5913                 len -= cur;
5914         }
5915 }
5916
5917 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5918                            unsigned long src_offset, unsigned long len)
5919 {
5920         struct btrfs_fs_info *fs_info = dst->fs_info;
5921         size_t cur;
5922         size_t dst_off_in_page;
5923         size_t src_off_in_page;
5924         unsigned long dst_end = dst_offset + len - 1;
5925         unsigned long src_end = src_offset + len - 1;
5926         size_t start_offset = offset_in_page(dst->start);
5927         unsigned long dst_i;
5928         unsigned long src_i;
5929
5930         if (src_offset + len > dst->len) {
5931                 btrfs_err(fs_info,
5932                           "memmove bogus src_offset %lu move len %lu dst len %lu",
5933                           src_offset, len, dst->len);
5934                 BUG();
5935         }
5936         if (dst_offset + len > dst->len) {
5937                 btrfs_err(fs_info,
5938                           "memmove bogus dst_offset %lu move len %lu dst len %lu",
5939                           dst_offset, len, dst->len);
5940                 BUG();
5941         }
5942         if (dst_offset < src_offset) {
5943                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5944                 return;
5945         }
5946         while (len > 0) {
5947                 dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5948                 src_i = (start_offset + src_end) >> PAGE_SHIFT;
5949
5950                 dst_off_in_page = offset_in_page(start_offset + dst_end);
5951                 src_off_in_page = offset_in_page(start_offset + src_end);
5952
5953                 cur = min_t(unsigned long, len, src_off_in_page + 1);
5954                 cur = min(cur, dst_off_in_page + 1);
5955                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5956                            dst_off_in_page - cur + 1,
5957                            src_off_in_page - cur + 1, cur);
5958
5959                 dst_end -= cur;
5960                 src_end -= cur;
5961                 len -= cur;
5962         }
5963 }
5964
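/*
 * Note on the backward copy above (illustrative numbers, PAGE_SIZE == 4096,
 * start_offset == 0): when dst_offset > src_offset the move is chunked from
 * the tail so overlapping bytes are not clobbered before they are read.
 * Each chunk is bounded by whichever of the source or destination end
 * reaches the start of its page first; e.g. moving 6000 bytes from offset
 * 1000 to 2000 copies a 2904 byte tail chunk first (both ends sit in
 * page 1), then progressively smaller chunks as the ends cross back into
 * page 0.
 */
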
5965 int try_release_extent_buffer(struct page *page)
5966 {
5967         struct extent_buffer *eb;
5968
5969         /*
5970          * We need to make sure nobody is attaching this page to an eb right
5971          * now.
5972          */
5973         spin_lock(&page->mapping->private_lock);
5974         if (!PagePrivate(page)) {
5975                 spin_unlock(&page->mapping->private_lock);
5976                 return 1;
5977         }
5978
5979         eb = (struct extent_buffer *)page->private;
5980         BUG_ON(!eb);
5981
5982         /*
5983          * This is a little awful but should be ok, we need to make sure that
5984          * This is a little awful but should be OK: we need to make sure that
5985          * this page.
5986          */
5987         spin_lock(&eb->refs_lock);
5988         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5989                 spin_unlock(&eb->refs_lock);
5990                 spin_unlock(&page->mapping->private_lock);
5991                 return 0;
5992         }
5993         spin_unlock(&page->mapping->private_lock);
5994
5995         /*
5996          * If the tree ref isn't set then we know the ref on this eb is a real ref,
5997          * so just return; this page will likely be freed soon anyway.
5998          */
5999         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6000                 spin_unlock(&eb->refs_lock);
6001                 return 0;
6002         }
6003
6004         return release_extent_buffer(eb);
6005 }
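
/*
 * Caller sketch (modeled on the btree releasepage hook in disk-io.c; treat
 * the details as illustrative): the VM asks whether a metadata page may be
 * dropped, and the decision is delegated to try_release_extent_buffer()
 * once writeback and dirtiness have been ruled out.
 */
static inline int example_btree_releasepage(struct page *page,
                                            gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}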