btrfs: remove the incorrect comment on RO fs when btrfs_run_delalloc_range() fails
fs/btrfs/extent_io.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/bitops.h>
4 #include <linux/slab.h>
5 #include <linux/bio.h>
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/page-flags.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/writeback.h>
13 #include <linux/pagevec.h>
14 #include <linux/prefetch.h>
15 #include <linux/cleancache.h>
16 #include "extent_io.h"
17 #include "extent_map.h"
18 #include "ctree.h"
19 #include "btrfs_inode.h"
20 #include "volumes.h"
21 #include "check-integrity.h"
22 #include "locking.h"
23 #include "rcu-string.h"
24 #include "backref.h"
25 #include "disk-io.h"
26
27 static struct kmem_cache *extent_state_cache;
28 static struct kmem_cache *extent_buffer_cache;
29 static struct bio_set btrfs_bioset;
30
31 static inline bool extent_state_in_tree(const struct extent_state *state)
32 {
33         return !RB_EMPTY_NODE(&state->rb_node);
34 }
35
36 #ifdef CONFIG_BTRFS_DEBUG
37 static LIST_HEAD(buffers);
38 static LIST_HEAD(states);
39
40 static DEFINE_SPINLOCK(leak_lock);
41
42 static inline
43 void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
44 {
45         unsigned long flags;
46
47         spin_lock_irqsave(&leak_lock, flags);
48         list_add(new, head);
49         spin_unlock_irqrestore(&leak_lock, flags);
50 }
51
52 static inline
53 void btrfs_leak_debug_del(struct list_head *entry)
54 {
55         unsigned long flags;
56
57         spin_lock_irqsave(&leak_lock, flags);
58         list_del(entry);
59         spin_unlock_irqrestore(&leak_lock, flags);
60 }
61
62 static inline
63 void btrfs_leak_debug_check(void)
64 {
65         struct extent_state *state;
66         struct extent_buffer *eb;
67
68         while (!list_empty(&states)) {
69                 state = list_entry(states.next, struct extent_state, leak_list);
70                 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
71                        state->start, state->end, state->state,
72                        extent_state_in_tree(state),
73                        refcount_read(&state->refs));
74                 list_del(&state->leak_list);
75                 kmem_cache_free(extent_state_cache, state);
76         }
77
78         while (!list_empty(&buffers)) {
79                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
80                 pr_err("BTRFS: buffer leak start %llu len %lu refs %d bflags %lu\n",
81                        eb->start, eb->len, atomic_read(&eb->refs), eb->bflags);
82                 list_del(&eb->leak_list);
83                 kmem_cache_free(extent_buffer_cache, eb);
84         }
85 }
86
87 #define btrfs_debug_check_extent_io_range(tree, start, end)             \
88         __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
89 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
90                 struct extent_io_tree *tree, u64 start, u64 end)
91 {
92         struct inode *inode = tree->private_data;
93         u64 isize;
94
95         if (!inode || !is_data_inode(inode))
96                 return;
97
98         isize = i_size_read(inode);
99         if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
100                 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
101                     "%s: ino %llu isize %llu odd range [%llu,%llu]",
102                         caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
103         }
104 }
105 #else
106 #define btrfs_leak_debug_add(new, head) do {} while (0)
107 #define btrfs_leak_debug_del(entry)     do {} while (0)
108 #define btrfs_leak_debug_check()        do {} while (0)
109 #define btrfs_debug_check_extent_io_range(c, s, e)      do {} while (0)
110 #endif
111
112 struct tree_entry {
113         u64 start;
114         u64 end;
115         struct rb_node rb_node;
116 };
117
118 struct extent_page_data {
119         struct bio *bio;
120         struct extent_io_tree *tree;
121         /* Tell writepage not to lock the state bits for this range;
122          * it still does the unlocking.
123          */
124         unsigned int extent_locked:1;
125
126         /* tells the submit_bio code to use REQ_SYNC */
127         unsigned int sync_io:1;
128 };
129
130 static int add_extent_changeset(struct extent_state *state, unsigned bits,
131                                  struct extent_changeset *changeset,
132                                  int set)
133 {
134         int ret;
135
136         if (!changeset)
137                 return 0;
138         if (set && (state->state & bits) == bits)
139                 return 0;
140         if (!set && (state->state & bits) == 0)
141                 return 0;
142         changeset->bytes_changed += state->end - state->start + 1;
143         ret = ulist_add(&changeset->range_changed, state->start, state->end,
144                         GFP_ATOMIC);
145         return ret;
146 }
147
148 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
149                                        unsigned long bio_flags)
150 {
151         blk_status_t ret = 0;
152         struct extent_io_tree *tree = bio->bi_private;
153
154         bio->bi_private = NULL;
155
156         if (tree->ops)
157                 ret = tree->ops->submit_bio_hook(tree->private_data, bio,
158                                                  mirror_num, bio_flags);
159         else
160                 btrfsic_submit_bio(bio);
161
162         return blk_status_to_errno(ret);
163 }
164
165 /* Clean up unsubmitted bios */
166 static void end_write_bio(struct extent_page_data *epd, int ret)
167 {
168         if (epd->bio) {
169                 epd->bio->bi_status = errno_to_blk_status(ret);
170                 bio_endio(epd->bio);
171                 epd->bio = NULL;
172         }
173 }
174
175 /*
176  * Submit bio from extent page data via submit_one_bio
177  *
178  * Return 0 if everything is OK.
179  * Return <0 for error.
180  */
181 static int __must_check flush_write_bio(struct extent_page_data *epd)
182 {
183         int ret = 0;
184
185         if (epd->bio) {
186                 ret = submit_one_bio(epd->bio, 0, 0);
187                 /*
188                  * Cleanup of epd->bio is handled by its endio function.
189                  * The endio is triggered either by successful bio execution
190                  * or by the error handler of the submit bio hook.
191                  * So at this point, no matter what happened, we don't need
192                  * to clean up epd->bio.
193                  */
194                 epd->bio = NULL;
195         }
196         return ret;
197 }
198
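/*
 * A minimal sketch of how a write path might drive the two helpers above;
 * the caller and its page-writing helper are hypothetical, only the epd
 * handling mirrors this file:
 *
 *	struct extent_page_data epd = {
 *		.bio = NULL,
 *		.tree = tree,
 *		.extent_locked = 0,
 *		.sync_io = 1,			// ask submit_bio for REQ_SYNC
 *	};
 *	int ret;
 *
 *	ret = write_some_pages(&epd);		// hypothetical helper filling epd.bio
 *	if (ret < 0)
 *		end_write_bio(&epd, ret);	// fail whatever bio was built up
 *	else
 *		ret = flush_write_bio(&epd);	// submit the last pending bio
 */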
199 int __init extent_io_init(void)
200 {
201         extent_state_cache = kmem_cache_create("btrfs_extent_state",
202                         sizeof(struct extent_state), 0,
203                         SLAB_MEM_SPREAD, NULL);
204         if (!extent_state_cache)
205                 return -ENOMEM;
206
207         extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
208                         sizeof(struct extent_buffer), 0,
209                         SLAB_MEM_SPREAD, NULL);
210         if (!extent_buffer_cache)
211                 goto free_state_cache;
212
213         if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
214                         offsetof(struct btrfs_io_bio, bio),
215                         BIOSET_NEED_BVECS))
216                 goto free_buffer_cache;
217
218         if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
219                 goto free_bioset;
220
221         return 0;
222
223 free_bioset:
224         bioset_exit(&btrfs_bioset);
225
226 free_buffer_cache:
227         kmem_cache_destroy(extent_buffer_cache);
228         extent_buffer_cache = NULL;
229
230 free_state_cache:
231         kmem_cache_destroy(extent_state_cache);
232         extent_state_cache = NULL;
233         return -ENOMEM;
234 }
235
236 void __cold extent_io_exit(void)
237 {
238         btrfs_leak_debug_check();
239
240         /*
241          * Make sure all delayed rcu free are flushed before we
242          * destroy caches.
243          */
244         rcu_barrier();
245         kmem_cache_destroy(extent_state_cache);
246         kmem_cache_destroy(extent_buffer_cache);
247         bioset_exit(&btrfs_bioset);
248 }
249
250 void extent_io_tree_init(struct btrfs_fs_info *fs_info,
251                          struct extent_io_tree *tree, unsigned int owner,
252                          void *private_data)
253 {
254         tree->fs_info = fs_info;
255         tree->state = RB_ROOT;
256         tree->ops = NULL;
257         tree->dirty_bytes = 0;
258         spin_lock_init(&tree->lock);
259         tree->private_data = private_data;
260         tree->owner = owner;
261 }
262
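/*
 * A minimal usage sketch for the init/release pair, assuming a hypothetical
 * owner constant (real owner values are defined in the extent io tree
 * headers, not in this file):
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_EXAMPLE_OWNER, NULL);
 *	// ... set, clear and query bits on byte ranges of the tree ...
 *	extent_io_tree_release(&tree);	// frees any states still in the tree
 */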
263 void extent_io_tree_release(struct extent_io_tree *tree)
264 {
265         spin_lock(&tree->lock);
266         /*
267          * Do a single barrier for the waitqueue_active check here, the state
268          * of the waitqueue should not change once extent_io_tree_release is
269          * called.
270          */
271         smp_mb();
272         while (!RB_EMPTY_ROOT(&tree->state)) {
273                 struct rb_node *node;
274                 struct extent_state *state;
275
276                 node = rb_first(&tree->state);
277                 state = rb_entry(node, struct extent_state, rb_node);
278                 rb_erase(&state->rb_node, &tree->state);
279                 RB_CLEAR_NODE(&state->rb_node);
280                 /*
281                  * btree io trees aren't supposed to have tasks waiting for
282                  * changes in the flags of extent states ever.
283                  */
284                 ASSERT(!waitqueue_active(&state->wq));
285                 free_extent_state(state);
286
287                 cond_resched_lock(&tree->lock);
288         }
289         spin_unlock(&tree->lock);
290 }
291
292 static struct extent_state *alloc_extent_state(gfp_t mask)
293 {
294         struct extent_state *state;
295
296         /*
297          * The given mask might not be appropriate for the slab allocator,
298          * so drop the unsupported bits.
299          */
300         mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
301         state = kmem_cache_alloc(extent_state_cache, mask);
302         if (!state)
303                 return state;
304         state->state = 0;
305         state->failrec = NULL;
306         RB_CLEAR_NODE(&state->rb_node);
307         btrfs_leak_debug_add(&state->leak_list, &states);
308         refcount_set(&state->refs, 1);
309         init_waitqueue_head(&state->wq);
310         trace_alloc_extent_state(state, mask, _RET_IP_);
311         return state;
312 }
313
314 void free_extent_state(struct extent_state *state)
315 {
316         if (!state)
317                 return;
318         if (refcount_dec_and_test(&state->refs)) {
319                 WARN_ON(extent_state_in_tree(state));
320                 btrfs_leak_debug_del(&state->leak_list);
321                 trace_free_extent_state(state, _RET_IP_);
322                 kmem_cache_free(extent_state_cache, state);
323         }
324 }
325
326 static struct rb_node *tree_insert(struct rb_root *root,
327                                    struct rb_node *search_start,
328                                    u64 offset,
329                                    struct rb_node *node,
330                                    struct rb_node ***p_in,
331                                    struct rb_node **parent_in)
332 {
333         struct rb_node **p;
334         struct rb_node *parent = NULL;
335         struct tree_entry *entry;
336
337         if (p_in && parent_in) {
338                 p = *p_in;
339                 parent = *parent_in;
340                 goto do_insert;
341         }
342
343         p = search_start ? &search_start : &root->rb_node;
344         while (*p) {
345                 parent = *p;
346                 entry = rb_entry(parent, struct tree_entry, rb_node);
347
348                 if (offset < entry->start)
349                         p = &(*p)->rb_left;
350                 else if (offset > entry->end)
351                         p = &(*p)->rb_right;
352                 else
353                         return parent;
354         }
355
356 do_insert:
357         rb_link_node(node, parent, p);
358         rb_insert_color(node, root);
359         return NULL;
360 }
361
362 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
363                                       struct rb_node **next_ret,
364                                       struct rb_node **prev_ret,
365                                       struct rb_node ***p_ret,
366                                       struct rb_node **parent_ret)
367 {
368         struct rb_root *root = &tree->state;
369         struct rb_node **n = &root->rb_node;
370         struct rb_node *prev = NULL;
371         struct rb_node *orig_prev = NULL;
372         struct tree_entry *entry;
373         struct tree_entry *prev_entry = NULL;
374
375         while (*n) {
376                 prev = *n;
377                 entry = rb_entry(prev, struct tree_entry, rb_node);
378                 prev_entry = entry;
379
380                 if (offset < entry->start)
381                         n = &(*n)->rb_left;
382                 else if (offset > entry->end)
383                         n = &(*n)->rb_right;
384                 else
385                         return *n;
386         }
387
388         if (p_ret)
389                 *p_ret = n;
390         if (parent_ret)
391                 *parent_ret = prev;
392
393         if (next_ret) {
394                 orig_prev = prev;
395                 while (prev && offset > prev_entry->end) {
396                         prev = rb_next(prev);
397                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
398                 }
399                 *next_ret = prev;
400                 prev = orig_prev;
401         }
402
403         if (prev_ret) {
404                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
405                 while (prev && offset < prev_entry->start) {
406                         prev = rb_prev(prev);
407                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
408                 }
409                 *prev_ret = prev;
410         }
411         return NULL;
412 }
413
414 static inline struct rb_node *
415 tree_search_for_insert(struct extent_io_tree *tree,
416                        u64 offset,
417                        struct rb_node ***p_ret,
418                        struct rb_node **parent_ret)
419 {
420         struct rb_node *next = NULL;
421         struct rb_node *ret;
422
423         ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
424         if (!ret)
425                 return next;
426         return ret;
427 }
428
429 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
430                                           u64 offset)
431 {
432         return tree_search_for_insert(tree, offset, NULL, NULL);
433 }
434
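/*
 * A worked example of the search helpers above, with made-up offsets.
 * Suppose the tree holds the states [0, 4095], [4096, 8191] and
 * [16384, 20479]:
 *
 *   - __etree_search(tree, 4500, ...) returns the node of [4096, 8191],
 *     since 4096 <= 4500 <= 8191.
 *   - __etree_search(tree, 9000, ...) finds no containing state and returns
 *     NULL; *next_ret points at [16384, 20479] (the first state ending after
 *     9000) and *prev_ret at [4096, 8191].
 *   - tree_search(tree, 9000) therefore resolves to the [16384, 20479] node,
 *     i.e. the first state that ends after the requested offset.
 */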
435 /*
436  * Utility function to look for merge candidates inside a given range.
437  * Any extents with matching state are merged together into a single
438  * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in
439  * their state field are not merged because the end_io handlers need to be
440  * able to do operations on them without sleeping (or doing allocations/splits).
441  *
442  * This should be called with the tree lock held.
443  */
444 static void merge_state(struct extent_io_tree *tree,
445                         struct extent_state *state)
446 {
447         struct extent_state *other;
448         struct rb_node *other_node;
449
450         if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
451                 return;
452
453         other_node = rb_prev(&state->rb_node);
454         if (other_node) {
455                 other = rb_entry(other_node, struct extent_state, rb_node);
456                 if (other->end == state->start - 1 &&
457                     other->state == state->state) {
458                         if (tree->private_data &&
459                             is_data_inode(tree->private_data))
460                                 btrfs_merge_delalloc_extent(tree->private_data,
461                                                             state, other);
462                         state->start = other->start;
463                         rb_erase(&other->rb_node, &tree->state);
464                         RB_CLEAR_NODE(&other->rb_node);
465                         free_extent_state(other);
466                 }
467         }
468         other_node = rb_next(&state->rb_node);
469         if (other_node) {
470                 other = rb_entry(other_node, struct extent_state, rb_node);
471                 if (other->start == state->end + 1 &&
472                     other->state == state->state) {
473                         if (tree->private_data &&
474                             is_data_inode(tree->private_data))
475                                 btrfs_merge_delalloc_extent(tree->private_data,
476                                                             state, other);
477                         state->end = other->end;
478                         rb_erase(&other->rb_node, &tree->state);
479                         RB_CLEAR_NODE(&other->rb_node);
480                         free_extent_state(other);
481                 }
482         }
483 }
484
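/*
 * For example (made-up offsets): if the tree holds [0, 4095] and
 * [4096, 8191], both with exactly the same state bits and neither carrying
 * EXTENT_LOCKED or EXTENT_BOUNDARY, then merge_state() on the second state
 * absorbs the first: the survivor becomes [0, 8191] and the [0, 4095] state
 * is erased from the tree and freed.
 */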
485 static void set_state_bits(struct extent_io_tree *tree,
486                            struct extent_state *state, unsigned *bits,
487                            struct extent_changeset *changeset);
488
489 /*
490  * insert an extent_state struct into the tree.  'bits' are set on the
491  * struct before it is inserted.
492  *
493  * This may return -EEXIST if the extent is already there, in which case the
494  * state struct is freed.
495  *
496  * The tree lock is not taken internally.  This is a utility function and
497  * probably isn't what you want to call (see set/clear_extent_bit).
498  */
499 static int insert_state(struct extent_io_tree *tree,
500                         struct extent_state *state, u64 start, u64 end,
501                         struct rb_node ***p,
502                         struct rb_node **parent,
503                         unsigned *bits, struct extent_changeset *changeset)
504 {
505         struct rb_node *node;
506
507         if (end < start)
508                 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
509                        end, start);
510         state->start = start;
511         state->end = end;
512
513         set_state_bits(tree, state, bits, changeset);
514
515         node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
516         if (node) {
517                 struct extent_state *found;
518                 found = rb_entry(node, struct extent_state, rb_node);
519                 pr_err("BTRFS: found node %llu %llu on insert of %llu %llu\n",
520                        found->start, found->end, start, end);
521                 return -EEXIST;
522         }
523         merge_state(tree, state);
524         return 0;
525 }
526
527 /*
528  * split a given extent state struct in two, inserting the preallocated
529  * struct 'prealloc' as the newly created second half.  'split' indicates an
530  * offset inside 'orig' where it should be split.
531  *
532  * Before calling,
533  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
534  * are two extent state structs in the tree:
535  * prealloc: [orig->start, split - 1]
536  * orig: [ split, orig->end ]
537  *
538  * The tree locks are not taken by this function. They need to be held
539  * by the caller.
540  */
541 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
542                        struct extent_state *prealloc, u64 split)
543 {
544         struct rb_node *node;
545
546         if (tree->private_data && is_data_inode(tree->private_data))
547                 btrfs_split_delalloc_extent(tree->private_data, orig, split);
548
549         prealloc->start = orig->start;
550         prealloc->end = split - 1;
551         prealloc->state = orig->state;
552         orig->start = split;
553
554         node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
555                            &prealloc->rb_node, NULL, NULL);
556         if (node) {
557                 free_extent_state(prealloc);
558                 return -EEXIST;
559         }
560         return 0;
561 }
562
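/*
 * For example (made-up offsets): splitting an existing state [0, 8191] at
 * offset 4096 leaves two states in the tree, the preallocated one as
 * [0, 4095] and the original shrunk to [4096, 8191], both carrying the same
 * state bits.  A sketch of the call, assuming the caller already holds the
 * tree lock and a preallocated state:
 *
 *	err = split_state(tree, orig, prealloc, 4096);
 *	if (err)	// -EEXIST: prealloc was freed, the tree already had a node there
 *		...
 */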
563 static struct extent_state *next_state(struct extent_state *state)
564 {
565         struct rb_node *next = rb_next(&state->rb_node);
566         if (next)
567                 return rb_entry(next, struct extent_state, rb_node);
568         else
569                 return NULL;
570 }
571
572 /*
573  * Utility function to clear some bits in an extent state struct.  It will
574  * optionally wake up anyone waiting on this state (wake == 1).
575  *
576  * If no bits are set on the state struct after clearing things, the
577  * struct is removed from the tree and freed.
578  */
579 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
580                                             struct extent_state *state,
581                                             unsigned *bits, int wake,
582                                             struct extent_changeset *changeset)
583 {
584         struct extent_state *next;
585         unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
586         int ret;
587
588         if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
589                 u64 range = state->end - state->start + 1;
590                 WARN_ON(range > tree->dirty_bytes);
591                 tree->dirty_bytes -= range;
592         }
593
594         if (tree->private_data && is_data_inode(tree->private_data))
595                 btrfs_clear_delalloc_extent(tree->private_data, state, bits);
596
597         ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
598         BUG_ON(ret < 0);
599         state->state &= ~bits_to_clear;
600         if (wake)
601                 wake_up(&state->wq);
602         if (state->state == 0) {
603                 next = next_state(state);
604                 if (extent_state_in_tree(state)) {
605                         rb_erase(&state->rb_node, &tree->state);
606                         RB_CLEAR_NODE(&state->rb_node);
607                         free_extent_state(state);
608                 } else {
609                         WARN_ON(1);
610                 }
611         } else {
612                 merge_state(tree, state);
613                 next = next_state(state);
614         }
615         return next;
616 }
617
618 static struct extent_state *
619 alloc_extent_state_atomic(struct extent_state *prealloc)
620 {
621         if (!prealloc)
622                 prealloc = alloc_extent_state(GFP_ATOMIC);
623
624         return prealloc;
625 }
626
627 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
628 {
629         struct inode *inode = tree->private_data;
630
631         btrfs_panic(btrfs_sb(inode->i_sb), err,
632         "locking error: extent tree was modified by another thread while locked");
633 }
634
635 /*
636  * Clear some bits on a range in the tree.  This may require splitting
637  * or inserting elements in the tree, so the gfp mask is used to
638  * indicate which allocations or sleeping are allowed.
639  *
640  * Pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
641  * the given range from the tree regardless of state (i.e. for truncate).
642  *
643  * The range [start, end] is inclusive.
644  *
645  * This takes the tree lock, and currently always returns 0.
646  */
647 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
648                               unsigned bits, int wake, int delete,
649                               struct extent_state **cached_state,
650                               gfp_t mask, struct extent_changeset *changeset)
651 {
652         struct extent_state *state;
653         struct extent_state *cached;
654         struct extent_state *prealloc = NULL;
655         struct rb_node *node;
656         u64 last_end;
657         int err;
658         int clear = 0;
659
660         btrfs_debug_check_extent_io_range(tree, start, end);
661         trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
662
663         if (bits & EXTENT_DELALLOC)
664                 bits |= EXTENT_NORESERVE;
665
666         if (delete)
667                 bits |= ~EXTENT_CTLBITS;
668
669         if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
670                 clear = 1;
671 again:
672         if (!prealloc && gfpflags_allow_blocking(mask)) {
673                 /*
674                  * Don't care about allocation failure here because we might
675                  * end up not needing the pre-allocated extent state at all,
676                  * which is the case if the only extent states in the tree
677                  * cover our input range and don't cover any other range.
678                  * If we end up needing a new extent state we allocate it later.
679                  */
680                 prealloc = alloc_extent_state(mask);
681         }
682
683         spin_lock(&tree->lock);
684         if (cached_state) {
685                 cached = *cached_state;
686
687                 if (clear) {
688                         *cached_state = NULL;
689                         cached_state = NULL;
690                 }
691
692                 if (cached && extent_state_in_tree(cached) &&
693                     cached->start <= start && cached->end > start) {
694                         if (clear)
695                                 refcount_dec(&cached->refs);
696                         state = cached;
697                         goto hit_next;
698                 }
699                 if (clear)
700                         free_extent_state(cached);
701         }
702         /*
703          * this search will find the extents that end after
704          * our range starts
705          */
706         node = tree_search(tree, start);
707         if (!node)
708                 goto out;
709         state = rb_entry(node, struct extent_state, rb_node);
710 hit_next:
711         if (state->start > end)
712                 goto out;
713         WARN_ON(state->end < start);
714         last_end = state->end;
715
716         /* the state doesn't have the wanted bits, go ahead */
717         if (!(state->state & bits)) {
718                 state = next_state(state);
719                 goto next;
720         }
721
722         /*
723          *     | ---- desired range ---- |
724          *  | state | or
725          *  | ------------- state -------------- |
726          *
727          * We need to split the extent we found, and may flip
728          * bits on second half.
729          *
730          * If the extent we found extends past our range, we
731          * just split and search again.  It'll get split again
732          * the next time though.
733          *
734          * If the extent we found is inside our range, we clear
735          * the desired bit on it.
736          */
737
738         if (state->start < start) {
739                 prealloc = alloc_extent_state_atomic(prealloc);
740                 BUG_ON(!prealloc);
741                 err = split_state(tree, state, prealloc, start);
742                 if (err)
743                         extent_io_tree_panic(tree, err);
744
745                 prealloc = NULL;
746                 if (err)
747                         goto out;
748                 if (state->end <= end) {
749                         state = clear_state_bit(tree, state, &bits, wake,
750                                                 changeset);
751                         goto next;
752                 }
753                 goto search_again;
754         }
755         /*
756          * | ---- desired range ---- |
757          *                        | state |
758          * We need to split the extent, and clear the bit
759          * on the first half
760          */
761         if (state->start <= end && state->end > end) {
762                 prealloc = alloc_extent_state_atomic(prealloc);
763                 BUG_ON(!prealloc);
764                 err = split_state(tree, state, prealloc, end + 1);
765                 if (err)
766                         extent_io_tree_panic(tree, err);
767
768                 if (wake)
769                         wake_up(&state->wq);
770
771                 clear_state_bit(tree, prealloc, &bits, wake, changeset);
772
773                 prealloc = NULL;
774                 goto out;
775         }
776
777         state = clear_state_bit(tree, state, &bits, wake, changeset);
778 next:
779         if (last_end == (u64)-1)
780                 goto out;
781         start = last_end + 1;
782         if (start <= end && state && !need_resched())
783                 goto hit_next;
784
785 search_again:
786         if (start > end)
787                 goto out;
788         spin_unlock(&tree->lock);
789         if (gfpflags_allow_blocking(mask))
790                 cond_resched();
791         goto again;
792
793 out:
794         spin_unlock(&tree->lock);
795         if (prealloc)
796                 free_extent_state(prealloc);
797
798         return 0;
799
800 }
801
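/*
 * A worked example of the clearing logic above, with made-up offsets.
 * Suppose the tree holds a single state [0, 16383] with only EXTENT_DELALLOC
 * set and the caller clears that bit over [4096, 8191]:
 *
 *   1. state->start (0) < start (4096), so the state is split into [0, 4095]
 *      and [4096, 16383]; the first half keeps the bit.
 *   2. [4096, 16383] still extends past end (8191), so it is split again
 *      into [4096, 8191] and [8192, 16383].
 *   3. clear_state_bit() drops the bit from [4096, 8191]; as no bits remain
 *      on it, that state is removed from the tree and freed, leaving
 *      [0, 4095] and [8192, 16383] with EXTENT_DELALLOC still set.
 */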
802 static void wait_on_state(struct extent_io_tree *tree,
803                           struct extent_state *state)
804                 __releases(tree->lock)
805                 __acquires(tree->lock)
806 {
807         DEFINE_WAIT(wait);
808         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
809         spin_unlock(&tree->lock);
810         schedule();
811         spin_lock(&tree->lock);
812         finish_wait(&state->wq, &wait);
813 }
814
815 /*
816  * Waits for one or more bits to clear on a range in the state tree.
817  * The range [start, end] is inclusive.
818  * The tree lock is taken by this function.
819  */
820 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
821                             unsigned long bits)
822 {
823         struct extent_state *state;
824         struct rb_node *node;
825
826         btrfs_debug_check_extent_io_range(tree, start, end);
827
828         spin_lock(&tree->lock);
829 again:
830         while (1) {
831                 /*
832                  * this search will find all the extents that end after
833                  * our range starts
834                  */
835                 node = tree_search(tree, start);
836 process_node:
837                 if (!node)
838                         break;
839
840                 state = rb_entry(node, struct extent_state, rb_node);
841
842                 if (state->start > end)
843                         goto out;
844
845                 if (state->state & bits) {
846                         start = state->start;
847                         refcount_inc(&state->refs);
848                         wait_on_state(tree, state);
849                         free_extent_state(state);
850                         goto again;
851                 }
852                 start = state->end + 1;
853
854                 if (start > end)
855                         break;
856
857                 if (!cond_resched_lock(&tree->lock)) {
858                         node = rb_next(node);
859                         goto process_node;
860                 }
861         }
862 out:
863         spin_unlock(&tree->lock);
864 }
865
866 static void set_state_bits(struct extent_io_tree *tree,
867                            struct extent_state *state,
868                            unsigned *bits, struct extent_changeset *changeset)
869 {
870         unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
871         int ret;
872
873         if (tree->private_data && is_data_inode(tree->private_data))
874                 btrfs_set_delalloc_extent(tree->private_data, state, bits);
875
876         if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
877                 u64 range = state->end - state->start + 1;
878                 tree->dirty_bytes += range;
879         }
880         ret = add_extent_changeset(state, bits_to_set, changeset, 1);
881         BUG_ON(ret < 0);
882         state->state |= bits_to_set;
883 }
884
885 static void cache_state_if_flags(struct extent_state *state,
886                                  struct extent_state **cached_ptr,
887                                  unsigned flags)
888 {
889         if (cached_ptr && !(*cached_ptr)) {
890                 if (!flags || (state->state & flags)) {
891                         *cached_ptr = state;
892                         refcount_inc(&state->refs);
893                 }
894         }
895 }
896
897 static void cache_state(struct extent_state *state,
898                         struct extent_state **cached_ptr)
899 {
900         return cache_state_if_flags(state, cached_ptr,
901                                     EXTENT_LOCKED | EXTENT_BOUNDARY);
902 }
903
904 /*
905  * set some bits on a range in the tree.  This may require allocations or
906  * sleeping, so the gfp mask is used to indicate what is allowed.
907  *
908  * If any of the exclusive bits are set, this will fail with -EEXIST if some
909  * part of the range already has the desired bits set.  The start of the
910  * existing range is returned in failed_start in this case.
911  *
912  * [start, end] is inclusive.  This takes the tree lock.
913  */
914
915 static int __must_check
916 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
917                  unsigned bits, unsigned exclusive_bits,
918                  u64 *failed_start, struct extent_state **cached_state,
919                  gfp_t mask, struct extent_changeset *changeset)
920 {
921         struct extent_state *state;
922         struct extent_state *prealloc = NULL;
923         struct rb_node *node;
924         struct rb_node **p;
925         struct rb_node *parent;
926         int err = 0;
927         u64 last_start;
928         u64 last_end;
929
930         btrfs_debug_check_extent_io_range(tree, start, end);
931         trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
932
933 again:
934         if (!prealloc && gfpflags_allow_blocking(mask)) {
935                 /*
936                  * Don't care about allocation failure here because we might
937                  * end up not needing the pre-allocated extent state at all,
938                  * which is the case if the only extent states in the tree
939                  * cover our input range and don't cover any other range.
940                  * If we end up needing a new extent state we allocate it later.
941                  */
942                 prealloc = alloc_extent_state(mask);
943         }
944
945         spin_lock(&tree->lock);
946         if (cached_state && *cached_state) {
947                 state = *cached_state;
948                 if (state->start <= start && state->end > start &&
949                     extent_state_in_tree(state)) {
950                         node = &state->rb_node;
951                         goto hit_next;
952                 }
953         }
954         /*
955          * this search will find all the extents that end after
956          * our range starts.
957          */
958         node = tree_search_for_insert(tree, start, &p, &parent);
959         if (!node) {
960                 prealloc = alloc_extent_state_atomic(prealloc);
961                 BUG_ON(!prealloc);
962                 err = insert_state(tree, prealloc, start, end,
963                                    &p, &parent, &bits, changeset);
964                 if (err)
965                         extent_io_tree_panic(tree, err);
966
967                 cache_state(prealloc, cached_state);
968                 prealloc = NULL;
969                 goto out;
970         }
971         state = rb_entry(node, struct extent_state, rb_node);
972 hit_next:
973         last_start = state->start;
974         last_end = state->end;
975
976         /*
977          * | ---- desired range ---- |
978          * | state |
979          *
980          * Just lock what we found and keep going
981          */
982         if (state->start == start && state->end <= end) {
983                 if (state->state & exclusive_bits) {
984                         *failed_start = state->start;
985                         err = -EEXIST;
986                         goto out;
987                 }
988
989                 set_state_bits(tree, state, &bits, changeset);
990                 cache_state(state, cached_state);
991                 merge_state(tree, state);
992                 if (last_end == (u64)-1)
993                         goto out;
994                 start = last_end + 1;
995                 state = next_state(state);
996                 if (start < end && state && state->start == start &&
997                     !need_resched())
998                         goto hit_next;
999                 goto search_again;
1000         }
1001
1002         /*
1003          *     | ---- desired range ---- |
1004          * | state |
1005          *   or
1006          * | ------------- state -------------- |
1007          *
1008          * We need to split the extent we found, and may flip bits on
1009          * second half.
1010          *
1011          * If the extent we found extends past our
1012          * range, we just split and search again.  It'll get split
1013          * again the next time though.
1014          *
1015          * If the extent we found is inside our range, we set the
1016          * desired bit on it.
1017          */
1018         if (state->start < start) {
1019                 if (state->state & exclusive_bits) {
1020                         *failed_start = start;
1021                         err = -EEXIST;
1022                         goto out;
1023                 }
1024
1025                 prealloc = alloc_extent_state_atomic(prealloc);
1026                 BUG_ON(!prealloc);
1027                 err = split_state(tree, state, prealloc, start);
1028                 if (err)
1029                         extent_io_tree_panic(tree, err);
1030
1031                 prealloc = NULL;
1032                 if (err)
1033                         goto out;
1034                 if (state->end <= end) {
1035                         set_state_bits(tree, state, &bits, changeset);
1036                         cache_state(state, cached_state);
1037                         merge_state(tree, state);
1038                         if (last_end == (u64)-1)
1039                                 goto out;
1040                         start = last_end + 1;
1041                         state = next_state(state);
1042                         if (start < end && state && state->start == start &&
1043                             !need_resched())
1044                                 goto hit_next;
1045                 }
1046                 goto search_again;
1047         }
1048         /*
1049          * | ---- desired range ---- |
1050          *     | state | or               | state |
1051          *
1052          * There's a hole, we need to insert something in it and
1053          * ignore the extent we found.
1054          */
1055         if (state->start > start) {
1056                 u64 this_end;
1057                 if (end < last_start)
1058                         this_end = end;
1059                 else
1060                         this_end = last_start - 1;
1061
1062                 prealloc = alloc_extent_state_atomic(prealloc);
1063                 BUG_ON(!prealloc);
1064
1065                 /*
1066                  * Avoid freeing 'prealloc' if it can be merged with
1067                  * the next extent.
1068                  */
1069                 err = insert_state(tree, prealloc, start, this_end,
1070                                    NULL, NULL, &bits, changeset);
1071                 if (err)
1072                         extent_io_tree_panic(tree, err);
1073
1074                 cache_state(prealloc, cached_state);
1075                 prealloc = NULL;
1076                 start = this_end + 1;
1077                 goto search_again;
1078         }
1079         /*
1080          * | ---- desired range ---- |
1081          *                        | state |
1082          * We need to split the extent, and set the bit
1083          * on the first half
1084          */
1085         if (state->start <= end && state->end > end) {
1086                 if (state->state & exclusive_bits) {
1087                         *failed_start = start;
1088                         err = -EEXIST;
1089                         goto out;
1090                 }
1091
1092                 prealloc = alloc_extent_state_atomic(prealloc);
1093                 BUG_ON(!prealloc);
1094                 err = split_state(tree, state, prealloc, end + 1);
1095                 if (err)
1096                         extent_io_tree_panic(tree, err);
1097
1098                 set_state_bits(tree, prealloc, &bits, changeset);
1099                 cache_state(prealloc, cached_state);
1100                 merge_state(tree, prealloc);
1101                 prealloc = NULL;
1102                 goto out;
1103         }
1104
1105 search_again:
1106         if (start > end)
1107                 goto out;
1108         spin_unlock(&tree->lock);
1109         if (gfpflags_allow_blocking(mask))
1110                 cond_resched();
1111         goto again;
1112
1113 out:
1114         spin_unlock(&tree->lock);
1115         if (prealloc)
1116                 free_extent_state(prealloc);
1117
1118         return err;
1119
1120 }
1121
1122 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1123                    unsigned bits, u64 *failed_start,
1124                    struct extent_state **cached_state, gfp_t mask)
1125 {
1126         return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1127                                 cached_state, mask, NULL);
1128 }
1129
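/*
 * A minimal caller sketch for set_extent_bit(), assuming process context
 * where GFP_NOFS is allowed.  With no exclusive bits requested, failed_start
 * may be NULL; the matching cleanup uses the clear_extent_bit() wrapper
 * defined further down in this file:
 *
 *	int ret;
 *
 *	ret = set_extent_bit(tree, start, start + len - 1, EXTENT_DELALLOC,
 *			     NULL, NULL, GFP_NOFS);
 *	// ... the range is now tracked with EXTENT_DELALLOC ...
 *	clear_extent_bit(tree, start, start + len - 1, EXTENT_DELALLOC,
 *			 0, 0, NULL);
 */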
1130
1131 /**
1132  * convert_extent_bit - convert all bits in a given range from one bit to
1133  *                      another
1134  * @tree:       the io tree to search
1135  * @start:      the start offset in bytes
1136  * @end:        the end offset in bytes (inclusive)
1137  * @bits:       the bits to set in this range
1138  * @clear_bits: the bits to clear in this range
1139  * @cached_state:       state that we're going to cache
1140  *
1141  * This will go through and set bits for the given range.  If any states exist
1142  * already in this range they are set with the given bit and cleared of the
1143  * clear_bits.  This is only meant to be used by things that are mergeable,
1144  * e.g. converting from DELALLOC to DIRTY.  This is not meant to be used
1145  * with boundary bits like EXTENT_LOCKED.
1146  *
1147  * All allocations are done with GFP_NOFS.
1148  */
1149 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1150                        unsigned bits, unsigned clear_bits,
1151                        struct extent_state **cached_state)
1152 {
1153         struct extent_state *state;
1154         struct extent_state *prealloc = NULL;
1155         struct rb_node *node;
1156         struct rb_node **p;
1157         struct rb_node *parent;
1158         int err = 0;
1159         u64 last_start;
1160         u64 last_end;
1161         bool first_iteration = true;
1162
1163         btrfs_debug_check_extent_io_range(tree, start, end);
1164         trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1165                                        clear_bits);
1166
1167 again:
1168         if (!prealloc) {
1169                 /*
1170                  * Best effort, don't worry if extent state allocation fails
1171                  * here for the first iteration. We might have a cached state
1172                  * that matches exactly the target range, in which case no
1173                  * extent state allocations are needed. We'll only know this
1174                  * after locking the tree.
1175                  */
1176                 prealloc = alloc_extent_state(GFP_NOFS);
1177                 if (!prealloc && !first_iteration)
1178                         return -ENOMEM;
1179         }
1180
1181         spin_lock(&tree->lock);
1182         if (cached_state && *cached_state) {
1183                 state = *cached_state;
1184                 if (state->start <= start && state->end > start &&
1185                     extent_state_in_tree(state)) {
1186                         node = &state->rb_node;
1187                         goto hit_next;
1188                 }
1189         }
1190
1191         /*
1192          * this search will find all the extents that end after
1193          * our range starts.
1194          */
1195         node = tree_search_for_insert(tree, start, &p, &parent);
1196         if (!node) {
1197                 prealloc = alloc_extent_state_atomic(prealloc);
1198                 if (!prealloc) {
1199                         err = -ENOMEM;
1200                         goto out;
1201                 }
1202                 err = insert_state(tree, prealloc, start, end,
1203                                    &p, &parent, &bits, NULL);
1204                 if (err)
1205                         extent_io_tree_panic(tree, err);
1206                 cache_state(prealloc, cached_state);
1207                 prealloc = NULL;
1208                 goto out;
1209         }
1210         state = rb_entry(node, struct extent_state, rb_node);
1211 hit_next:
1212         last_start = state->start;
1213         last_end = state->end;
1214
1215         /*
1216          * | ---- desired range ---- |
1217          * | state |
1218          *
1219          * Just lock what we found and keep going
1220          */
1221         if (state->start == start && state->end <= end) {
1222                 set_state_bits(tree, state, &bits, NULL);
1223                 cache_state(state, cached_state);
1224                 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1225                 if (last_end == (u64)-1)
1226                         goto out;
1227                 start = last_end + 1;
1228                 if (start < end && state && state->start == start &&
1229                     !need_resched())
1230                         goto hit_next;
1231                 goto search_again;
1232         }
1233
1234         /*
1235          *     | ---- desired range ---- |
1236          * | state |
1237          *   or
1238          * | ------------- state -------------- |
1239          *
1240          * We need to split the extent we found, and may flip bits on
1241          * second half.
1242          *
1243          * If the extent we found extends past our
1244          * range, we just split and search again.  It'll get split
1245          * again the next time though.
1246          *
1247          * If the extent we found is inside our range, we set the
1248          * desired bit on it.
1249          */
1250         if (state->start < start) {
1251                 prealloc = alloc_extent_state_atomic(prealloc);
1252                 if (!prealloc) {
1253                         err = -ENOMEM;
1254                         goto out;
1255                 }
1256                 err = split_state(tree, state, prealloc, start);
1257                 if (err)
1258                         extent_io_tree_panic(tree, err);
1259                 prealloc = NULL;
1260                 if (err)
1261                         goto out;
1262                 if (state->end <= end) {
1263                         set_state_bits(tree, state, &bits, NULL);
1264                         cache_state(state, cached_state);
1265                         state = clear_state_bit(tree, state, &clear_bits, 0,
1266                                                 NULL);
1267                         if (last_end == (u64)-1)
1268                                 goto out;
1269                         start = last_end + 1;
1270                         if (start < end && state && state->start == start &&
1271                             !need_resched())
1272                                 goto hit_next;
1273                 }
1274                 goto search_again;
1275         }
1276         /*
1277          * | ---- desired range ---- |
1278          *     | state | or               | state |
1279          *
1280          * There's a hole, we need to insert something in it and
1281          * ignore the extent we found.
1282          */
1283         if (state->start > start) {
1284                 u64 this_end;
1285                 if (end < last_start)
1286                         this_end = end;
1287                 else
1288                         this_end = last_start - 1;
1289
1290                 prealloc = alloc_extent_state_atomic(prealloc);
1291                 if (!prealloc) {
1292                         err = -ENOMEM;
1293                         goto out;
1294                 }
1295
1296                 /*
1297                  * Avoid freeing 'prealloc' if it can be merged with
1298                  * the next extent.
1299                  */
1300                 err = insert_state(tree, prealloc, start, this_end,
1301                                    NULL, NULL, &bits, NULL);
1302                 if (err)
1303                         extent_io_tree_panic(tree, err);
1304                 cache_state(prealloc, cached_state);
1305                 prealloc = NULL;
1306                 start = this_end + 1;
1307                 goto search_again;
1308         }
1309         /*
1310          * | ---- desired range ---- |
1311          *                        | state |
1312          * We need to split the extent, and set the bit
1313          * on the first half
1314          */
1315         if (state->start <= end && state->end > end) {
1316                 prealloc = alloc_extent_state_atomic(prealloc);
1317                 if (!prealloc) {
1318                         err = -ENOMEM;
1319                         goto out;
1320                 }
1321
1322                 err = split_state(tree, state, prealloc, end + 1);
1323                 if (err)
1324                         extent_io_tree_panic(tree, err);
1325
1326                 set_state_bits(tree, prealloc, &bits, NULL);
1327                 cache_state(prealloc, cached_state);
1328                 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1329                 prealloc = NULL;
1330                 goto out;
1331         }
1332
1333 search_again:
1334         if (start > end)
1335                 goto out;
1336         spin_unlock(&tree->lock);
1337         cond_resched();
1338         first_iteration = false;
1339         goto again;
1340
1341 out:
1342         spin_unlock(&tree->lock);
1343         if (prealloc)
1344                 free_extent_state(prealloc);
1345
1346         return err;
1347 }
1348
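/*
 * A minimal sketch of a conversion, following the "DELALLOC to DIRTY"
 * example from the comment above (the range and tree are hypothetical;
 * allocations are GFP_NOFS, so this must run in process context):
 *
 *	struct extent_state *cached = NULL;
 *	int ret;
 *
 *	ret = convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *				 EXTENT_DELALLOC, &cached);
 *	free_extent_state(cached);	// drop the cached reference, if any
 */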
1349 /* wrappers around set/clear extent bit */
1350 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1351                            unsigned bits, struct extent_changeset *changeset)
1352 {
1353         /*
1354          * We don't support EXTENT_LOCKED yet, as the current changeset will
1355          * record any bits changed, so in the EXTENT_LOCKED case it would
1356          * either fail with -EEXIST or the changeset would record the whole
1357          * range.
1358          */
1359         BUG_ON(bits & EXTENT_LOCKED);
1360
1361         return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1362                                 changeset);
1363 }
1364
1365 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1366                            unsigned bits)
1367 {
1368         return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1369                                 GFP_NOWAIT, NULL);
1370 }
1371
1372 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1373                      unsigned bits, int wake, int delete,
1374                      struct extent_state **cached)
1375 {
1376         return __clear_extent_bit(tree, start, end, bits, wake, delete,
1377                                   cached, GFP_NOFS, NULL);
1378 }
1379
1380 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1381                 unsigned bits, struct extent_changeset *changeset)
1382 {
1383         /*
1384          * Don't support EXTENT_LOCKED case, same reason as
1385          * set_record_extent_bits().
1386          */
1387         BUG_ON(bits & EXTENT_LOCKED);
1388
1389         return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1390                                   changeset);
1391 }
1392
1393 /*
1394  * Either insert or lock the state struct between start and end, waiting
1395  * until any conflicting EXTENT_LOCKED range is released.
1396  */
1397 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1398                      struct extent_state **cached_state)
1399 {
1400         int err;
1401         u64 failed_start;
1402
1403         while (1) {
1404                 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1405                                        EXTENT_LOCKED, &failed_start,
1406                                        cached_state, GFP_NOFS, NULL);
1407                 if (err == -EEXIST) {
1408                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1409                         start = failed_start;
1410                 } else
1411                         break;
1412                 WARN_ON(start > end);
1413         }
1414         return err;
1415 }
1416
1417 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1418 {
1419         int err;
1420         u64 failed_start;
1421
1422         err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1423                                &failed_start, NULL, GFP_NOFS, NULL);
1424         if (err == -EEXIST) {
1425                 if (failed_start > start)
1426                         clear_extent_bit(tree, start, failed_start - 1,
1427                                          EXTENT_LOCKED, 1, 0, NULL);
1428                 return 0;
1429         }
1430         return 1;
1431 }
1432
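/*
 * A minimal sketch of the locking pattern built on the helpers above.
 * Unlocking is clearing EXTENT_LOCKED again with wake == 1, which is also
 * how try_lock_extent() undoes a partially acquired lock; the dedicated
 * unlock wrappers live outside this file:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);	// sleeps until locked
 *	// ... [start, end] is now exclusively locked in this io tree ...
 *	clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, &cached);
 */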
1433 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1434 {
1435         unsigned long index = start >> PAGE_SHIFT;
1436         unsigned long end_index = end >> PAGE_SHIFT;
1437         struct page *page;
1438
1439         while (index <= end_index) {
1440                 page = find_get_page(inode->i_mapping, index);
1441                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1442                 clear_page_dirty_for_io(page);
1443                 put_page(page);
1444                 index++;
1445         }
1446 }
1447
1448 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1449 {
1450         unsigned long index = start >> PAGE_SHIFT;
1451         unsigned long end_index = end >> PAGE_SHIFT;
1452         struct page *page;
1453
1454         while (index <= end_index) {
1455                 page = find_get_page(inode->i_mapping, index);
1456                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1457                 __set_page_dirty_nobuffers(page);
1458                 account_page_redirty(page);
1459                 put_page(page);
1460                 index++;
1461         }
1462 }
1463
1464 /* find the first state struct with 'bits' set after 'start', and
1465  * return it.  tree->lock must be held.  NULL will be returned if
1466  * nothing was found after 'start'.
1467  */
1468 static struct extent_state *
1469 find_first_extent_bit_state(struct extent_io_tree *tree,
1470                             u64 start, unsigned bits)
1471 {
1472         struct rb_node *node;
1473         struct extent_state *state;
1474
1475         /*
1476          * this search will find all the extents that end after
1477          * our range starts.
1478          */
1479         node = tree_search(tree, start);
1480         if (!node)
1481                 goto out;
1482
1483         while (1) {
1484                 state = rb_entry(node, struct extent_state, rb_node);
1485                 if (state->end >= start && (state->state & bits))
1486                         return state;
1487
1488                 node = rb_next(node);
1489                 if (!node)
1490                         break;
1491         }
1492 out:
1493         return NULL;
1494 }
1495
1496 /*
1497  * find the first offset in the io tree with 'bits' set. zero is
1498  * returned if we find something, and *start_ret and *end_ret are
1499  * set to reflect the state struct that was found.
1500  *
1501  * If nothing was found, 1 is returned.
1502  */
1503 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1504                           u64 *start_ret, u64 *end_ret, unsigned bits,
1505                           struct extent_state **cached_state)
1506 {
1507         struct extent_state *state;
1508         int ret = 1;
1509
1510         spin_lock(&tree->lock);
1511         if (cached_state && *cached_state) {
1512                 state = *cached_state;
1513                 if (state->end == start - 1 && extent_state_in_tree(state)) {
1514                         while ((state = next_state(state)) != NULL) {
1515                                 if (state->state & bits)
1516                                         goto got_it;
1517                         }
1518                         free_extent_state(*cached_state);
1519                         *cached_state = NULL;
1520                         goto out;
1521                 }
1522                 free_extent_state(*cached_state);
1523                 *cached_state = NULL;
1524         }
1525
1526         state = find_first_extent_bit_state(tree, start, bits);
1527 got_it:
1528         if (state) {
1529                 cache_state_if_flags(state, cached_state, 0);
1530                 *start_ret = state->start;
1531                 *end_ret = state->end;
1532                 ret = 0;
1533         }
1534 out:
1535         spin_unlock(&tree->lock);
1536         return ret;
1537 }
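
/*
 * Illustrative usage sketch only (hedged, not part of the code): walking
 * every range that has a given bit set with find_first_extent_bit().  The
 * tree variable and the EXTENT_DIRTY bit are just example choices.
 *
 *	u64 cur = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		if (found_end == (u64)-1)
 *			break;
 *		cur = found_end + 1;
 *	}
 */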
1538
1539 /**
1540  * find_first_clear_extent_bit - finds the first range that has @bits not set
1541  * and that starts after @start
1542  *
1543  * @tree - the tree to search
1544  * @start - the offset at/after which the found extent should start
1545  * @start_ret - records the beginning of the range
1546  * @end_ret - records the end of the range (inclusive)
1547  * @bits - the set of bits which must be unset
1548  *
1549  * Since an unallocated range is also considered one which doesn't have the
1550  * bits set, it's possible that @end_ret contains -1. This happens when the range
1551  * spans (last_range_end, end of device]. In this case it's up to the caller to
1552  * trim @end_ret to the appropriate size.
1553  */
1554 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1555                                  u64 *start_ret, u64 *end_ret, unsigned bits)
1556 {
1557         struct extent_state *state;
1558         struct rb_node *node, *prev = NULL, *next;
1559
1560         spin_lock(&tree->lock);
1561
1562         /* Find first extent with bits cleared */
1563         while (1) {
1564                 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1565                 if (!node) {
1566                         node = next;
1567                         if (!node) {
1568                                 /*
1569                                  * We are past the last allocated chunk,
1570                                  * set start at the end of the last extent. The
1571                                  * device alloc tree should never be empty so
1572                                  * prev is always set.
1573                                  */
1574                                 ASSERT(prev);
1575                                 state = rb_entry(prev, struct extent_state, rb_node);
1576                                 *start_ret = state->end + 1;
1577                                 *end_ret = -1;
1578                                 goto out;
1579                         }
1580                 }
1581                 state = rb_entry(node, struct extent_state, rb_node);
1582                 if (in_range(start, state->start, state->end - state->start + 1) &&
1583                         (state->state & bits)) {
1584                         start = state->end + 1;
1585                 } else {
1586                         *start_ret = start;
1587                         break;
1588                 }
1589         }
1590
1591         /*
1592          * Find the longest stretch from start until an entry which has the
1593          * bits set
1594          */
1595         while (1) {
1596                 state = rb_entry(node, struct extent_state, rb_node);
1597                 if (state->end >= start && !(state->state & bits)) {
1598                         *end_ret = state->end;
1599                 } else {
1600                         *end_ret = state->start - 1;
1601                         break;
1602                 }
1603
1604                 node = rb_next(node);
1605                 if (!node)
1606                         break;
1607         }
1608 out:
1609         spin_unlock(&tree->lock);
1610 }
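
/*
 * Illustrative usage sketch only: because @end_ret may come back as -1 when
 * the clear range runs to the end of the device (see the comment above), a
 * caller typically clamps it.  The variable names and the clamp value are
 * hypothetical.
 *
 *	u64 hole_start, hole_end;
 *
 *	find_first_clear_extent_bit(tree, search_start, &hole_start, &hole_end,
 *				    bits);
 *	hole_end = min(hole_end, device_total_bytes - 1);
 */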
1611
1612 /*
1613  * find a contiguous range of bytes in the file marked as delalloc, not
1614  * more than 'max_bytes'.  start and end are used to return the range.
1615  *
1616  * true is returned if we find something, false if nothing was in the tree.
1617  */
1618 static noinline bool find_delalloc_range(struct extent_io_tree *tree,
1619                                         u64 *start, u64 *end, u64 max_bytes,
1620                                         struct extent_state **cached_state)
1621 {
1622         struct rb_node *node;
1623         struct extent_state *state;
1624         u64 cur_start = *start;
1625         bool found = false;
1626         u64 total_bytes = 0;
1627
1628         spin_lock(&tree->lock);
1629
1630         /*
1631          * this search will find all the extents that end after
1632          * our range starts.
1633          */
1634         node = tree_search(tree, cur_start);
1635         if (!node) {
1636                 *end = (u64)-1;
1637                 goto out;
1638         }
1639
1640         while (1) {
1641                 state = rb_entry(node, struct extent_state, rb_node);
1642                 if (found && (state->start != cur_start ||
1643                               (state->state & EXTENT_BOUNDARY))) {
1644                         goto out;
1645                 }
1646                 if (!(state->state & EXTENT_DELALLOC)) {
1647                         if (!found)
1648                                 *end = state->end;
1649                         goto out;
1650                 }
1651                 if (!found) {
1652                         *start = state->start;
1653                         *cached_state = state;
1654                         refcount_inc(&state->refs);
1655                 }
1656                 found = true;
1657                 *end = state->end;
1658                 cur_start = state->end + 1;
1659                 node = rb_next(node);
1660                 total_bytes += state->end - state->start + 1;
1661                 if (total_bytes >= max_bytes)
1662                         break;
1663                 if (!node)
1664                         break;
1665         }
1666 out:
1667         spin_unlock(&tree->lock);
1668         return found;
1669 }
1670
1671 static int __process_pages_contig(struct address_space *mapping,
1672                                   struct page *locked_page,
1673                                   pgoff_t start_index, pgoff_t end_index,
1674                                   unsigned long page_ops, pgoff_t *index_ret);
1675
1676 static noinline void __unlock_for_delalloc(struct inode *inode,
1677                                            struct page *locked_page,
1678                                            u64 start, u64 end)
1679 {
1680         unsigned long index = start >> PAGE_SHIFT;
1681         unsigned long end_index = end >> PAGE_SHIFT;
1682
1683         ASSERT(locked_page);
1684         if (index == locked_page->index && end_index == index)
1685                 return;
1686
1687         __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1688                                PAGE_UNLOCK, NULL);
1689 }
1690
1691 static noinline int lock_delalloc_pages(struct inode *inode,
1692                                         struct page *locked_page,
1693                                         u64 delalloc_start,
1694                                         u64 delalloc_end)
1695 {
1696         unsigned long index = delalloc_start >> PAGE_SHIFT;
1697         unsigned long index_ret = index;
1698         unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1699         int ret;
1700
1701         ASSERT(locked_page);
1702         if (index == locked_page->index && index == end_index)
1703                 return 0;
1704
1705         ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1706                                      end_index, PAGE_LOCK, &index_ret);
1707         if (ret == -EAGAIN)
1708                 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1709                                       (u64)index_ret << PAGE_SHIFT);
1710         return ret;
1711 }
1712
1713 /*
1714  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1715  * more than @max_bytes.  @start and @end are used to return the range.
1716  *
1717  * Return: true if we find something
1718  *         false if nothing was in the tree
1719  */
1720 EXPORT_FOR_TESTS
1721 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1722                                     struct extent_io_tree *tree,
1723                                     struct page *locked_page, u64 *start,
1724                                     u64 *end)
1725 {
1726         u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1727         u64 delalloc_start;
1728         u64 delalloc_end;
1729         bool found;
1730         struct extent_state *cached_state = NULL;
1731         int ret;
1732         int loops = 0;
1733
1734 again:
1735         /* step one, find a bunch of delalloc bytes starting at start */
1736         delalloc_start = *start;
1737         delalloc_end = 0;
1738         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1739                                     max_bytes, &cached_state);
1740         if (!found || delalloc_end <= *start) {
1741                 *start = delalloc_start;
1742                 *end = delalloc_end;
1743                 free_extent_state(cached_state);
1744                 return false;
1745         }
1746
1747         /*
1748          * start comes from the offset of locked_page.  We have to lock
1749          * pages in order, so we can't process delalloc bytes before
1750          * locked_page
1751          */
1752         if (delalloc_start < *start)
1753                 delalloc_start = *start;
1754
1755         /*
1756          * make sure to limit the number of pages we try to lock down
1757          */
1758         if (delalloc_end + 1 - delalloc_start > max_bytes)
1759                 delalloc_end = delalloc_start + max_bytes - 1;
1760
1761         /* step two, lock all the pages after the page that has start */
1762         ret = lock_delalloc_pages(inode, locked_page,
1763                                   delalloc_start, delalloc_end);
1764         ASSERT(!ret || ret == -EAGAIN);
1765         if (ret == -EAGAIN) {
1766                 /* some of the pages are gone, let's avoid looping by
1767                  * shortening the size of the delalloc range we're searching
1768                  */
1769                 free_extent_state(cached_state);
1770                 cached_state = NULL;
1771                 if (!loops) {
1772                         max_bytes = PAGE_SIZE;
1773                         loops = 1;
1774                         goto again;
1775                 } else {
1776                         found = false;
1777                         goto out_failed;
1778                 }
1779         }
1780
1781         /* step three, lock the state bits for the whole range */
1782         lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1783
1784         /* then test to make sure it is all still delalloc */
1785         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1786                              EXTENT_DELALLOC, 1, cached_state);
1787         if (!ret) {
1788                 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1789                                      &cached_state);
1790                 __unlock_for_delalloc(inode, locked_page,
1791                               delalloc_start, delalloc_end);
1792                 cond_resched();
1793                 goto again;
1794         }
1795         free_extent_state(cached_state);
1796         *start = delalloc_start;
1797         *end = delalloc_end;
1798 out_failed:
1799         return found;
1800 }
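
/*
 * Illustrative usage sketch only: a hedged view of how a writeback path
 * might drive find_lock_delalloc_range() across a page.  The page_end value
 * and the step that actually runs delalloc are hypothetical.
 *
 *	u64 delalloc_start = page_offset(page);
 *	u64 delalloc_end = 0;
 *
 *	while (delalloc_end < page_end) {
 *		if (!find_lock_delalloc_range(inode, tree, page,
 *					      &delalloc_start, &delalloc_end))
 *			break;
 *		... run delalloc / start IO on [delalloc_start, delalloc_end] ...
 *		delalloc_start = delalloc_end + 1;
 *	}
 */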
1801
1802 static int __process_pages_contig(struct address_space *mapping,
1803                                   struct page *locked_page,
1804                                   pgoff_t start_index, pgoff_t end_index,
1805                                   unsigned long page_ops, pgoff_t *index_ret)
1806 {
1807         unsigned long nr_pages = end_index - start_index + 1;
1808         unsigned long pages_locked = 0;
1809         pgoff_t index = start_index;
1810         struct page *pages[16];
1811         unsigned ret;
1812         int err = 0;
1813         int i;
1814
1815         if (page_ops & PAGE_LOCK) {
1816                 ASSERT(page_ops == PAGE_LOCK);
1817                 ASSERT(index_ret && *index_ret == start_index);
1818         }
1819
1820         if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1821                 mapping_set_error(mapping, -EIO);
1822
1823         while (nr_pages > 0) {
1824                 ret = find_get_pages_contig(mapping, index,
1825                                      min_t(unsigned long,
1826                                      nr_pages, ARRAY_SIZE(pages)), pages);
1827                 if (ret == 0) {
1828                         /*
1829                          * Only if we're going to lock these pages,
1830                          * can we find nothing at @index.
1831                          */
1832                         ASSERT(page_ops & PAGE_LOCK);
1833                         err = -EAGAIN;
1834                         goto out;
1835                 }
1836
1837                 for (i = 0; i < ret; i++) {
1838                         if (page_ops & PAGE_SET_PRIVATE2)
1839                                 SetPagePrivate2(pages[i]);
1840
1841                         if (pages[i] == locked_page) {
1842                                 put_page(pages[i]);
1843                                 pages_locked++;
1844                                 continue;
1845                         }
1846                         if (page_ops & PAGE_CLEAR_DIRTY)
1847                                 clear_page_dirty_for_io(pages[i]);
1848                         if (page_ops & PAGE_SET_WRITEBACK)
1849                                 set_page_writeback(pages[i]);
1850                         if (page_ops & PAGE_SET_ERROR)
1851                                 SetPageError(pages[i]);
1852                         if (page_ops & PAGE_END_WRITEBACK)
1853                                 end_page_writeback(pages[i]);
1854                         if (page_ops & PAGE_UNLOCK)
1855                                 unlock_page(pages[i]);
1856                         if (page_ops & PAGE_LOCK) {
1857                                 lock_page(pages[i]);
1858                                 if (!PageDirty(pages[i]) ||
1859                                     pages[i]->mapping != mapping) {
1860                                         unlock_page(pages[i]);
1861                                         put_page(pages[i]);
1862                                         err = -EAGAIN;
1863                                         goto out;
1864                                 }
1865                         }
1866                         put_page(pages[i]);
1867                         pages_locked++;
1868                 }
1869                 nr_pages -= ret;
1870                 index += ret;
1871                 cond_resched();
1872         }
1873 out:
1874         if (err && index_ret)
1875                 *index_ret = start_index + pages_locked - 1;
1876         return err;
1877 }
1878
1879 void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1880                                  u64 delalloc_end, struct page *locked_page,
1881                                  unsigned clear_bits,
1882                                  unsigned long page_ops)
1883 {
1884         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
1885                          NULL);
1886
1887         __process_pages_contig(inode->i_mapping, locked_page,
1888                                start >> PAGE_SHIFT, end >> PAGE_SHIFT,
1889                                page_ops, NULL);
1890 }
1891
1892 /*
1893  * count the number of bytes in the tree that have the given bit(s)
1894  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1895  * cached.  The total number of bytes found is returned.
1896  */
1897 u64 count_range_bits(struct extent_io_tree *tree,
1898                      u64 *start, u64 search_end, u64 max_bytes,
1899                      unsigned bits, int contig)
1900 {
1901         struct rb_node *node;
1902         struct extent_state *state;
1903         u64 cur_start = *start;
1904         u64 total_bytes = 0;
1905         u64 last = 0;
1906         int found = 0;
1907
1908         if (WARN_ON(search_end <= cur_start))
1909                 return 0;
1910
1911         spin_lock(&tree->lock);
1912         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1913                 total_bytes = tree->dirty_bytes;
1914                 goto out;
1915         }
1916         /*
1917          * this search will find all the extents that end after
1918          * our range starts.
1919          */
1920         node = tree_search(tree, cur_start);
1921         if (!node)
1922                 goto out;
1923
1924         while (1) {
1925                 state = rb_entry(node, struct extent_state, rb_node);
1926                 if (state->start > search_end)
1927                         break;
1928                 if (contig && found && state->start > last + 1)
1929                         break;
1930                 if (state->end >= cur_start && (state->state & bits) == bits) {
1931                         total_bytes += min(search_end, state->end) + 1 -
1932                                        max(cur_start, state->start);
1933                         if (total_bytes >= max_bytes)
1934                                 break;
1935                         if (!found) {
1936                                 *start = max(cur_start, state->start);
1937                                 found = 1;
1938                         }
1939                         last = state->end;
1940                 } else if (contig && found) {
1941                         break;
1942                 }
1943                 node = rb_next(node);
1944                 if (!node)
1945                         break;
1946         }
1947 out:
1948         spin_unlock(&tree->lock);
1949         return total_bytes;
1950 }
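
/*
 * Illustrative usage sketch only: counting how many bytes in a range carry a
 * bit.  clean_io_failure() below does the same thing to test whether the
 * failure tree is empty; the variable names here are hypothetical.
 *
 *	u64 first = 0;
 *	u64 bytes;
 *
 *	bytes = count_range_bits(tree, &first, (u64)-1, 1, EXTENT_DIRTY, 0);
 *	... on return, 'first' points at the first byte that was counted ...
 */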
1951
1952 /*
1953  * set the failrec field for a given byte offset in the tree.  If there isn't
1954  * an extent_state starting at that offset, -ENOENT is returned.
1955  */
1956 static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
1957                 struct io_failure_record *failrec)
1958 {
1959         struct rb_node *node;
1960         struct extent_state *state;
1961         int ret = 0;
1962
1963         spin_lock(&tree->lock);
1964         /*
1965          * this search will find all the extents that end after
1966          * our range starts.
1967          */
1968         node = tree_search(tree, start);
1969         if (!node) {
1970                 ret = -ENOENT;
1971                 goto out;
1972         }
1973         state = rb_entry(node, struct extent_state, rb_node);
1974         if (state->start != start) {
1975                 ret = -ENOENT;
1976                 goto out;
1977         }
1978         state->failrec = failrec;
1979 out:
1980         spin_unlock(&tree->lock);
1981         return ret;
1982 }
1983
1984 static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
1985                 struct io_failure_record **failrec)
1986 {
1987         struct rb_node *node;
1988         struct extent_state *state;
1989         int ret = 0;
1990
1991         spin_lock(&tree->lock);
1992         /*
1993          * this search will find all the extents that end after
1994          * our range starts.
1995          */
1996         node = tree_search(tree, start);
1997         if (!node) {
1998                 ret = -ENOENT;
1999                 goto out;
2000         }
2001         state = rb_entry(node, struct extent_state, rb_node);
2002         if (state->start != start) {
2003                 ret = -ENOENT;
2004                 goto out;
2005         }
2006         *failrec = state->failrec;
2007 out:
2008         spin_unlock(&tree->lock);
2009         return ret;
2010 }
2011
2012 /*
2013  * searches a range in the state tree for the given bits.
2014  * If 'filled' == 1, this returns 1 only if every extent in the range
2015  * has the bits set.  Otherwise, 1 is returned if any bit in the
2016  * range is found set.
2017  */
2018 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2019                    unsigned bits, int filled, struct extent_state *cached)
2020 {
2021         struct extent_state *state = NULL;
2022         struct rb_node *node;
2023         int bitset = 0;
2024
2025         spin_lock(&tree->lock);
2026         if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2027             cached->end > start)
2028                 node = &cached->rb_node;
2029         else
2030                 node = tree_search(tree, start);
2031         while (node && start <= end) {
2032                 state = rb_entry(node, struct extent_state, rb_node);
2033
2034                 if (filled && state->start > start) {
2035                         bitset = 0;
2036                         break;
2037                 }
2038
2039                 if (state->start > end)
2040                         break;
2041
2042                 if (state->state & bits) {
2043                         bitset = 1;
2044                         if (!filled)
2045                                 break;
2046                 } else if (filled) {
2047                         bitset = 0;
2048                         break;
2049                 }
2050
2051                 if (state->end == (u64)-1)
2052                         break;
2053
2054                 start = state->end + 1;
2055                 if (start > end)
2056                         break;
2057                 node = rb_next(node);
2058                 if (!node) {
2059                         if (filled)
2060                                 bitset = 0;
2061                         break;
2062                 }
2063         }
2064         spin_unlock(&tree->lock);
2065         return bitset;
2066 }
2067
2068 /*
2069  * helper function to set a given page up to date if all the
2070  * extents in the tree for that page are up to date
2071  */
2072 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2073 {
2074         u64 start = page_offset(page);
2075         u64 end = start + PAGE_SIZE - 1;
2076         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2077                 SetPageUptodate(page);
2078 }
2079
2080 int free_io_failure(struct extent_io_tree *failure_tree,
2081                     struct extent_io_tree *io_tree,
2082                     struct io_failure_record *rec)
2083 {
2084         int ret;
2085         int err = 0;
2086
2087         set_state_failrec(failure_tree, rec->start, NULL);
2088         ret = clear_extent_bits(failure_tree, rec->start,
2089                                 rec->start + rec->len - 1,
2090                                 EXTENT_LOCKED | EXTENT_DIRTY);
2091         if (ret)
2092                 err = ret;
2093
2094         ret = clear_extent_bits(io_tree, rec->start,
2095                                 rec->start + rec->len - 1,
2096                                 EXTENT_DAMAGED);
2097         if (ret && !err)
2098                 err = ret;
2099
2100         kfree(rec);
2101         return err;
2102 }
2103
2104 /*
2105  * this bypasses the standard btrfs submit functions deliberately, as
2106  * the standard behavior is to write all copies in a raid setup. here we only
2107  * want to write the one bad copy. so we do the mapping for ourselves and issue
2108  * submit_bio directly.
2109  * to avoid any synchronization issues, wait for the data after writing, which
2110  * actually prevents the read that triggered the error from finishing.
2111  * currently, there can be no more than two copies of every data bit. thus,
2112  * exactly one rewrite is required.
2113  */
2114 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2115                       u64 length, u64 logical, struct page *page,
2116                       unsigned int pg_offset, int mirror_num)
2117 {
2118         struct bio *bio;
2119         struct btrfs_device *dev;
2120         u64 map_length = 0;
2121         u64 sector;
2122         struct btrfs_bio *bbio = NULL;
2123         int ret;
2124
2125         ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2126         BUG_ON(!mirror_num);
2127
2128         bio = btrfs_io_bio_alloc(1);
2129         bio->bi_iter.bi_size = 0;
2130         map_length = length;
2131
2132         /*
2133          * Avoid races with device replace and make sure our bbio has devices
2134          * associated to its stripes that don't go away while we are doing the
2135          * read repair operation.
2136          */
2137         btrfs_bio_counter_inc_blocked(fs_info);
2138         if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2139                 /*
2140                  * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2141                  * to update all raid stripes, but here we just want to correct
2142                  * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2143                  * stripe's dev and sector.
2144                  */
2145                 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2146                                       &map_length, &bbio, 0);
2147                 if (ret) {
2148                         btrfs_bio_counter_dec(fs_info);
2149                         bio_put(bio);
2150                         return -EIO;
2151                 }
2152                 ASSERT(bbio->mirror_num == 1);
2153         } else {
2154                 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2155                                       &map_length, &bbio, mirror_num);
2156                 if (ret) {
2157                         btrfs_bio_counter_dec(fs_info);
2158                         bio_put(bio);
2159                         return -EIO;
2160                 }
2161                 BUG_ON(mirror_num != bbio->mirror_num);
2162         }
2163
2164         sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2165         bio->bi_iter.bi_sector = sector;
2166         dev = bbio->stripes[bbio->mirror_num - 1].dev;
2167         btrfs_put_bbio(bbio);
2168         if (!dev || !dev->bdev ||
2169             !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2170                 btrfs_bio_counter_dec(fs_info);
2171                 bio_put(bio);
2172                 return -EIO;
2173         }
2174         bio_set_dev(bio, dev->bdev);
2175         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2176         bio_add_page(bio, page, length, pg_offset);
2177
2178         if (btrfsic_submit_bio_wait(bio)) {
2179                 /* try to remap that extent elsewhere? */
2180                 btrfs_bio_counter_dec(fs_info);
2181                 bio_put(bio);
2182                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2183                 return -EIO;
2184         }
2185
2186         btrfs_info_rl_in_rcu(fs_info,
2187                 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
2188                                   ino, start,
2189                                   rcu_str_deref(dev->name), sector);
2190         btrfs_bio_counter_dec(fs_info);
2191         bio_put(bio);
2192         return 0;
2193 }
2194
2195 int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num)
2196 {
2197         struct btrfs_fs_info *fs_info = eb->fs_info;
2198         u64 start = eb->start;
2199         int i, num_pages = num_extent_pages(eb);
2200         int ret = 0;
2201
2202         if (sb_rdonly(fs_info->sb))
2203                 return -EROFS;
2204
2205         for (i = 0; i < num_pages; i++) {
2206                 struct page *p = eb->pages[i];
2207
2208                 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2209                                         start - page_offset(p), mirror_num);
2210                 if (ret)
2211                         break;
2212                 start += PAGE_SIZE;
2213         }
2214
2215         return ret;
2216 }
2217
2218 /*
2219  * each time an IO finishes, we do a fast check in the IO failure tree
2220  * to see if we need to process or clean up an io_failure_record
2221  */
2222 int clean_io_failure(struct btrfs_fs_info *fs_info,
2223                      struct extent_io_tree *failure_tree,
2224                      struct extent_io_tree *io_tree, u64 start,
2225                      struct page *page, u64 ino, unsigned int pg_offset)
2226 {
2227         u64 private;
2228         struct io_failure_record *failrec;
2229         struct extent_state *state;
2230         int num_copies;
2231         int ret;
2232
2233         private = 0;
2234         ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2235                                EXTENT_DIRTY, 0);
2236         if (!ret)
2237                 return 0;
2238
2239         ret = get_state_failrec(failure_tree, start, &failrec);
2240         if (ret)
2241                 return 0;
2242
2243         BUG_ON(!failrec->this_mirror);
2244
2245         if (failrec->in_validation) {
2246                 /* there was no real error, just free the record */
2247                 btrfs_debug(fs_info,
2248                         "clean_io_failure: freeing dummy error at %llu",
2249                         failrec->start);
2250                 goto out;
2251         }
2252         if (sb_rdonly(fs_info->sb))
2253                 goto out;
2254
2255         spin_lock(&io_tree->lock);
2256         state = find_first_extent_bit_state(io_tree,
2257                                             failrec->start,
2258                                             EXTENT_LOCKED);
2259         spin_unlock(&io_tree->lock);
2260
2261         if (state && state->start <= failrec->start &&
2262             state->end >= failrec->start + failrec->len - 1) {
2263                 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2264                                               failrec->len);
2265                 if (num_copies > 1)  {
2266                         repair_io_failure(fs_info, ino, start, failrec->len,
2267                                           failrec->logical, page, pg_offset,
2268                                           failrec->failed_mirror);
2269                 }
2270         }
2271
2272 out:
2273         free_io_failure(failure_tree, io_tree, failrec);
2274
2275         return 0;
2276 }
2277
2278 /*
2279  * Can be called when
2280  * - holding the extent lock
2281  * - under an ordered extent
2282  * - the inode is being freed
2283  */
2284 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2285 {
2286         struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2287         struct io_failure_record *failrec;
2288         struct extent_state *state, *next;
2289
2290         if (RB_EMPTY_ROOT(&failure_tree->state))
2291                 return;
2292
2293         spin_lock(&failure_tree->lock);
2294         state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2295         while (state) {
2296                 if (state->start > end)
2297                         break;
2298
2299                 ASSERT(state->end <= end);
2300
2301                 next = next_state(state);
2302
2303                 failrec = state->failrec;
2304                 free_extent_state(state);
2305                 kfree(failrec);
2306
2307                 state = next;
2308         }
2309         spin_unlock(&failure_tree->lock);
2310 }
2311
2312 int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2313                 struct io_failure_record **failrec_ret)
2314 {
2315         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2316         struct io_failure_record *failrec;
2317         struct extent_map *em;
2318         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2319         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2320         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2321         int ret;
2322         u64 logical;
2323
2324         ret = get_state_failrec(failure_tree, start, &failrec);
2325         if (ret) {
2326                 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2327                 if (!failrec)
2328                         return -ENOMEM;
2329
2330                 failrec->start = start;
2331                 failrec->len = end - start + 1;
2332                 failrec->this_mirror = 0;
2333                 failrec->bio_flags = 0;
2334                 failrec->in_validation = 0;
2335
2336                 read_lock(&em_tree->lock);
2337                 em = lookup_extent_mapping(em_tree, start, failrec->len);
2338                 if (!em) {
2339                         read_unlock(&em_tree->lock);
2340                         kfree(failrec);
2341                         return -EIO;
2342                 }
2343
2344                 if (em->start > start || em->start + em->len <= start) {
2345                         free_extent_map(em);
2346                         em = NULL;
2347                 }
2348                 read_unlock(&em_tree->lock);
2349                 if (!em) {
2350                         kfree(failrec);
2351                         return -EIO;
2352                 }
2353
2354                 logical = start - em->start;
2355                 logical = em->block_start + logical;
2356                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2357                         logical = em->block_start;
2358                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2359                         extent_set_compress_type(&failrec->bio_flags,
2360                                                  em->compress_type);
2361                 }
2362
2363                 btrfs_debug(fs_info,
2364                         "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2365                         logical, start, failrec->len);
2366
2367                 failrec->logical = logical;
2368                 free_extent_map(em);
2369
2370                 /* set the bits in the private failure tree */
2371                 ret = set_extent_bits(failure_tree, start, end,
2372                                         EXTENT_LOCKED | EXTENT_DIRTY);
2373                 if (ret >= 0)
2374                         ret = set_state_failrec(failure_tree, start, failrec);
2375                 /* set the bits in the inode's tree */
2376                 if (ret >= 0)
2377                         ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2378                 if (ret < 0) {
2379                         kfree(failrec);
2380                         return ret;
2381                 }
2382         } else {
2383                 btrfs_debug(fs_info,
2384                         "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2385                         failrec->logical, failrec->start, failrec->len,
2386                         failrec->in_validation);
2387                 /*
2388                  * when data can be on disk more than twice, add to failrec here
2389                  * (e.g. with a list for failed_mirror) to make
2390                  * clean_io_failure() clean all those errors at once.
2391                  */
2392         }
2393
2394         *failrec_ret = failrec;
2395
2396         return 0;
2397 }
2398
2399 bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
2400                            struct io_failure_record *failrec, int failed_mirror)
2401 {
2402         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2403         int num_copies;
2404
2405         num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2406         if (num_copies == 1) {
2407                 /*
2408                  * we only have a single copy of the data, so don't bother with
2409                  * all the retry and error correction code that follows. no
2410                  * matter what the error is, it is very likely to persist.
2411                  */
2412                 btrfs_debug(fs_info,
2413                         "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2414                         num_copies, failrec->this_mirror, failed_mirror);
2415                 return false;
2416         }
2417
2418         /*
2419          * there are two premises:
2420          *      a) deliver good data to the caller
2421          *      b) correct the bad sectors on disk
2422          */
2423         if (failed_bio_pages > 1) {
2424                 /*
2425                  * to fulfill b), we need to know the exact failing sectors, as
2426                  * we don't want to rewrite any more than the failed ones. thus,
2427                  * we need separate read requests for the failed bio
2428                  *
2429                  * if the following BUG_ON triggers, our validation request got
2430                  * merged. we need separate requests for our algorithm to work.
2431                  */
2432                 BUG_ON(failrec->in_validation);
2433                 failrec->in_validation = 1;
2434                 failrec->this_mirror = failed_mirror;
2435         } else {
2436                 /*
2437                  * we're ready to fulfill a) and b) at the same time. get a good
2438                  * copy of the failed sector and if we succeed, we have set up
2439                  * everything for repair_io_failure to do the rest for us.
2440                  */
2441                 if (failrec->in_validation) {
2442                         BUG_ON(failrec->this_mirror != failed_mirror);
2443                         failrec->in_validation = 0;
2444                         failrec->this_mirror = 0;
2445                 }
2446                 failrec->failed_mirror = failed_mirror;
2447                 failrec->this_mirror++;
2448                 if (failrec->this_mirror == failed_mirror)
2449                         failrec->this_mirror++;
2450         }
2451
2452         if (failrec->this_mirror > num_copies) {
2453                 btrfs_debug(fs_info,
2454                         "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2455                         num_copies, failrec->this_mirror, failed_mirror);
2456                 return false;
2457         }
2458
2459         return true;
2460 }
2461
2462
2463 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2464                                     struct io_failure_record *failrec,
2465                                     struct page *page, int pg_offset, int icsum,
2466                                     bio_end_io_t *endio_func, void *data)
2467 {
2468         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2469         struct bio *bio;
2470         struct btrfs_io_bio *btrfs_failed_bio;
2471         struct btrfs_io_bio *btrfs_bio;
2472
2473         bio = btrfs_io_bio_alloc(1);
2474         bio->bi_end_io = endio_func;
2475         bio->bi_iter.bi_sector = failrec->logical >> 9;
2476         bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
2477         bio->bi_iter.bi_size = 0;
2478         bio->bi_private = data;
2479
2480         btrfs_failed_bio = btrfs_io_bio(failed_bio);
2481         if (btrfs_failed_bio->csum) {
2482                 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2483
2484                 btrfs_bio = btrfs_io_bio(bio);
2485                 btrfs_bio->csum = btrfs_bio->csum_inline;
2486                 icsum *= csum_size;
2487                 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2488                        csum_size);
2489         }
2490
2491         bio_add_page(bio, page, failrec->len, pg_offset);
2492
2493         return bio;
2494 }
2495
2496 /*
2497  * This is a generic handler for readpage errors. If other copies exist, read
2498  * those and write back good data to the failed position. It does not try to
2499  * remap the failed extent elsewhere, hoping the device will be smart
2500  * enough to do this as needed.
2501  */
2502 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2503                               struct page *page, u64 start, u64 end,
2504                               int failed_mirror)
2505 {
2506         struct io_failure_record *failrec;
2507         struct inode *inode = page->mapping->host;
2508         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2509         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2510         struct bio *bio;
2511         int read_mode = 0;
2512         blk_status_t status;
2513         int ret;
2514         unsigned failed_bio_pages = failed_bio->bi_iter.bi_size >> PAGE_SHIFT;
2515
2516         BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2517
2518         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2519         if (ret)
2520                 return ret;
2521
2522         if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
2523                                     failed_mirror)) {
2524                 free_io_failure(failure_tree, tree, failrec);
2525                 return -EIO;
2526         }
2527
2528         if (failed_bio_pages > 1)
2529                 read_mode |= REQ_FAILFAST_DEV;
2530
2531         phy_offset >>= inode->i_sb->s_blocksize_bits;
2532         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2533                                       start - page_offset(page),
2534                                       (int)phy_offset, failed_bio->bi_end_io,
2535                                       NULL);
2536         bio->bi_opf = REQ_OP_READ | read_mode;
2537
2538         btrfs_debug(btrfs_sb(inode->i_sb),
2539                 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
2540                 read_mode, failrec->this_mirror, failrec->in_validation);
2541
2542         status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
2543                                          failrec->bio_flags);
2544         if (status) {
2545                 free_io_failure(failure_tree, tree, failrec);
2546                 bio_put(bio);
2547                 ret = blk_status_to_errno(status);
2548         }
2549
2550         return ret;
2551 }
2552
2553 /* lots and lots of room for performance fixes in the end_bio funcs */
2554
2555 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2556 {
2557         int uptodate = (err == 0);
2558         int ret = 0;
2559
2560         btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2561
2562         if (!uptodate) {
2563                 ClearPageUptodate(page);
2564                 SetPageError(page);
2565                 ret = err < 0 ? err : -EIO;
2566                 mapping_set_error(page->mapping, ret);
2567         }
2568 }
2569
2570 /*
2571  * after a writepage IO is done, we need to:
2572  * clear the uptodate bits on error
2573  * clear the writeback bits in the extent tree for this IO
2574  * end_page_writeback if the page has no more pending IO
2575  *
2576  * Scheduling is not allowed, so the extent state tree is expected
2577  * to have one and only one object corresponding to this IO.
2578  */
2579 static void end_bio_extent_writepage(struct bio *bio)
2580 {
2581         int error = blk_status_to_errno(bio->bi_status);
2582         struct bio_vec *bvec;
2583         u64 start;
2584         u64 end;
2585         struct bvec_iter_all iter_all;
2586
2587         ASSERT(!bio_flagged(bio, BIO_CLONED));
2588         bio_for_each_segment_all(bvec, bio, iter_all) {
2589                 struct page *page = bvec->bv_page;
2590                 struct inode *inode = page->mapping->host;
2591                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2592
2593                 /* We always issue full-page reads, but if some block
2594                  * in a page fails to read, blk_update_request() will
2595                  * advance bv_offset and adjust bv_len to compensate.
2596                  * Print a warning for nonzero offsets, and an error
2597                  * if they don't add up to a full page.  */
2598                 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2599                         if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2600                                 btrfs_err(fs_info,
2601                                    "partial page write in btrfs with offset %u and length %u",
2602                                         bvec->bv_offset, bvec->bv_len);
2603                         else
2604                                 btrfs_info(fs_info,
2605                                    "incomplete page write in btrfs with offset %u and length %u",
2606                                         bvec->bv_offset, bvec->bv_len);
2607                 }
2608
2609                 start = page_offset(page);
2610                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2611
2612                 end_extent_writepage(page, error, start, end);
2613                 end_page_writeback(page);
2614         }
2615
2616         bio_put(bio);
2617 }
2618
2619 static void
2620 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2621                               int uptodate)
2622 {
2623         struct extent_state *cached = NULL;
2624         u64 end = start + len - 1;
2625
2626         if (uptodate && tree->track_uptodate)
2627                 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2628         unlock_extent_cached_atomic(tree, start, end, &cached);
2629 }
2630
2631 /*
2632  * after a readpage IO is done, we need to:
2633  * clear the uptodate bits on error
2634  * set the uptodate bits if things worked
2635  * set the page up to date if all extents in the tree are uptodate
2636  * clear the lock bit in the extent tree
2637  * unlock the page if there are no other extents locked for it
2638  *
2639  * Scheduling is not allowed, so the extent state tree is expected
2640  * to have one and only one object corresponding to this IO.
2641  */
2642 static void end_bio_extent_readpage(struct bio *bio)
2643 {
2644         struct bio_vec *bvec;
2645         int uptodate = !bio->bi_status;
2646         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2647         struct extent_io_tree *tree, *failure_tree;
2648         u64 offset = 0;
2649         u64 start;
2650         u64 end;
2651         u64 len;
2652         u64 extent_start = 0;
2653         u64 extent_len = 0;
2654         int mirror;
2655         int ret;
2656         struct bvec_iter_all iter_all;
2657
2658         ASSERT(!bio_flagged(bio, BIO_CLONED));
2659         bio_for_each_segment_all(bvec, bio, iter_all) {
2660                 struct page *page = bvec->bv_page;
2661                 struct inode *inode = page->mapping->host;
2662                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2663                 bool data_inode = btrfs_ino(BTRFS_I(inode))
2664                         != BTRFS_BTREE_INODE_OBJECTID;
2665
2666                 btrfs_debug(fs_info,
2667                         "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2668                         (u64)bio->bi_iter.bi_sector, bio->bi_status,
2669                         io_bio->mirror_num);
2670                 tree = &BTRFS_I(inode)->io_tree;
2671                 failure_tree = &BTRFS_I(inode)->io_failure_tree;
2672
2673                 /* We always issue full-page reads, but if some block
2674                  * in a page fails to read, blk_update_request() will
2675                  * advance bv_offset and adjust bv_len to compensate.
2676                  * Print a warning for nonzero offsets, and an error
2677                  * if they don't add up to a full page.  */
2678                 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2679                         if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2680                                 btrfs_err(fs_info,
2681                                         "partial page read in btrfs with offset %u and length %u",
2682                                         bvec->bv_offset, bvec->bv_len);
2683                         else
2684                                 btrfs_info(fs_info,
2685                                         "incomplete page read in btrfs with offset %u and length %u",
2686                                         bvec->bv_offset, bvec->bv_len);
2687                 }
2688
2689                 start = page_offset(page);
2690                 end = start + bvec->bv_offset + bvec->bv_len - 1;
2691                 len = bvec->bv_len;
2692
2693                 mirror = io_bio->mirror_num;
2694                 if (likely(uptodate)) {
2695                         ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2696                                                               page, start, end,
2697                                                               mirror);
2698                         if (ret)
2699                                 uptodate = 0;
2700                         else
2701                                 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2702                                                  failure_tree, tree, start,
2703                                                  page,
2704                                                  btrfs_ino(BTRFS_I(inode)), 0);
2705                 }
2706
2707                 if (likely(uptodate))
2708                         goto readpage_ok;
2709
2710                 if (data_inode) {
2711
2712                         /*
2713                          * The generic bio_readpage_error handles errors the
2714                          * following way: If possible, new read requests are
2715                          * created and submitted and will end up in
2716                          * end_bio_extent_readpage as well (if we're lucky,
2717                          * not in the !uptodate case). In that case it returns
2718                          * 0 and we just go on with the next page in our bio.
2719                          * If it can't handle the error it will return -EIO and
2720                          * we remain responsible for that page.
2721                          */
2722                         ret = bio_readpage_error(bio, offset, page, start, end,
2723                                                  mirror);
2724                         if (ret == 0) {
2725                                 uptodate = !bio->bi_status;
2726                                 offset += len;
2727                                 continue;
2728                         }
2729                 } else {
2730                         struct extent_buffer *eb;
2731
2732                         eb = (struct extent_buffer *)page->private;
2733                         set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2734                         eb->read_mirror = mirror;
2735                         atomic_dec(&eb->io_pages);
2736                         if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2737                                                &eb->bflags))
2738                                 btree_readahead_hook(eb, -EIO);
2739                 }
2740 readpage_ok:
2741                 if (likely(uptodate)) {
2742                         loff_t i_size = i_size_read(inode);
2743                         pgoff_t end_index = i_size >> PAGE_SHIFT;
2744                         unsigned off;
2745
2746                         /* Zero out the end if this page straddles i_size */
2747                         off = offset_in_page(i_size);
2748                         if (page->index == end_index && off)
2749                                 zero_user_segment(page, off, PAGE_SIZE);
2750                         SetPageUptodate(page);
2751                 } else {
2752                         ClearPageUptodate(page);
2753                         SetPageError(page);
2754                 }
2755                 unlock_page(page);
2756                 offset += len;
2757
2758                 if (unlikely(!uptodate)) {
2759                         if (extent_len) {
2760                                 endio_readpage_release_extent(tree,
2761                                                               extent_start,
2762                                                               extent_len, 1);
2763                                 extent_start = 0;
2764                                 extent_len = 0;
2765                         }
2766                         endio_readpage_release_extent(tree, start,
2767                                                       end - start + 1, 0);
2768                 } else if (!extent_len) {
2769                         extent_start = start;
2770                         extent_len = end + 1 - start;
2771                 } else if (extent_start + extent_len == start) {
2772                         extent_len += end + 1 - start;
2773                 } else {
2774                         endio_readpage_release_extent(tree, extent_start,
2775                                                       extent_len, uptodate);
2776                         extent_start = start;
2777                         extent_len = end + 1 - start;
2778                 }
2779         }
2780
2781         if (extent_len)
2782                 endio_readpage_release_extent(tree, extent_start, extent_len,
2783                                               uptodate);
2784         btrfs_io_bio_free_csum(io_bio);
2785         bio_put(bio);
2786 }
2787
2788 /*
2789  * Initialize the members up to but not including 'bio'. Use this after
2790  * allocating a new bio with bio_alloc_bioset, as that does not initialize the
2791  * bytes outside of 'bio' (use of __GFP_ZERO is not supported).
2792  */
2793 static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
2794 {
2795         memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2796 }
2797
2798 /*
2799  * The following helpers allocate a bio. As it's backed by a bioset, it'll
2800  * never fail.  These return a struct bio, but you can call btrfs_io_bio()
2801  * for the appropriate container_of magic.
2802  */
2803 struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
2804 {
2805         struct bio *bio;
2806
2807         bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
2808         bio_set_dev(bio, bdev);
2809         bio->bi_iter.bi_sector = first_byte >> 9;
2810         btrfs_io_bio_init(btrfs_io_bio(bio));
2811         return bio;
2812 }
2813
2814 struct bio *btrfs_bio_clone(struct bio *bio)
2815 {
2816         struct btrfs_io_bio *btrfs_bio;
2817         struct bio *new;
2818
2819         /* Bio allocation backed by a bioset does not fail */
2820         new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
2821         btrfs_bio = btrfs_io_bio(new);
2822         btrfs_io_bio_init(btrfs_bio);
2823         btrfs_bio->iter = bio->bi_iter;
2824         return new;
2825 }
2826
2827 struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
2828 {
2829         struct bio *bio;
2830
2831         /* Bio allocation backed by a bioset does not fail */
2832         bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
2833         btrfs_io_bio_init(btrfs_io_bio(bio));
2834         return bio;
2835 }
2836
2837 struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
2838 {
2839         struct bio *bio;
2840         struct btrfs_io_bio *btrfs_bio;
2841
2842         /* Bio allocation backed by a bioset does not fail */
2843         bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
2844         ASSERT(bio);
2845
2846         btrfs_bio = btrfs_io_bio(bio);
2847         btrfs_io_bio_init(btrfs_bio);
2848
2849         bio_trim(bio, offset >> 9, size >> 9);
2850         btrfs_bio->iter = bio->bi_iter;
2851         return bio;
2852 }
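/*
 * Illustrative sketch (an assumed example, not a call site in this file):
 * splitting off the first stripe_len bytes of an oversized bio could look like
 *
 *	struct bio *split = btrfs_bio_clone_partial(orig, 0, stripe_len);
 *
 * Both offset and size are byte counts and must be multiples of 512, since
 * bio_trim() above operates on 512-byte sectors.
 */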
2853
2854 /*
2855  * @opf:        bio REQ_OP_* and REQ_* flags as one value
2856  * @tree:       tree so we can call our merge_bio hook
2857  * @wbc:        optional writeback control for io accounting
2858  * @page:       page to add to the bio
2859  * @offset:     disk byte offset where this range starts, also used to check
2860  *              whether we are adding a page contiguous to the previous bio
2861  * @size:       portion of the page that we want to write
2862  * @pg_offset:  starting offset of the data within the page
2863  * @bdev:       attach newly created bios to this bdev
2864  * @bio_ret:    must be valid pointer, newly allocated bio will be stored there
2865  * @end_io_func:     end_io callback for new bio
2866  * @mirror_num:      desired mirror to read/write
2867  * @prev_bio_flags:  flags of previous bio to see if we can merge the current one
2868  * @bio_flags:  flags of the current bio to see if we can merge them
2869  */
2870 static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
2871                               struct writeback_control *wbc,
2872                               struct page *page, u64 offset,
2873                               size_t size, unsigned long pg_offset,
2874                               struct block_device *bdev,
2875                               struct bio **bio_ret,
2876                               bio_end_io_t end_io_func,
2877                               int mirror_num,
2878                               unsigned long prev_bio_flags,
2879                               unsigned long bio_flags,
2880                               bool force_bio_submit)
2881 {
2882         int ret = 0;
2883         struct bio *bio;
2884         size_t page_size = min_t(size_t, size, PAGE_SIZE);
2885         sector_t sector = offset >> 9;
2886
2887         ASSERT(bio_ret);
2888
2889         if (*bio_ret) {
2890                 bool contig;
2891                 bool can_merge = true;
2892
2893                 bio = *bio_ret;
2894                 if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
2895                         contig = bio->bi_iter.bi_sector == sector;
2896                 else
2897                         contig = bio_end_sector(bio) == sector;
2898
2899                 ASSERT(tree->ops);
2900                 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
2901                         can_merge = false;
2902
2903                 if (prev_bio_flags != bio_flags || !contig || !can_merge ||
2904                     force_bio_submit ||
2905                     bio_add_page(bio, page, page_size, pg_offset) < page_size) {
2906                         ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
2907                         if (ret < 0) {
2908                                 *bio_ret = NULL;
2909                                 return ret;
2910                         }
2911                         bio = NULL;
2912                 } else {
2913                         if (wbc)
2914                                 wbc_account_io(wbc, page, page_size);
2915                         return 0;
2916                 }
2917         }
2918
2919         bio = btrfs_bio_alloc(bdev, offset);
2920         bio_add_page(bio, page, page_size, pg_offset);
2921         bio->bi_end_io = end_io_func;
2922         bio->bi_private = tree;
2923         bio->bi_write_hint = page->mapping->host->i_write_hint;
2924         bio->bi_opf = opf;
2925         if (wbc) {
2926                 wbc_init_bio(wbc, bio);
2927                 wbc_account_io(wbc, page, page_size);
2928         }
2929
2930         *bio_ret = bio;
2931
2932         return ret;
2933 }
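/*
 * Illustrative sketch of the calling convention (disk_offset, len and
 * pg_offset are placeholders; see __do_readpage and __extent_writepage_io
 * below for the real call sites): the same bio pointer is threaded through
 * consecutive calls so contiguous pages are merged into one bio, and whatever
 * is left in *bio_ret is submitted by the caller once the loop is done:
 *
 *	struct bio *bio = NULL;
 *
 *	ret = submit_extent_page(REQ_OP_READ, tree, NULL, page, disk_offset,
 *				 len, pg_offset, bdev, &bio,
 *				 end_bio_extent_readpage, 0, 0, 0, false);
 *	...
 *	if (bio)
 *		ret = submit_one_bio(bio, 0, 0);
 */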
2934
2935 static void attach_extent_buffer_page(struct extent_buffer *eb,
2936                                       struct page *page)
2937 {
2938         if (!PagePrivate(page)) {
2939                 SetPagePrivate(page);
2940                 get_page(page);
2941                 set_page_private(page, (unsigned long)eb);
2942         } else {
2943                 WARN_ON(page->private != (unsigned long)eb);
2944         }
2945 }
2946
2947 void set_page_extent_mapped(struct page *page)
2948 {
2949         if (!PagePrivate(page)) {
2950                 SetPagePrivate(page);
2951                 get_page(page);
2952                 set_page_private(page, EXTENT_PAGE_PRIVATE);
2953         }
2954 }
2955
2956 static struct extent_map *
2957 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2958                  u64 start, u64 len, get_extent_t *get_extent,
2959                  struct extent_map **em_cached)
2960 {
2961         struct extent_map *em;
2962
2963         if (em_cached && *em_cached) {
2964                 em = *em_cached;
2965                 if (extent_map_in_tree(em) && start >= em->start &&
2966                     start < extent_map_end(em)) {
2967                         refcount_inc(&em->refs);
2968                         return em;
2969                 }
2970
2971                 free_extent_map(em);
2972                 *em_cached = NULL;
2973         }
2974
2975         em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
2976         if (em_cached && !IS_ERR_OR_NULL(em)) {
2977                 BUG_ON(*em_cached);
2978                 refcount_inc(&em->refs);
2979                 *em_cached = em;
2980         }
2981         return em;
2982 }
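/*
 * Sketch of the em_cached contract (illustration only, mirroring how the
 * readpages path uses it): the caller keeps one long lived reference in
 * *em_cached across lookups and drops it once the whole batch is done:
 *
 *	struct extent_map *em_cached = NULL;
 *
 *	em = __get_extent_map(inode, page, 0, cur, len,
 *			      btrfs_get_extent, &em_cached);
 *	...
 *	free_extent_map(em);
 *	...
 *	if (em_cached)
 *		free_extent_map(em_cached);
 */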
2983 /*
2984  * Basic readpage implementation.  Locked extent state structs are inserted
2985  * into the tree and removed when the IO is done (by the end_io handlers).
2986  *
2987  * XXX JDM: This needs looking at to ensure proper page locking.
2988  * Return 0 on success, otherwise return an error.
2989  */
2990 static int __do_readpage(struct extent_io_tree *tree,
2991                          struct page *page,
2992                          get_extent_t *get_extent,
2993                          struct extent_map **em_cached,
2994                          struct bio **bio, int mirror_num,
2995                          unsigned long *bio_flags, unsigned int read_flags,
2996                          u64 *prev_em_start)
2997 {
2998         struct inode *inode = page->mapping->host;
2999         u64 start = page_offset(page);
3000         const u64 end = start + PAGE_SIZE - 1;
3001         u64 cur = start;
3002         u64 extent_offset;
3003         u64 last_byte = i_size_read(inode);
3004         u64 block_start;
3005         u64 cur_end;
3006         struct extent_map *em;
3007         struct block_device *bdev;
3008         int ret = 0;
3009         int nr = 0;
3010         size_t pg_offset = 0;
3011         size_t iosize;
3012         size_t disk_io_size;
3013         size_t blocksize = inode->i_sb->s_blocksize;
3014         unsigned long this_bio_flag = 0;
3015
3016         set_page_extent_mapped(page);
3017
3018         if (!PageUptodate(page)) {
3019                 if (cleancache_get_page(page) == 0) {
3020                         BUG_ON(blocksize != PAGE_SIZE);
3021                         unlock_extent(tree, start, end);
3022                         goto out;
3023                 }
3024         }
3025
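        /*
         * If this is the last page of the file, zero the tail of the page
         * that lies beyond i_size; nothing on disk backs that part of the
         * page, and readers expect zeros past EOF.
         */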
3026         if (page->index == last_byte >> PAGE_SHIFT) {
3027                 char *userpage;
3028                 size_t zero_offset = offset_in_page(last_byte);
3029
3030                 if (zero_offset) {
3031                         iosize = PAGE_SIZE - zero_offset;
3032                         userpage = kmap_atomic(page);
3033                         memset(userpage + zero_offset, 0, iosize);
3034                         flush_dcache_page(page);
3035                         kunmap_atomic(userpage);
3036                 }
3037         }
3038         while (cur <= end) {
3039                 bool force_bio_submit = false;
3040                 u64 offset;
3041
3042                 if (cur >= last_byte) {
3043                         char *userpage;
3044                         struct extent_state *cached = NULL;
3045
3046                         iosize = PAGE_SIZE - pg_offset;
3047                         userpage = kmap_atomic(page);
3048                         memset(userpage + pg_offset, 0, iosize);
3049                         flush_dcache_page(page);
3050                         kunmap_atomic(userpage);
3051                         set_extent_uptodate(tree, cur, cur + iosize - 1,
3052                                             &cached, GFP_NOFS);
3053                         unlock_extent_cached(tree, cur,
3054                                              cur + iosize - 1, &cached);
3055                         break;
3056                 }
3057                 em = __get_extent_map(inode, page, pg_offset, cur,
3058                                       end - cur + 1, get_extent, em_cached);
3059                 if (IS_ERR_OR_NULL(em)) {
3060                         SetPageError(page);
3061                         unlock_extent(tree, cur, end);
3062                         break;
3063                 }
3064                 extent_offset = cur - em->start;
3065                 BUG_ON(extent_map_end(em) <= cur);
3066                 BUG_ON(end < cur);
3067
3068                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3069                         this_bio_flag |= EXTENT_BIO_COMPRESSED;
3070                         extent_set_compress_type(&this_bio_flag,
3071                                                  em->compress_type);
3072                 }
3073
3074                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3075                 cur_end = min(extent_map_end(em) - 1, end);
3076                 iosize = ALIGN(iosize, blocksize);
3077                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3078                         disk_io_size = em->block_len;
3079                         offset = em->block_start;
3080                 } else {
3081                         offset = em->block_start + extent_offset;
3082                         disk_io_size = iosize;
3083                 }
3084                 bdev = em->bdev;
3085                 block_start = em->block_start;
3086                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3087                         block_start = EXTENT_MAP_HOLE;
3088
3089                 /*
3090                  * If we have a file range that points to a compressed extent
3091                  * and it's followed by a consecutive file range that points to
3092                  * the same compressed extent (possibly with a different
3093                  * offset and/or length, so it either points to the whole extent
3094                  * or only part of it), we must make sure we do not submit a
3095                  * single bio to populate the pages for the 2 ranges because
3096                  * this makes the compressed extent read zero out the pages
3097                  * belonging to the 2nd range. Imagine the following scenario:
3098                  *
3099                  *  File layout
3100                  *  [0 - 8K]                     [8K - 24K]
3101                  *    |                               |
3102                  *    |                               |
3103                  * points to extent X,         points to extent X,
3104                  * offset 4K, length of 8K     offset 0, length 16K
3105                  *
3106                  * [extent X, compressed length = 4K uncompressed length = 16K]
3107                  *
3108                  * If the bio to read the compressed extent covers both ranges,
3109                  * it will decompress extent X into the pages belonging to the
3110                  * first range and then it will stop, zeroing out the remaining
3111                  * pages that belong to the other range that points to extent X.
3112                  * So here we make sure we submit 2 bios, one for the first
3113                  * range and another one for the second range. Both will target
3114                  * the same physical extent from disk, but we can't currently
3115                  * make the compressed bio endio callback populate the pages
3116                  * for both ranges because each compressed bio is tightly
3117                  * coupled with a single extent map, and each range can have
3118                  * an extent map with a different offset value relative to the
3119                  * uncompressed data of our extent and different lengths. This
3120                  * is a corner case so we prioritize correctness over
3121                  * non-optimal behavior (submitting 2 bios for the same extent).
3122                  */
3123                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3124                     prev_em_start && *prev_em_start != (u64)-1 &&
3125                     *prev_em_start != em->start)
3126                         force_bio_submit = true;
3127
3128                 if (prev_em_start)
3129                         *prev_em_start = em->start;
3130
3131                 free_extent_map(em);
3132                 em = NULL;
3133
3134                 /* we've found a hole, just zero and go on */
3135                 if (block_start == EXTENT_MAP_HOLE) {
3136                         char *userpage;
3137                         struct extent_state *cached = NULL;
3138
3139                         userpage = kmap_atomic(page);
3140                         memset(userpage + pg_offset, 0, iosize);
3141                         flush_dcache_page(page);
3142                         kunmap_atomic(userpage);
3143
3144                         set_extent_uptodate(tree, cur, cur + iosize - 1,
3145                                             &cached, GFP_NOFS);
3146                         unlock_extent_cached(tree, cur,
3147                                              cur + iosize - 1, &cached);
3148                         cur = cur + iosize;
3149                         pg_offset += iosize;
3150                         continue;
3151                 }
3152                 /* the get_extent function already copied the data into the page */
3153                 if (test_range_bit(tree, cur, cur_end,
3154                                    EXTENT_UPTODATE, 1, NULL)) {
3155                         check_page_uptodate(tree, page);
3156                         unlock_extent(tree, cur, cur + iosize - 1);
3157                         cur = cur + iosize;
3158                         pg_offset += iosize;
3159                         continue;
3160                 }
3161                 /* we have an inline extent but it didn't get marked up
3162                  * to date.  Error out
3163                  */
3164                 if (block_start == EXTENT_MAP_INLINE) {
3165                         SetPageError(page);
3166                         unlock_extent(tree, cur, cur + iosize - 1);
3167                         cur = cur + iosize;
3168                         pg_offset += iosize;
3169                         continue;
3170                 }
3171
3172                 ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
3173                                          page, offset, disk_io_size,
3174                                          pg_offset, bdev, bio,
3175                                          end_bio_extent_readpage, mirror_num,
3176                                          *bio_flags,
3177                                          this_bio_flag,
3178                                          force_bio_submit);
3179                 if (!ret) {
3180                         nr++;
3181                         *bio_flags = this_bio_flag;
3182                 } else {
3183                         SetPageError(page);
3184                         unlock_extent(tree, cur, cur + iosize - 1);
3185                         goto out;
3186                 }
3187                 cur = cur + iosize;
3188                 pg_offset += iosize;
3189         }
3190 out:
3191         if (!nr) {
3192                 if (!PageError(page))
3193                         SetPageUptodate(page);
3194                 unlock_page(page);
3195         }
3196         return ret;
3197 }
3198
3199 static inline void contiguous_readpages(struct extent_io_tree *tree,
3200                                              struct page *pages[], int nr_pages,
3201                                              u64 start, u64 end,
3202                                              struct extent_map **em_cached,
3203                                              struct bio **bio,
3204                                              unsigned long *bio_flags,
3205                                              u64 *prev_em_start)
3206 {
3207         struct inode *inode;
3208         struct btrfs_ordered_extent *ordered;
3209         int index;
3210
3211         inode = pages[0]->mapping->host;
3212         while (1) {
3213                 lock_extent(tree, start, end);
3214                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
3215                                                      end - start + 1);
3216                 if (!ordered)
3217                         break;
3218                 unlock_extent(tree, start, end);
3219                 btrfs_start_ordered_extent(inode, ordered, 1);
3220                 btrfs_put_ordered_extent(ordered);
3221         }
3222
3223         for (index = 0; index < nr_pages; index++) {
3224                 __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
3225                                 bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
3226                 put_page(pages[index]);
3227         }
3228 }
3229
3230 static int __extent_read_full_page(struct extent_io_tree *tree,
3231                                    struct page *page,
3232                                    get_extent_t *get_extent,
3233                                    struct bio **bio, int mirror_num,
3234                                    unsigned long *bio_flags,
3235                                    unsigned int read_flags)
3236 {
3237         struct inode *inode = page->mapping->host;
3238         struct btrfs_ordered_extent *ordered;
3239         u64 start = page_offset(page);
3240         u64 end = start + PAGE_SIZE - 1;
3241         int ret;
3242
3243         while (1) {
3244                 lock_extent(tree, start, end);
3245                 ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
3246                                                 PAGE_SIZE);
3247                 if (!ordered)
3248                         break;
3249                 unlock_extent(tree, start, end);
3250                 btrfs_start_ordered_extent(inode, ordered, 1);
3251                 btrfs_put_ordered_extent(ordered);
3252         }
3253
3254         ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3255                             bio_flags, read_flags, NULL);
3256         return ret;
3257 }
3258
3259 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3260                             get_extent_t *get_extent, int mirror_num)
3261 {
3262         struct bio *bio = NULL;
3263         unsigned long bio_flags = 0;
3264         int ret;
3265
3266         ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3267                                       &bio_flags, 0);
3268         if (bio)
3269                 ret = submit_one_bio(bio, mirror_num, bio_flags);
3270         return ret;
3271 }
3272
3273 static void update_nr_written(struct writeback_control *wbc,
3274                               unsigned long nr_written)
3275 {
3276         wbc->nr_to_write -= nr_written;
3277 }
3278
3279 /*
3280  * Helper for __extent_writepage, doing all of the delayed allocation setup.
3281  *
3282  * This returns 1 if btrfs_run_delalloc_range() did all the work required
3283  * to write the page (copy into inline extent).  In this case the IO has
3284  * been started and the page is already unlocked.
3285  *
3286  * This returns 0 if all went well (page still locked)
3287  * This returns < 0 if there were errors (page still locked)
3288  */
3289 static noinline_for_stack int writepage_delalloc(struct inode *inode,
3290                 struct page *page, struct writeback_control *wbc,
3291                 u64 delalloc_start, unsigned long *nr_written)
3292 {
3293         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3294         u64 page_end = delalloc_start + PAGE_SIZE - 1;
3295         bool found;
3296         u64 delalloc_to_write = 0;
3297         u64 delalloc_end = 0;
3298         int ret;
3299         int page_started = 0;
3300
3301
3302         while (delalloc_end < page_end) {
3303                 found = find_lock_delalloc_range(inode, tree,
3304                                                page,
3305                                                &delalloc_start,
3306                                                &delalloc_end);
3307                 if (!found) {
3308                         delalloc_start = delalloc_end + 1;
3309                         continue;
3310                 }
3311                 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3312                                 delalloc_end, &page_started, nr_written, wbc);
3313                 if (ret) {
3314                         SetPageError(page);
3315                         /*
3316                          * btrfs_run_delalloc_range() should return < 0 on
3317                          * error, but just in case: a return value > 0 means
3318                          * the IO was started, so never return > 0 unless
3319                          * things actually went well.
3320                          */
3321                         ret = ret < 0 ? ret : -EIO;
3322                         goto done;
3323                 }
3324                 /*
3325                  * delalloc_end is already one less than the total length, so
3326                  * we don't subtract one from PAGE_SIZE
3327                  */
3328                 delalloc_to_write += (delalloc_end - delalloc_start +
3329                                       PAGE_SIZE) >> PAGE_SHIFT;
3330                 delalloc_start = delalloc_end + 1;
3331         }
3332         if (wbc->nr_to_write < delalloc_to_write) {
3333                 int thresh = 8192;
3334
3335                 if (delalloc_to_write < thresh * 2)
3336                         thresh = delalloc_to_write;
3337                 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3338                                          thresh);
3339         }
3340
3341         /*
3342          * Did btrfs_run_delalloc_range() already unlock and start the IO?
3343          */
3344         if (page_started) {
3345                 /*
3346                  * we've unlocked the page, so we can't update
3347                  * the mapping's writeback index, just update
3348                  * nr_to_write.
3349                  */
3350                 wbc->nr_to_write -= *nr_written;
3351                 return 1;
3352         }
3353
3354         ret = 0;
3355
3356 done:
3357         return ret;
3358 }
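/*
 * Illustrative sketch of how the caller maps the return values (this mirrors
 * __extent_writepage below, it is not a new call site; error_out is a
 * placeholder label):
 *
 *	ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
 *	if (ret == 1)
 *		return 0;
 *	if (ret < 0)
 *		goto error_out;
 *
 * where 1 means the IO was started and the page is already unlocked, and a
 * negative value means the page is still locked and the caller must clean up.
 */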
3359
3360 /*
3361  * helper for __extent_writepage.  This calls the writepage start hooks,
3362  * and does the loop to map the page into extents and bios.
3363  *
3364  * We return 1 if the IO is started and the page is unlocked,
3365  * 0 if all went well (page still locked)
3366  * < 0 if there were errors (page still locked)
3367  */
3368 static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3369                                  struct page *page,
3370                                  struct writeback_control *wbc,
3371                                  struct extent_page_data *epd,
3372                                  loff_t i_size,
3373                                  unsigned long nr_written,
3374                                  unsigned int write_flags, int *nr_ret)
3375 {
3376         struct extent_io_tree *tree = epd->tree;
3377         u64 start = page_offset(page);
3378         u64 page_end = start + PAGE_SIZE - 1;
3379         u64 end;
3380         u64 cur = start;
3381         u64 extent_offset;
3382         u64 block_start;
3383         u64 iosize;
3384         struct extent_map *em;
3385         struct block_device *bdev;
3386         size_t pg_offset = 0;
3387         size_t blocksize;
3388         int ret = 0;
3389         int nr = 0;
3390         bool compressed;
3391
3392         ret = btrfs_writepage_cow_fixup(page, start, page_end);
3393         if (ret) {
3394                 /* Fixup worker will requeue */
3395                 if (ret == -EBUSY)
3396                         wbc->pages_skipped++;
3397                 else
3398                         redirty_page_for_writepage(wbc, page);
3399
3400                 update_nr_written(wbc, nr_written);
3401                 unlock_page(page);
3402                 return 1;
3403         }
3404
3405         /*
3406          * we don't want to touch the inode after unlocking the page,
3407          * so we update the mapping writeback index now
3408          */
3409         update_nr_written(wbc, nr_written + 1);
3410
3411         end = page_end;
3412         if (i_size <= start) {
3413                 btrfs_writepage_endio_finish_ordered(page, start, page_end, 1);
3414                 goto done;
3415         }
3416
3417         blocksize = inode->i_sb->s_blocksize;
3418
3419         while (cur <= end) {
3420                 u64 em_end;
3421                 u64 offset;
3422
3423                 if (cur >= i_size) {
3424                         btrfs_writepage_endio_finish_ordered(page, cur,
3425                                                              page_end, 1);
3426                         break;
3427                 }
3428                 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
3429                                      end - cur + 1, 1);
3430                 if (IS_ERR_OR_NULL(em)) {
3431                         SetPageError(page);
3432                         ret = PTR_ERR_OR_ZERO(em);
3433                         break;
3434                 }
3435
3436                 extent_offset = cur - em->start;
3437                 em_end = extent_map_end(em);
3438                 BUG_ON(em_end <= cur);
3439                 BUG_ON(end < cur);
3440                 iosize = min(em_end - cur, end - cur + 1);
3441                 iosize = ALIGN(iosize, blocksize);
3442                 offset = em->block_start + extent_offset;
3443                 bdev = em->bdev;
3444                 block_start = em->block_start;
3445                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3446                 free_extent_map(em);
3447                 em = NULL;
3448
3449                 /*
3450                  * compressed and inline extents are written through other
3451                  * paths in the FS
3452                  */
3453                 if (compressed || block_start == EXTENT_MAP_HOLE ||
3454                     block_start == EXTENT_MAP_INLINE) {
3455                         /*
3456                          * end_io notification does not happen here for
3457                          * compressed extents
3458                          */
3459                         if (!compressed)
3460                                 btrfs_writepage_endio_finish_ordered(page, cur,
3461                                                             cur + iosize - 1,
3462                                                             1);
3463                         else if (compressed) {
3464                                 /* we don't want to end_page_writeback on
3465                                  * a compressed extent.  this happens
3466                                  * elsewhere
3467                                  */
3468                                 nr++;
3469                         }
3470
3471                         cur += iosize;
3472                         pg_offset += iosize;
3473                         continue;
3474                 }
3475
3476                 btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
3477                 if (!PageWriteback(page)) {
3478                         btrfs_err(BTRFS_I(inode)->root->fs_info,
3479                                    "page %lu not writeback, cur %llu end %llu",
3480                                page->index, cur, end);
3481                 }
3482
3483                 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3484                                          page, offset, iosize, pg_offset,
3485                                          bdev, &epd->bio,
3486                                          end_bio_extent_writepage,
3487                                          0, 0, 0, false);
3488                 if (ret) {
3489                         SetPageError(page);
3490                         if (PageWriteback(page))
3491                                 end_page_writeback(page);
3492                 }
3493
3494                 cur = cur + iosize;
3495                 pg_offset += iosize;
3496                 nr++;
3497         }
3498 done:
3499         *nr_ret = nr;
3500         return ret;
3501 }
3502
3503 /*
3504  * The writepage semantics are similar to regular writepage.  Extent
3505  * records are inserted to lock ranges in the tree, and as dirty areas
3506  * are found, they are marked writeback.  Then the lock bits are removed
3507  * and the end_io handler clears the writeback ranges.
3508  *
3509  * Return 0 if everything goes well.
3510  * Return <0 for error.
3511  */
3512 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3513                               struct extent_page_data *epd)
3514 {
3515         struct inode *inode = page->mapping->host;
3516         u64 start = page_offset(page);
3517         u64 page_end = start + PAGE_SIZE - 1;
3518         int ret;
3519         int nr = 0;
3520         size_t pg_offset = 0;
3521         loff_t i_size = i_size_read(inode);
3522         unsigned long end_index = i_size >> PAGE_SHIFT;
3523         unsigned int write_flags = 0;
3524         unsigned long nr_written = 0;
3525
3526         write_flags = wbc_to_write_flags(wbc);
3527
3528         trace___extent_writepage(page, inode, wbc);
3529
3530         WARN_ON(!PageLocked(page));
3531
3532         ClearPageError(page);
3533
3534         pg_offset = offset_in_page(i_size);
3535         if (page->index > end_index ||
3536            (page->index == end_index && !pg_offset)) {
3537                 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3538                 unlock_page(page);
3539                 return 0;
3540         }
3541
3542         if (page->index == end_index) {
3543                 char *userpage;
3544
3545                 userpage = kmap_atomic(page);
3546                 memset(userpage + pg_offset, 0,
3547                        PAGE_SIZE - pg_offset);
3548                 kunmap_atomic(userpage);
3549                 flush_dcache_page(page);
3550         }
3551
3552         pg_offset = 0;
3553
3554         set_page_extent_mapped(page);
3555
3556         if (!epd->extent_locked) {
3557                 ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
3558                 if (ret == 1)
3559                         goto done_unlocked;
3560                 if (ret)
3561                         goto done;
3562         }
3563
3564         ret = __extent_writepage_io(inode, page, wbc, epd,
3565                                     i_size, nr_written, write_flags, &nr);
3566         if (ret == 1)
3567                 goto done_unlocked;
3568
3569 done:
3570         if (nr == 0) {
3571                 /* make sure the mapping tag for page dirty gets cleared */
3572                 set_page_writeback(page);
3573                 end_page_writeback(page);
3574         }
3575         if (PageError(page)) {
3576                 ret = ret < 0 ? ret : -EIO;
3577                 end_extent_writepage(page, ret, start, page_end);
3578         }
3579         unlock_page(page);
3580         ASSERT(ret <= 0);
3581         return ret;
3582
3583 done_unlocked:
3584         return 0;
3585 }
3586
3587 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3588 {
3589         wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3590                        TASK_UNINTERRUPTIBLE);
3591 }
3592
3593 /*
3594  * Lock eb pages and flush the bio if we can't get the locks
3595  *
3596  * Return  0 if the extent buffer doesn't need to be submitted (e.g. not dirty)
3597  * Return >0 if the extent buffer was set up for writeback and must be written
3598  * Return <0 if something went wrong, no page is locked
3599  */
3600 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
3601                           struct extent_page_data *epd)
3602 {
3603         struct btrfs_fs_info *fs_info = eb->fs_info;
3604         int i, num_pages, failed_page_nr;
3605         int flush = 0;
3606         int ret = 0;
3607
3608         if (!btrfs_try_tree_write_lock(eb)) {
3609                 ret = flush_write_bio(epd);
3610                 if (ret < 0)
3611                         return ret;
3612                 flush = 1;
3613                 btrfs_tree_lock(eb);
3614         }
3615
3616         if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3617                 btrfs_tree_unlock(eb);
3618                 if (!epd->sync_io)
3619                         return 0;
3620                 if (!flush) {
3621                         ret = flush_write_bio(epd);
3622                         if (ret < 0)
3623                                 return ret;
3624                         flush = 1;
3625                 }
3626                 while (1) {
3627                         wait_on_extent_buffer_writeback(eb);
3628                         btrfs_tree_lock(eb);
3629                         if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3630                                 break;
3631                         btrfs_tree_unlock(eb);
3632                 }
3633         }
3634
3635         /*
3636          * We need to do this to prevent races with anyone who checks whether the
3637          * eb is under IO, since we can end up having no IO bits set for a short
3638          * period of time.
3639          */
3640         spin_lock(&eb->refs_lock);
3641         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3642                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3643                 spin_unlock(&eb->refs_lock);
3644                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3645                 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3646                                          -eb->len,
3647                                          fs_info->dirty_metadata_batch);
3648                 ret = 1;
3649         } else {
3650                 spin_unlock(&eb->refs_lock);
3651         }
3652
3653         btrfs_tree_unlock(eb);
3654
3655         if (!ret)
3656                 return ret;
3657
3658         num_pages = num_extent_pages(eb);
3659         for (i = 0; i < num_pages; i++) {
3660                 struct page *p = eb->pages[i];
3661
3662                 if (!trylock_page(p)) {
3663                         if (!flush) {
3664                                 ret = flush_write_bio(epd);
3665                                 if (ret < 0) {
3666                                         failed_page_nr = i;
3667                                         goto err_unlock;
3668                                 }
3669                                 flush = 1;
3670                         }
3671                         lock_page(p);
3672                 }
3673         }
3674
3675         return ret;
3676 err_unlock:
3677         /* Unlock already locked pages */
3678         for (i = 0; i < failed_page_nr; i++)
3679                 unlock_page(eb->pages[i]);
3680         return ret;
3681 }
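/*
 * Illustrative sketch of the expected call pattern (see
 * btree_write_cache_pages below for the real call site):
 *
 *	ret = lock_extent_buffer_for_io(eb, &epd);
 *	if (ret == 0)
 *		free_extent_buffer(eb);
 *	else if (ret > 0)
 *		ret = write_one_eb(eb, wbc, &epd);
 *
 * A negative return means flushing the pending bio failed and none of the
 * eb's pages were locked.
 */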
3682
3683 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3684 {
3685         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3686         smp_mb__after_atomic();
3687         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3688 }
3689
3690 static void set_btree_ioerr(struct page *page)
3691 {
3692         struct extent_buffer *eb = (struct extent_buffer *)page->private;
3693
3694         SetPageError(page);
3695         if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3696                 return;
3697
3698         /*
3699          * If writeback for a btree extent that doesn't belong to a log tree
3700          * failed, set the matching error bit in fs_info->flags (see below).
3701          * We do this because while the transaction is running and before it's
3702          * committing (when we call filemap_fdata[write|wait]_range against
3703          * the btree inode), we might have
3704          * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3705          * returns an error or an error happens during writeback, when we're
3706          * committing the transaction we wouldn't know about it, since the pages
3707          * can be no longer dirty nor marked anymore for writeback (if a
3708          * subsequent modification to the extent buffer didn't happen before the
3709          * transaction commit), which makes filemap_fdata[write|wait]_range not
3710          * able to find the pages tagged with SetPageError at transaction
3711          * commit time. So if this happens we must abort the transaction,
3712          * otherwise we commit a super block with btree roots that point to
3713          * btree nodes/leafs whose content on disk is invalid - either garbage
3714          * or the content of some node/leaf from a past generation that got
3715          * cowed or deleted and is no longer valid.
3716          *
3717          * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3718          * not be enough - we need to distinguish between log tree extents vs
3719          * non-log tree extents, and the next filemap_fdatawait_range() call
3720          * will catch and clear such errors in the mapping - and that call might
3721          * be from a log sync and not from a transaction commit. Also, checking
3722          * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3723          * not done and would not be reliable - the eb might have been released
3724          * from memory and reading it back again means that flag would not be
3725          * set (since it's a runtime flag, not persisted on disk).
3726          *
3727          * Using the flags below in the btree inode also covers the case that
3728          * AS_EIO/AS_ENOSPC would cover: writepages() returned success and started
3729          * writeback for all dirty pages, but that writeback finished with errors
3730          * before filemap_fdatawait_range() is called.  Because we were not using
3731          * AS_EIO/AS_ENOSPC,
3732          * filemap_fdatawait_range() would return success, as it could not know
3733          * that writeback errors happened (the pages were no longer tagged for
3734          * writeback).
3735          */
3736         switch (eb->log_index) {
3737         case -1:
3738                 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3739                 break;
3740         case 0:
3741                 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3742                 break;
3743         case 1:
3744                 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3745                 break;
3746         default:
3747                 BUG(); /* unexpected, logic error */
3748         }
3749 }
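/*
 * Rough sketch of how these bits are consumed later (illustration only, the
 * real checks live in the transaction commit and log sync paths, not in this
 * file):
 *
 *	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
 *		ret = -EIO;
 *
 * i.e. a write error recorded here turns into -EIO the next time the btree
 * (or the matching log tree) is written out and waited for.
 */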
3750
3751 static void end_bio_extent_buffer_writepage(struct bio *bio)
3752 {
3753         struct bio_vec *bvec;
3754         struct extent_buffer *eb;
3755         int done;
3756         struct bvec_iter_all iter_all;
3757
3758         ASSERT(!bio_flagged(bio, BIO_CLONED));
3759         bio_for_each_segment_all(bvec, bio, iter_all) {
3760                 struct page *page = bvec->bv_page;
3761
3762                 eb = (struct extent_buffer *)page->private;
3763                 BUG_ON(!eb);
3764                 done = atomic_dec_and_test(&eb->io_pages);
3765
3766                 if (bio->bi_status ||
3767                     test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3768                         ClearPageUptodate(page);
3769                         set_btree_ioerr(page);
3770                 }
3771
3772                 end_page_writeback(page);
3773
3774                 if (!done)
3775                         continue;
3776
3777                 end_extent_buffer_writeback(eb);
3778         }
3779
3780         bio_put(bio);
3781 }
3782
3783 static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3784                         struct writeback_control *wbc,
3785                         struct extent_page_data *epd)
3786 {
3787         struct btrfs_fs_info *fs_info = eb->fs_info;
3788         struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3789         struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3790         u64 offset = eb->start;
3791         u32 nritems;
3792         int i, num_pages;
3793         unsigned long start, end;
3794         unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3795         int ret = 0;
3796
3797         clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3798         num_pages = num_extent_pages(eb);
3799         atomic_set(&eb->io_pages, num_pages);
3800
3801         /* Set btree blocks beyond nritems to 0 to avoid stale content. */
3802         nritems = btrfs_header_nritems(eb);
3803         if (btrfs_header_level(eb) > 0) {
3804                 end = btrfs_node_key_ptr_offset(nritems);
3805
3806                 memzero_extent_buffer(eb, end, eb->len - end);
3807         } else {
3808                 /*
3809                  * leaf:
3810                  * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3811                  */
3812                 start = btrfs_item_nr_offset(nritems);
3813                 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
3814                 memzero_extent_buffer(eb, start, end - start);
3815         }
3816
3817         for (i = 0; i < num_pages; i++) {
3818                 struct page *p = eb->pages[i];
3819
3820                 clear_page_dirty_for_io(p);
3821                 set_page_writeback(p);
3822                 ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
3823                                          p, offset, PAGE_SIZE, 0, bdev,
3824                                          &epd->bio,
3825                                          end_bio_extent_buffer_writepage,
3826                                          0, 0, 0, false);
3827                 if (ret) {
3828                         set_btree_ioerr(p);
3829                         if (PageWriteback(p))
3830                                 end_page_writeback(p);
3831                         if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3832                                 end_extent_buffer_writeback(eb);
3833                         ret = -EIO;
3834                         break;
3835                 }
3836                 offset += PAGE_SIZE;
3837                 update_nr_written(wbc, 1);
3838                 unlock_page(p);
3839         }
3840
3841         if (unlikely(ret)) {
3842                 for (; i < num_pages; i++) {
3843                         struct page *p = eb->pages[i];
3844                         clear_page_dirty_for_io(p);
3845                         unlock_page(p);
3846                 }
3847         }
3848
3849         return ret;
3850 }
3851
3852 int btree_write_cache_pages(struct address_space *mapping,
3853                                    struct writeback_control *wbc)
3854 {
3855         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3856         struct extent_buffer *eb, *prev_eb = NULL;
3857         struct extent_page_data epd = {
3858                 .bio = NULL,
3859                 .tree = tree,
3860                 .extent_locked = 0,
3861                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3862         };
3863         int ret = 0;
3864         int done = 0;
3865         int nr_to_write_done = 0;
3866         struct pagevec pvec;
3867         int nr_pages;
3868         pgoff_t index;
3869         pgoff_t end;            /* Inclusive */
3870         int scanned = 0;
3871         xa_mark_t tag;
3872
3873         pagevec_init(&pvec);
3874         if (wbc->range_cyclic) {
3875                 index = mapping->writeback_index; /* Start from prev offset */
3876                 end = -1;
3877         } else {
3878                 index = wbc->range_start >> PAGE_SHIFT;
3879                 end = wbc->range_end >> PAGE_SHIFT;
3880                 scanned = 1;
3881         }
3882         if (wbc->sync_mode == WB_SYNC_ALL)
3883                 tag = PAGECACHE_TAG_TOWRITE;
3884         else
3885                 tag = PAGECACHE_TAG_DIRTY;
3886 retry:
3887         if (wbc->sync_mode == WB_SYNC_ALL)
3888                 tag_pages_for_writeback(mapping, index, end);
3889         while (!done && !nr_to_write_done && (index <= end) &&
3890                (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3891                         tag))) {
3892                 unsigned i;
3893
3894                 scanned = 1;
3895                 for (i = 0; i < nr_pages; i++) {
3896                         struct page *page = pvec.pages[i];
3897
3898                         if (!PagePrivate(page))
3899                                 continue;
3900
3901                         spin_lock(&mapping->private_lock);
3902                         if (!PagePrivate(page)) {
3903                                 spin_unlock(&mapping->private_lock);
3904                                 continue;
3905                         }
3906
3907                         eb = (struct extent_buffer *)page->private;
3908
3909                         /*
3910                          * Shouldn't happen and normally this would be a BUG_ON
3911                          * but no sense in crashing the user's box for something
3912                          * we can survive anyway.
3913                          */
3914                         if (WARN_ON(!eb)) {
3915                                 spin_unlock(&mapping->private_lock);
3916                                 continue;
3917                         }
3918
3919                         if (eb == prev_eb) {
3920                                 spin_unlock(&mapping->private_lock);
3921                                 continue;
3922                         }
3923
3924                         ret = atomic_inc_not_zero(&eb->refs);
3925                         spin_unlock(&mapping->private_lock);
3926                         if (!ret)
3927                                 continue;
3928
3929                         prev_eb = eb;
3930                         ret = lock_extent_buffer_for_io(eb, &epd);
3931                         if (!ret) {
3932                                 free_extent_buffer(eb);
3933                                 continue;
3934                         }
3935
3936                         ret = write_one_eb(eb, wbc, &epd);
3937                         if (ret) {
3938                                 done = 1;
3939                                 free_extent_buffer(eb);
3940                                 break;
3941                         }
3942                         free_extent_buffer(eb);
3943
3944                         /*
3945                          * the filesystem may choose to bump up nr_to_write.
3946                          * We have to make sure to honor the new nr_to_write
3947                          * at any time
3948                          */
3949                         nr_to_write_done = wbc->nr_to_write <= 0;
3950                 }
3951                 pagevec_release(&pvec);
3952                 cond_resched();
3953         }
3954         if (!scanned && !done) {
3955                 /*
3956                  * We hit the last page and there is more work to be done: wrap
3957                  * back to the start of the file
3958                  */
3959                 scanned = 1;
3960                 index = 0;
3961                 goto retry;
3962         }
3963         ASSERT(ret <= 0);
3964         if (ret < 0) {
3965                 end_write_bio(&epd, ret);
3966                 return ret;
3967         }
3968         ret = flush_write_bio(&epd);
3969         return ret;
3970 }
3971
3972 /**
3973  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3974  * @mapping: address space structure to write
3975  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3976  * @epd: extent_page_data which is passed on to __extent_writepage
3977  *
3978  * If a page is already under I/O, extent_write_cache_pages() skips it, even
3979  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
3980  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
3981  * and msync() need to guarantee that all the data which was dirty at the time
3982  * the call was made get new I/O started against them.  If wbc->sync_mode is
3983  * WB_SYNC_ALL then we were called for data integrity and we must wait for
3984  * existing IO to complete.
3985  */
3986 static int extent_write_cache_pages(struct address_space *mapping,
3987                              struct writeback_control *wbc,
3988                              struct extent_page_data *epd)
3989 {
3990         struct inode *inode = mapping->host;
3991         int ret = 0;
3992         int done = 0;
3993         int nr_to_write_done = 0;
3994         struct pagevec pvec;
3995         int nr_pages;
3996         pgoff_t index;
3997         pgoff_t end;            /* Inclusive */
3998         pgoff_t done_index;
3999         int range_whole = 0;
4000         int scanned = 0;
4001         xa_mark_t tag;
4002
4003         /*
4004          * We have to hold onto the inode so that ordered extents can do their
4005          * work when the IO finishes.  The alternative to this is failing to add
4006          * an ordered extent if the igrab() fails there and that is a huge pain
4007          * to deal with, so instead just hold onto the inode throughout the
4008          * writepages operation.  If it fails here we are freeing up the inode
4009          * anyway and we'd rather not waste our time writing out stuff that is
4010          * going to be truncated anyway.
4011          */
4012         if (!igrab(inode))
4013                 return 0;
4014
4015         pagevec_init(&pvec);
4016         if (wbc->range_cyclic) {
4017                 index = mapping->writeback_index; /* Start from prev offset */
4018                 end = -1;
4019         } else {
4020                 index = wbc->range_start >> PAGE_SHIFT;
4021                 end = wbc->range_end >> PAGE_SHIFT;
4022                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4023                         range_whole = 1;
4024                 scanned = 1;
4025         }
4026
4027         /*
4028          * We do the tagged writepage as long as the snapshot flush bit is set
4029          * and we are the first one to do the filemap_flush() on this inode.
4030          *
4031          * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4032          * not race in and drop the bit.
4033          */
4034         if (range_whole && wbc->nr_to_write == LONG_MAX &&
4035             test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4036                                &BTRFS_I(inode)->runtime_flags))
4037                 wbc->tagged_writepages = 1;
4038
4039         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4040                 tag = PAGECACHE_TAG_TOWRITE;
4041         else
4042                 tag = PAGECACHE_TAG_DIRTY;
4043 retry:
4044         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4045                 tag_pages_for_writeback(mapping, index, end);
4046         done_index = index;
4047         while (!done && !nr_to_write_done && (index <= end) &&
4048                         (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4049                                                 &index, end, tag))) {
4050                 unsigned i;
4051
4052                 scanned = 1;
4053                 for (i = 0; i < nr_pages; i++) {
4054                         struct page *page = pvec.pages[i];
4055
4056                         done_index = page->index;
4057                         /*
4058                          * At this point we hold neither the i_pages lock nor
4059                          * the page lock: the page may be truncated or
4060                          * invalidated (changing page->mapping to NULL),
4061                          * or even swizzled back from swapper_space to
4062                          * tmpfs file mapping
4063                          */
4064                         if (!trylock_page(page)) {
4065                                 ret = flush_write_bio(epd);
4066                                 BUG_ON(ret < 0);
4067                                 lock_page(page);
4068                         }
4069
4070                         if (unlikely(page->mapping != mapping)) {
4071                                 unlock_page(page);
4072                                 continue;
4073                         }
4074
4075                         if (wbc->sync_mode != WB_SYNC_NONE) {
4076                                 if (PageWriteback(page)) {
4077                                         ret = flush_write_bio(epd);
4078                                         BUG_ON(ret < 0);
4079                                 }
4080                                 wait_on_page_writeback(page);
4081                         }
4082
4083                         if (PageWriteback(page) ||
4084                             !clear_page_dirty_for_io(page)) {
4085                                 unlock_page(page);
4086                                 continue;
4087                         }
4088
4089                         ret = __extent_writepage(page, wbc, epd);
4090                         if (ret < 0) {
4091                                 /*
4092                                  * done_index is set past this page,
4093                                  * so media errors will not choke
4094                                  * background writeout for the entire
4095                                  * file. This has consequences for
4096                                  * range_cyclic semantics (ie. it may
4097                                  * not be suitable for data integrity
4098                                  * writeout).
4099                                  */
4100                                 done_index = page->index + 1;
4101                                 done = 1;
4102                                 break;
4103                         }
4104
4105                         /*
4106                          * the filesystem may choose to bump up nr_to_write.
4107                          * We have to make sure to honor the new nr_to_write
4108                          * at any time
4109                          */
4110                         nr_to_write_done = wbc->nr_to_write <= 0;
4111                 }
4112                 pagevec_release(&pvec);
4113                 cond_resched();
4114         }
4115         if (!scanned && !done) {
4116                 /*
4117                  * We hit the last page and there is more work to be done: wrap
4118                  * back to the start of the file
4119                  */
4120                 scanned = 1;
4121                 index = 0;
4122                 goto retry;
4123         }
4124
4125         if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4126                 mapping->writeback_index = done_index;
4127
4128         btrfs_add_delayed_iput(inode);
4129         return ret;
4130 }
4131
4132 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4133 {
4134         int ret;
4135         struct extent_page_data epd = {
4136                 .bio = NULL,
4137                 .tree = &BTRFS_I(page->mapping->host)->io_tree,
4138                 .extent_locked = 0,
4139                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4140         };
4141
4142         ret = __extent_writepage(page, wbc, &epd);
4143         ASSERT(ret <= 0);
4144         if (ret < 0) {
4145                 end_write_bio(&epd, ret);
4146                 return ret;
4147         }
4148
4149         ret = flush_write_bio(&epd);
4150         ASSERT(ret <= 0);
4151         return ret;
4152 }
4153
4154 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4155                               int mode)
4156 {
4157         int ret = 0;
4158         struct address_space *mapping = inode->i_mapping;
4159         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
4160         struct page *page;
4161         unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4162                 PAGE_SHIFT;
4163
4164         struct extent_page_data epd = {
4165                 .bio = NULL,
4166                 .tree = tree,
4167                 .extent_locked = 1,
4168                 .sync_io = mode == WB_SYNC_ALL,
4169         };
4170         struct writeback_control wbc_writepages = {
4171                 .sync_mode      = mode,
4172                 .nr_to_write    = nr_pages * 2,
4173                 .range_start    = start,
4174                 .range_end      = end + 1,
4175         };
4176
4177         while (start <= end) {
4178                 page = find_get_page(mapping, start >> PAGE_SHIFT);
4179                 if (clear_page_dirty_for_io(page))
4180                         ret = __extent_writepage(page, &wbc_writepages, &epd);
4181                 else {
4182                         btrfs_writepage_endio_finish_ordered(page, start,
4183                                                     start + PAGE_SIZE - 1, 1);
4184                         unlock_page(page);
4185                 }
4186                 put_page(page);
4187                 start += PAGE_SIZE;
4188         }
4189
4190         ASSERT(ret <= 0);
4191         if (ret < 0) {
4192                 end_write_bio(&epd, ret);
4193                 return ret;
4194         }
4195         ret = flush_write_bio(&epd);
4196         return ret;
4197 }
4198
4199 int extent_writepages(struct address_space *mapping,
4200                       struct writeback_control *wbc)
4201 {
4202         int ret = 0;
4203         struct extent_page_data epd = {
4204                 .bio = NULL,
4205                 .tree = &BTRFS_I(mapping->host)->io_tree,
4206                 .extent_locked = 0,
4207                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4208         };
4209
4210         ret = extent_write_cache_pages(mapping, wbc, &epd);
4211         ASSERT(ret <= 0);
4212         if (ret < 0) {
4213                 end_write_bio(&epd, ret);
4214                 return ret;
4215         }
4216         ret = flush_write_bio(&epd);
4217         return ret;
4218 }
4219
4220 int extent_readpages(struct address_space *mapping, struct list_head *pages,
4221                      unsigned nr_pages)
4222 {
4223         struct bio *bio = NULL;
4224         unsigned long bio_flags = 0;
4225         struct page *pagepool[16];
4226         struct extent_map *em_cached = NULL;
4227         struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
4228         int nr = 0;
4229         u64 prev_em_start = (u64)-1;
4230
4231         while (!list_empty(pages)) {
4232                 u64 contig_end = 0;
4233
4234                 for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
4235                         struct page *page = lru_to_page(pages);
4236
4237                         prefetchw(&page->flags);
4238                         list_del(&page->lru);
4239                         if (add_to_page_cache_lru(page, mapping, page->index,
4240                                                 readahead_gfp_mask(mapping))) {
4241                                 put_page(page);
4242                                 break;
4243                         }
4244
4245                         pagepool[nr++] = page;
4246                         contig_end = page_offset(page) + PAGE_SIZE - 1;
4247                 }
4248
4249                 if (nr) {
4250                         u64 contig_start = page_offset(pagepool[0]);
4251
4252                         ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
4253
4254                         contiguous_readpages(tree, pagepool, nr, contig_start,
4255                                      contig_end, &em_cached, &bio, &bio_flags,
4256                                      &prev_em_start);
4257                 }
4258         }
4259
4260         if (em_cached)
4261                 free_extent_map(em_cached);
4262
4263         if (bio)
4264                 return submit_one_bio(bio, 0, bio_flags);
4265         return 0;
4266 }
4267
4268 /*
4269  * basic invalidatepage code, this waits on any locked or writeback
4270  * ranges corresponding to the page, and then deletes any extent state
4271  * records from the tree
4272  */
4273 int extent_invalidatepage(struct extent_io_tree *tree,
4274                           struct page *page, unsigned long offset)
4275 {
4276         struct extent_state *cached_state = NULL;
4277         u64 start = page_offset(page);
4278         u64 end = start + PAGE_SIZE - 1;
4279         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4280
4281         start += ALIGN(offset, blocksize);
4282         if (start > end)
4283                 return 0;
4284
4285         lock_extent_bits(tree, start, end, &cached_state);
4286         wait_on_page_writeback(page);
4287         clear_extent_bit(tree, start, end,
4288                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4289                          EXTENT_DO_ACCOUNTING,
4290                          1, 1, &cached_state);
4291         return 0;
4292 }
4293
4294 /*
4295  * a helper for releasepage: this tests for areas of the page that
4296  * are locked or under IO and drops the related state bits if it is safe
4297  * to drop the page.
4298  */
4299 static int try_release_extent_state(struct extent_io_tree *tree,
4300                                     struct page *page, gfp_t mask)
4301 {
4302         u64 start = page_offset(page);
4303         u64 end = start + PAGE_SIZE - 1;
4304         int ret = 1;
4305
4306         if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
4307                 ret = 0;
4308         } else {
4309                 /*
4310                  * at this point we can safely clear everything except the
4311                  * locked bit and the nodatasum bit
4312                  */
4313                 ret = __clear_extent_bit(tree, start, end,
4314                                  ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4315                                  0, 0, NULL, mask, NULL);
4316
4317                 /* if clear_extent_bit failed for ENOMEM reasons,
4318                  * we can't allow the release to continue.
4319                  */
4320                 if (ret < 0)
4321                         ret = 0;
4322                 else
4323                         ret = 1;
4324         }
4325         return ret;
4326 }
4327
4328 /*
4329  * a helper for releasepage.  As long as there are no locked extents
4330  * in the range corresponding to the page, both state records and extent
4331  * map records are removed
4332  */
4333 int try_release_extent_mapping(struct page *page, gfp_t mask)
4334 {
4335         struct extent_map *em;
4336         u64 start = page_offset(page);
4337         u64 end = start + PAGE_SIZE - 1;
4338         struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4339         struct extent_io_tree *tree = &btrfs_inode->io_tree;
4340         struct extent_map_tree *map = &btrfs_inode->extent_tree;
4341
4342         if (gfpflags_allow_blocking(mask) &&
4343             page->mapping->host->i_size > SZ_16M) {
4344                 u64 len;
4345                 while (start <= end) {
4346                         len = end - start + 1;
4347                         write_lock(&map->lock);
4348                         em = lookup_extent_mapping(map, start, len);
4349                         if (!em) {
4350                                 write_unlock(&map->lock);
4351                                 break;
4352                         }
4353                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4354                             em->start != start) {
4355                                 write_unlock(&map->lock);
4356                                 free_extent_map(em);
4357                                 break;
4358                         }
4359                         if (!test_range_bit(tree, em->start,
4360                                             extent_map_end(em) - 1,
4361                                             EXTENT_LOCKED, 0, NULL)) {
4362                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4363                                         &btrfs_inode->runtime_flags);
4364                                 remove_extent_mapping(map, em);
4365                                 /* once for the rb tree */
4366                                 free_extent_map(em);
4367                         }
4368                         start = extent_map_end(em);
4369                         write_unlock(&map->lock);
4370
4371                         /* once for us */
4372                         free_extent_map(em);
4373                 }
4374         }
4375         return try_release_extent_state(tree, page, mask);
4376 }
4377
4378 /*
4379  * helper function for fiemap, which doesn't want to see any holes.
4380  * This maps until we find something past 'last'
4381  */
4382 static struct extent_map *get_extent_skip_holes(struct inode *inode,
4383                                                 u64 offset, u64 last)
4384 {
4385         u64 sectorsize = btrfs_inode_sectorsize(inode);
4386         struct extent_map *em;
4387         u64 len;
4388
4389         if (offset >= last)
4390                 return NULL;
4391
4392         while (1) {
4393                 len = last - offset;
4394                 if (len == 0)
4395                         break;
4396                 len = ALIGN(len, sectorsize);
4397                 em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
4398                 if (IS_ERR_OR_NULL(em))
4399                         return em;
4400
4401                 /* if this isn't a hole return it */
4402                 if (em->block_start != EXTENT_MAP_HOLE)
4403                         return em;
4404
4405                 /* this is a hole, advance to the next extent */
4406                 offset = extent_map_end(em);
4407                 free_extent_map(em);
4408                 if (offset >= last)
4409                         break;
4410         }
4411         return NULL;
4412 }
4413
4414 /*
4415  * To cache the previous fiemap extent
4416  *
4417  * Will be used for merging fiemap extents
4418  */
4419 struct fiemap_cache {
4420         u64 offset;
4421         u64 phys;
4422         u64 len;
4423         u32 flags;
4424         bool cached;
4425 };
4426
4427 /*
4428  * Helper to submit fiemap extent.
4429  *
4430  * Will try to merge the current fiemap extent, specified by @offset,
4431  * @phys, @len and @flags, with the cached one.
4432  * Only when we fail to merge is the cached one submitted as a
4433  * fiemap extent.
4434  *
4435  * Return value is the same as fiemap_fill_next_extent().
4436  */
4437 static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4438                                 struct fiemap_cache *cache,
4439                                 u64 offset, u64 phys, u64 len, u32 flags)
4440 {
4441         int ret = 0;
4442
4443         if (!cache->cached)
4444                 goto assign;
4445
4446         /*
4447          * Sanity check: extent_fiemap() should have ensured that the new
4448          * fiemap extent won't overlap with the cached one.
4449          * Not recoverable.
4450          *
4451          * NOTE: Physical address can overlap, due to compression
4452          */
4453         if (cache->offset + cache->len > offset) {
4454                 WARN_ON(1);
4455                 return -EINVAL;
4456         }
4457
4458         /*
4459          * Only merge fiemap extents if
4460          * 1) Their logical addresses are contiguous
4461          *
4462          * 2) Their physical addresses are contiguous
4463          *    So truly compressed extents (physical size smaller than
4464          *    logical size) won't get merged with each other
4465          *
4466          * 3) They share the same flags except FIEMAP_EXTENT_LAST
4467          *    So a regular extent won't get merged with a prealloc extent
4468          */
4469         if (cache->offset + cache->len  == offset &&
4470             cache->phys + cache->len == phys  &&
4471             (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4472                         (flags & ~FIEMAP_EXTENT_LAST)) {
4473                 cache->len += len;
4474                 cache->flags |= flags;
4475                 goto try_submit_last;
4476         }
4477
4478         /* Not mergeable, need to submit cached one */
4479         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4480                                       cache->len, cache->flags);
4481         cache->cached = false;
4482         if (ret)
4483                 return ret;
4484 assign:
4485         cache->cached = true;
4486         cache->offset = offset;
4487         cache->phys = phys;
4488         cache->len = len;
4489         cache->flags = flags;
4490 try_submit_last:
4491         if (cache->flags & FIEMAP_EXTENT_LAST) {
4492                 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4493                                 cache->phys, cache->len, cache->flags);
4494                 cache->cached = false;
4495         }
4496         return ret;
4497 }
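/*
 * Editor's note (illustration, not part of the original source): a worked
 * example of the merge rule above, assuming 4K extents.  With a cached entry
 * { offset = 0, phys = 1M, len = 4K, flags = 0 } and a new extent
 * { offset = 4K, phys = 1M + 4K, len = 4K, flags = 0 }, both the logical and
 * the physical ranges are contiguous and the flags match, so the cache simply
 * grows to { offset = 0, phys = 1M, len = 8K }.  If the new extent instead
 * sat at an unrelated physical address (e.g. a separately allocated extent),
 * the cached entry would be flushed via fiemap_fill_next_extent() and the new
 * extent would take its place in the cache.
 */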
4498
4499 /*
4500  * Emit last fiemap cache
4501  *
4502  * The last fiemap cache may still be cached in the following case:
4503  * 0                  4k                    8k
4504  * |<- Fiemap range ->|
4505  * |<------------  First extent ----------->|
4506  *
4507  * In this case, the first extent range will be cached but not emitted.
4508  * So we must emit it before ending extent_fiemap().
4509  */
4510 static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
4511                                   struct fiemap_cache *cache)
4512 {
4513         int ret;
4514
4515         if (!cache->cached)
4516                 return 0;
4517
4518         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4519                                       cache->len, cache->flags);
4520         cache->cached = false;
4521         if (ret > 0)
4522                 ret = 0;
4523         return ret;
4524 }
4525
4526 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4527                 __u64 start, __u64 len)
4528 {
4529         int ret = 0;
4530         u64 off = start;
4531         u64 max = start + len;
4532         u32 flags = 0;
4533         u32 found_type;
4534         u64 last;
4535         u64 last_for_get_extent = 0;
4536         u64 disko = 0;
4537         u64 isize = i_size_read(inode);
4538         struct btrfs_key found_key;
4539         struct extent_map *em = NULL;
4540         struct extent_state *cached_state = NULL;
4541         struct btrfs_path *path;
4542         struct btrfs_root *root = BTRFS_I(inode)->root;
4543         struct fiemap_cache cache = { 0 };
4544         struct ulist *roots;
4545         struct ulist *tmp_ulist;
4546         int end = 0;
4547         u64 em_start = 0;
4548         u64 em_len = 0;
4549         u64 em_end = 0;
4550
4551         if (len == 0)
4552                 return -EINVAL;
4553
4554         path = btrfs_alloc_path();
4555         if (!path)
4556                 return -ENOMEM;
4557         path->leave_spinning = 1;
4558
4559         roots = ulist_alloc(GFP_KERNEL);
4560         tmp_ulist = ulist_alloc(GFP_KERNEL);
4561         if (!roots || !tmp_ulist) {
4562                 ret = -ENOMEM;
4563                 goto out_free_ulist;
4564         }
4565
4566         start = round_down(start, btrfs_inode_sectorsize(inode));
4567         len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4568
4569         /*
4570          * lookup the last file extent.  We're not using i_size here
4571          * because there might be preallocation past i_size
4572          */
4573         ret = btrfs_lookup_file_extent(NULL, root, path,
4574                         btrfs_ino(BTRFS_I(inode)), -1, 0);
4575         if (ret < 0) {
4576                 btrfs_free_path(path);
4577                 goto out_free_ulist;
4578         } else {
4579                 WARN_ON(!ret);
4580                 if (ret == 1)
4581                         ret = 0;
4582         }
4583
4584         path->slots[0]--;
4585         btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4586         found_type = found_key.type;
4587
4588         /* No extents, but there might be delalloc bits */
4589         if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
4590             found_type != BTRFS_EXTENT_DATA_KEY) {
4591                 /* have to trust i_size as the end */
4592                 last = (u64)-1;
4593                 last_for_get_extent = isize;
4594         } else {
4595                 /*
4596                  * remember the start of the last extent.  There are a
4597                  * bunch of different factors that go into the length of the
4598                  * extent, so it's much less complex to remember where it started
4599                  */
4600                 last = found_key.offset;
4601                 last_for_get_extent = last + 1;
4602         }
4603         btrfs_release_path(path);
4604
4605         /*
4606          * we might have some extents allocated but more delalloc past those
4607          * extents.  so, we trust isize unless the start of the last extent is
4608          * beyond isize
4609          */
4610         if (last < isize) {
4611                 last = (u64)-1;
4612                 last_for_get_extent = isize;
4613         }
4614
4615         lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4616                          &cached_state);
4617
4618         em = get_extent_skip_holes(inode, start, last_for_get_extent);
4619         if (!em)
4620                 goto out;
4621         if (IS_ERR(em)) {
4622                 ret = PTR_ERR(em);
4623                 goto out;
4624         }
4625
4626         while (!end) {
4627                 u64 offset_in_extent = 0;
4628
4629                 /* break if the extent we found is outside the range */
4630                 if (em->start >= max || extent_map_end(em) < off)
4631                         break;
4632
4633                 /*
4634                  * get_extent may return an extent that starts before our
4635                  * requested range.  We have to make sure the ranges
4636                  * we return to fiemap always move forward and don't
4637                  * overlap, so adjust the offsets here
4638                  */
4639                 em_start = max(em->start, off);
4640
4641                 /*
4642                  * record the offset from the start of the extent
4643                  * for adjusting the disk offset below.  Only do this if the
4644                  * extent isn't compressed since our in ram offset may be past
4645                  * what we have actually allocated on disk.
4646                  */
4647                 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4648                         offset_in_extent = em_start - em->start;
4649                 em_end = extent_map_end(em);
4650                 em_len = em_end - em_start;
4651                 flags = 0;
4652                 if (em->block_start < EXTENT_MAP_LAST_BYTE)
4653                         disko = em->block_start + offset_in_extent;
4654                 else
4655                         disko = 0;
4656
4657                 /*
4658                  * bump off for our next call to get_extent
4659                  */
4660                 off = extent_map_end(em);
4661                 if (off >= max)
4662                         end = 1;
4663
4664                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4665                         end = 1;
4666                         flags |= FIEMAP_EXTENT_LAST;
4667                 } else if (em->block_start == EXTENT_MAP_INLINE) {
4668                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
4669                                   FIEMAP_EXTENT_NOT_ALIGNED);
4670                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4671                         flags |= (FIEMAP_EXTENT_DELALLOC |
4672                                   FIEMAP_EXTENT_UNKNOWN);
4673                 } else if (fieinfo->fi_extents_max) {
4674                         u64 bytenr = em->block_start -
4675                                 (em->start - em->orig_start);
4676
4677                         /*
4678                          * As btrfs supports shared space, this information
4679                          * can be exported to userspace tools via
4680                          * flag FIEMAP_EXTENT_SHARED.  If fi_extents_max == 0
4681                          * then we're just getting a count and we can skip the
4682                          * lookup stuff.
4683                          */
4684                         ret = btrfs_check_shared(root,
4685                                                  btrfs_ino(BTRFS_I(inode)),
4686                                                  bytenr, roots, tmp_ulist);
4687                         if (ret < 0)
4688                                 goto out_free;
4689                         if (ret)
4690                                 flags |= FIEMAP_EXTENT_SHARED;
4691                         ret = 0;
4692                 }
4693                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4694                         flags |= FIEMAP_EXTENT_ENCODED;
4695                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4696                         flags |= FIEMAP_EXTENT_UNWRITTEN;
4697
4698                 free_extent_map(em);
4699                 em = NULL;
4700                 if ((em_start >= last) || em_len == (u64)-1 ||
4701                    (last == (u64)-1 && isize <= em_end)) {
4702                         flags |= FIEMAP_EXTENT_LAST;
4703                         end = 1;
4704                 }
4705
4706                 /* now scan forward to see if this is really the last extent. */
4707                 em = get_extent_skip_holes(inode, off, last_for_get_extent);
4708                 if (IS_ERR(em)) {
4709                         ret = PTR_ERR(em);
4710                         goto out;
4711                 }
4712                 if (!em) {
4713                         flags |= FIEMAP_EXTENT_LAST;
4714                         end = 1;
4715                 }
4716                 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4717                                            em_len, flags);
4718                 if (ret) {
4719                         if (ret == 1)
4720                                 ret = 0;
4721                         goto out_free;
4722                 }
4723         }
4724 out_free:
4725         if (!ret)
4726                 ret = emit_last_fiemap_cache(fieinfo, &cache);
4727         free_extent_map(em);
4728 out:
4729         btrfs_free_path(path);
4730         unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4731                              &cached_state);
4732
4733 out_free_ulist:
4734         ulist_free(roots);
4735         ulist_free(tmp_ulist);
4736         return ret;
4737 }
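/*
 * Editor's note (illustration, not part of the original source): on btrfs the
 * FS_IOC_FIEMAP ioctl is ultimately serviced by extent_fiemap() above, so its
 * behaviour (extent merging, FIEMAP_EXTENT_SHARED, FIEMAP_EXTENT_LAST) can be
 * observed from userspace.  A minimal sketch of such a caller follows; it uses
 * only the generic UAPI headers, nothing btrfs-specific, and the extent count
 * of 32 is an arbitrary choice.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	unsigned int max_extents = 32;
	size_t sz = sizeof(struct fiemap) +
		    max_extents * sizeof(struct fiemap_extent);
	struct fiemap *fm = calloc(1, sz);
	unsigned int i;
	int fd;

	if (argc < 2 || !fm)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	fm->fm_start = 0;
	fm->fm_length = ~0ULL;			/* map the whole file */
	fm->fm_extent_count = max_extents;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
		return 1;

	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu len %llu flags 0x%x\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);

	close(fd);
	free(fm);
	return 0;
}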
4738
4739 static void __free_extent_buffer(struct extent_buffer *eb)
4740 {
4741         btrfs_leak_debug_del(&eb->leak_list);
4742         kmem_cache_free(extent_buffer_cache, eb);
4743 }
4744
4745 int extent_buffer_under_io(struct extent_buffer *eb)
4746 {
4747         return (atomic_read(&eb->io_pages) ||
4748                 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4749                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4750 }
4751
4752 /*
4753  * Release all pages attached to the extent buffer.
4754  */
4755 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
4756 {
4757         int i;
4758         int num_pages;
4759         int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4760
4761         BUG_ON(extent_buffer_under_io(eb));
4762
4763         num_pages = num_extent_pages(eb);
4764         for (i = 0; i < num_pages; i++) {
4765                 struct page *page = eb->pages[i];
4766
4767                 if (!page)
4768                         continue;
4769                 if (mapped)
4770                         spin_lock(&page->mapping->private_lock);
4771                 /*
4772                  * We do this since we'll remove the pages after we've
4773                  * removed the eb from the radix tree, so we could race
4774                  * and have this page now attached to the new eb.  So
4775                  * only clear page_private if it's still connected to
4776                  * this eb.
4777                  */
4778                 if (PagePrivate(page) &&
4779                     page->private == (unsigned long)eb) {
4780                         BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4781                         BUG_ON(PageDirty(page));
4782                         BUG_ON(PageWriteback(page));
4783                         /*
4784                          * We need to make sure we haven't been attached
4785                          * to a new eb.
4786                          */
4787                         ClearPagePrivate(page);
4788                         set_page_private(page, 0);
4789                         /* One for the page private */
4790                         put_page(page);
4791                 }
4792
4793                 if (mapped)
4794                         spin_unlock(&page->mapping->private_lock);
4795
4796                 /* One for when we allocated the page */
4797                 put_page(page);
4798         }
4799 }
4800
4801 /*
4802  * Helper for releasing the extent buffer.
4803  */
4804 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4805 {
4806         btrfs_release_extent_buffer_pages(eb);
4807         __free_extent_buffer(eb);
4808 }
4809
4810 static struct extent_buffer *
4811 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4812                       unsigned long len)
4813 {
4814         struct extent_buffer *eb = NULL;
4815
4816         eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4817         eb->start = start;
4818         eb->len = len;
4819         eb->fs_info = fs_info;
4820         eb->bflags = 0;
4821         rwlock_init(&eb->lock);
4822         atomic_set(&eb->blocking_readers, 0);
4823         atomic_set(&eb->blocking_writers, 0);
4824         eb->lock_nested = false;
4825         init_waitqueue_head(&eb->write_lock_wq);
4826         init_waitqueue_head(&eb->read_lock_wq);
4827
4828         btrfs_leak_debug_add(&eb->leak_list, &buffers);
4829
4830         spin_lock_init(&eb->refs_lock);
4831         atomic_set(&eb->refs, 1);
4832         atomic_set(&eb->io_pages, 0);
4833
4834         /*
4835          * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4836          */
4837         BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4838                 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4839         BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4840
4841 #ifdef CONFIG_BTRFS_DEBUG
4842         atomic_set(&eb->spinning_writers, 0);
4843         atomic_set(&eb->spinning_readers, 0);
4844         atomic_set(&eb->read_locks, 0);
4845         atomic_set(&eb->write_locks, 0);
4846 #endif
4847
4848         return eb;
4849 }
4850
4851 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4852 {
4853         int i;
4854         struct page *p;
4855         struct extent_buffer *new;
4856         int num_pages = num_extent_pages(src);
4857
4858         new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4859         if (new == NULL)
4860                 return NULL;
4861
4862         for (i = 0; i < num_pages; i++) {
4863                 p = alloc_page(GFP_NOFS);
4864                 if (!p) {
4865                         btrfs_release_extent_buffer(new);
4866                         return NULL;
4867                 }
4868                 attach_extent_buffer_page(new, p);
4869                 WARN_ON(PageDirty(p));
4870                 SetPageUptodate(p);
4871                 new->pages[i] = p;
4872                 copy_page(page_address(p), page_address(src->pages[i]));
4873         }
4874
4875         set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4876         set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
4877
4878         return new;
4879 }
4880
4881 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4882                                                   u64 start, unsigned long len)
4883 {
4884         struct extent_buffer *eb;
4885         int num_pages;
4886         int i;
4887
4888         eb = __alloc_extent_buffer(fs_info, start, len);
4889         if (!eb)
4890                 return NULL;
4891
4892         num_pages = num_extent_pages(eb);
4893         for (i = 0; i < num_pages; i++) {
4894                 eb->pages[i] = alloc_page(GFP_NOFS);
4895                 if (!eb->pages[i])
4896                         goto err;
4897         }
4898         set_extent_buffer_uptodate(eb);
4899         btrfs_set_header_nritems(eb, 0);
4900         set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4901
4902         return eb;
4903 err:
4904         for (; i > 0; i--)
4905                 __free_page(eb->pages[i - 1]);
4906         __free_extent_buffer(eb);
4907         return NULL;
4908 }
4909
4910 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4911                                                 u64 start)
4912 {
4913         return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
4914 }
4915
4916 static void check_buffer_tree_ref(struct extent_buffer *eb)
4917 {
4918         int refs;
4919         /* the ref bit is tricky.  We have to make sure it is set
4920          * if we have the buffer dirty.   Otherwise the
4921          * code to free a buffer can end up dropping a dirty
4922          * page
4923          *
4924          * Once the ref bit is set, it won't go away while the
4925          * buffer is dirty or in writeback, and it also won't
4926          * go away while we have the reference count on the
4927          * eb bumped.
4928          *
4929          * We can't just set the ref bit without bumping the
4930          * ref on the eb because free_extent_buffer might
4931          * see the ref bit and try to clear it.  If this happens
4932          * free_extent_buffer might end up dropping our original
4933          * ref by mistake and freeing the page before we are able
4934          * to add one more ref.
4935          *
4936          * So bump the ref count first, then set the bit.  If someone
4937          * beat us to it, drop the ref we added.
4938          */
4939         refs = atomic_read(&eb->refs);
4940         if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4941                 return;
4942
4943         spin_lock(&eb->refs_lock);
4944         if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4945                 atomic_inc(&eb->refs);
4946         spin_unlock(&eb->refs_lock);
4947 }
4948
4949 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4950                 struct page *accessed)
4951 {
4952         int num_pages, i;
4953
4954         check_buffer_tree_ref(eb);
4955
4956         num_pages = num_extent_pages(eb);
4957         for (i = 0; i < num_pages; i++) {
4958                 struct page *p = eb->pages[i];
4959
4960                 if (p != accessed)
4961                         mark_page_accessed(p);
4962         }
4963 }
4964
4965 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4966                                          u64 start)
4967 {
4968         struct extent_buffer *eb;
4969
4970         rcu_read_lock();
4971         eb = radix_tree_lookup(&fs_info->buffer_radix,
4972                                start >> PAGE_SHIFT);
4973         if (eb && atomic_inc_not_zero(&eb->refs)) {
4974                 rcu_read_unlock();
4975                 /*
4976                  * Lock our eb's refs_lock to avoid races with
4977                  * free_extent_buffer. When we get our eb it might be flagged
4978                  * with EXTENT_BUFFER_STALE, and another task running
4979                  * free_extent_buffer might have seen that flag set, seen
4980                  * eb->refs == 2, seen that the buffer isn't under IO (dirty and
4981                  * writeback flags not set) and that it's still in the tree (flag
4982                  * EXTENT_BUFFER_TREE_REF set), and therefore be in the process
4983                  * of decrementing the extent buffer's reference count twice.
4984                  * So here we could race and increment the eb's reference count,
4985                  * clear its stale flag, mark it as dirty and drop our reference
4986                  * before the other task finishes executing free_extent_buffer,
4987                  * which would later result in an attempt to free an extent
4988                  * buffer that is dirty.
4989                  */
4990                 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4991                         spin_lock(&eb->refs_lock);
4992                         spin_unlock(&eb->refs_lock);
4993                 }
4994                 mark_extent_buffer_accessed(eb, NULL);
4995                 return eb;
4996         }
4997         rcu_read_unlock();
4998
4999         return NULL;
5000 }
5001
5002 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5003 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
5004                                         u64 start)
5005 {
5006         struct extent_buffer *eb, *exists = NULL;
5007         int ret;
5008
5009         eb = find_extent_buffer(fs_info, start);
5010         if (eb)
5011                 return eb;
5012         eb = alloc_dummy_extent_buffer(fs_info, start);
5013         if (!eb)
5014                 return NULL;
5015         eb->fs_info = fs_info;
5016 again:
5017         ret = radix_tree_preload(GFP_NOFS);
5018         if (ret)
5019                 goto free_eb;
5020         spin_lock(&fs_info->buffer_lock);
5021         ret = radix_tree_insert(&fs_info->buffer_radix,
5022                                 start >> PAGE_SHIFT, eb);
5023         spin_unlock(&fs_info->buffer_lock);
5024         radix_tree_preload_end();
5025         if (ret == -EEXIST) {
5026                 exists = find_extent_buffer(fs_info, start);
5027                 if (exists)
5028                         goto free_eb;
5029                 else
5030                         goto again;
5031         }
5032         check_buffer_tree_ref(eb);
5033         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5034
5035         return eb;
5036 free_eb:
5037         btrfs_release_extent_buffer(eb);
5038         return exists;
5039 }
5040 #endif
5041
5042 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
5043                                           u64 start)
5044 {
5045         unsigned long len = fs_info->nodesize;
5046         int num_pages;
5047         int i;
5048         unsigned long index = start >> PAGE_SHIFT;
5049         struct extent_buffer *eb;
5050         struct extent_buffer *exists = NULL;
5051         struct page *p;
5052         struct address_space *mapping = fs_info->btree_inode->i_mapping;
5053         int uptodate = 1;
5054         int ret;
5055
5056         if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5057                 btrfs_err(fs_info, "bad tree block start %llu", start);
5058                 return ERR_PTR(-EINVAL);
5059         }
5060
5061         eb = find_extent_buffer(fs_info, start);
5062         if (eb)
5063                 return eb;
5064
5065         eb = __alloc_extent_buffer(fs_info, start, len);
5066         if (!eb)
5067                 return ERR_PTR(-ENOMEM);
5068
5069         num_pages = num_extent_pages(eb);
5070         for (i = 0; i < num_pages; i++, index++) {
5071                 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
5072                 if (!p) {
5073                         exists = ERR_PTR(-ENOMEM);
5074                         goto free_eb;
5075                 }
5076
5077                 spin_lock(&mapping->private_lock);
5078                 if (PagePrivate(p)) {
5079                         /*
5080                          * We could have already allocated an eb for this page
5081                          * and attached one, so let's see if we can get a ref on
5082                          * the existing eb.  If we can, we know it's good and we
5083                          * can just return that one; otherwise we know we can just
5084                          * overwrite page->private.
5085                          */
5086                         exists = (struct extent_buffer *)p->private;
5087                         if (atomic_inc_not_zero(&exists->refs)) {
5088                                 spin_unlock(&mapping->private_lock);
5089                                 unlock_page(p);
5090                                 put_page(p);
5091                                 mark_extent_buffer_accessed(exists, p);
5092                                 goto free_eb;
5093                         }
5094                         exists = NULL;
5095
5096                         /*
5097                          * Do this so attach doesn't complain, and so we can
5098                          * drop the ref that the old eb's page private held.
5099                          */
5100                         ClearPagePrivate(p);
5101                         WARN_ON(PageDirty(p));
5102                         put_page(p);
5103                 }
5104                 attach_extent_buffer_page(eb, p);
5105                 spin_unlock(&mapping->private_lock);
5106                 WARN_ON(PageDirty(p));
5107                 eb->pages[i] = p;
5108                 if (!PageUptodate(p))
5109                         uptodate = 0;
5110
5111                 /*
5112                  * We can't unlock the pages just yet since the extent buffer
5113                  * hasn't been properly inserted in the radix tree; this
5114                  * opens a race with btree_releasepage, which can free a page
5115                  * while we are still filling in all pages for the buffer, and
5116                  * we could crash.
5117                  */
5118         }
5119         if (uptodate)
5120                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5121 again:
5122         ret = radix_tree_preload(GFP_NOFS);
5123         if (ret) {
5124                 exists = ERR_PTR(ret);
5125                 goto free_eb;
5126         }
5127
5128         spin_lock(&fs_info->buffer_lock);
5129         ret = radix_tree_insert(&fs_info->buffer_radix,
5130                                 start >> PAGE_SHIFT, eb);
5131         spin_unlock(&fs_info->buffer_lock);
5132         radix_tree_preload_end();
5133         if (ret == -EEXIST) {
5134                 exists = find_extent_buffer(fs_info, start);
5135                 if (exists)
5136                         goto free_eb;
5137                 else
5138                         goto again;
5139         }
5140         /* add one reference for the tree */
5141         check_buffer_tree_ref(eb);
5142         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5143
5144         /*
5145          * Now it's safe to unlock the pages because any calls to
5146          * btree_releasepage will correctly detect that a page belongs to a
5147          * live buffer and won't free them prematurely.
5148          */
5149         for (i = 0; i < num_pages; i++)
5150                 unlock_page(eb->pages[i]);
5151         return eb;
5152
5153 free_eb:
5154         WARN_ON(!atomic_dec_and_test(&eb->refs));
5155         for (i = 0; i < num_pages; i++) {
5156                 if (eb->pages[i])
5157                         unlock_page(eb->pages[i]);
5158         }
5159
5160         btrfs_release_extent_buffer(eb);
5161         return exists;
5162 }
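/*
 * Editor's note (illustration, not part of the original source): a minimal
 * sketch of how callers typically pair alloc_extent_buffer() with
 * read_extent_buffer_pages() and free_extent_buffer().  The helper name
 * read_tree_block_sketch() is hypothetical and error handling is reduced to
 * the essentials.
 */
static struct extent_buffer *read_tree_block_sketch(struct btrfs_fs_info *fs_info,
						    u64 start, int mirror_num)
{
	struct extent_buffer *eb;
	int ret;

	/* Finds an existing buffer or creates a new one, with a ref held. */
	eb = alloc_extent_buffer(fs_info, start);
	if (IS_ERR(eb))
		return eb;

	/* Read every page of the buffer and wait for the IO to finish. */
	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
	if (ret) {
		free_extent_buffer(eb);
		return ERR_PTR(ret);
	}
	return eb;
}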
5163
5164 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5165 {
5166         struct extent_buffer *eb =
5167                         container_of(head, struct extent_buffer, rcu_head);
5168
5169         __free_extent_buffer(eb);
5170 }
5171
5172 static int release_extent_buffer(struct extent_buffer *eb)
5173 {
5174         lockdep_assert_held(&eb->refs_lock);
5175
5176         WARN_ON(atomic_read(&eb->refs) == 0);
5177         if (atomic_dec_and_test(&eb->refs)) {
5178                 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
5179                         struct btrfs_fs_info *fs_info = eb->fs_info;
5180
5181                         spin_unlock(&eb->refs_lock);
5182
5183                         spin_lock(&fs_info->buffer_lock);
5184                         radix_tree_delete(&fs_info->buffer_radix,
5185                                           eb->start >> PAGE_SHIFT);
5186                         spin_unlock(&fs_info->buffer_lock);
5187                 } else {
5188                         spin_unlock(&eb->refs_lock);
5189                 }
5190
5191                 /* Should be safe to release our pages at this point */
5192                 btrfs_release_extent_buffer_pages(eb);
5193 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5194                 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
5195                         __free_extent_buffer(eb);
5196                         return 1;
5197                 }
5198 #endif
5199                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5200                 return 1;
5201         }
5202         spin_unlock(&eb->refs_lock);
5203
5204         return 0;
5205 }
5206
5207 void free_extent_buffer(struct extent_buffer *eb)
5208 {
5209         int refs;
5210         int old;
5211         if (!eb)
5212                 return;
5213
5214         while (1) {
5215                 refs = atomic_read(&eb->refs);
5216                 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5217                     || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5218                         refs == 1))
5219                         break;
5220                 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5221                 if (old == refs)
5222                         return;
5223         }
5224
5225         spin_lock(&eb->refs_lock);
5226         if (atomic_read(&eb->refs) == 2 &&
5227             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5228             !extent_buffer_under_io(eb) &&
5229             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5230                 atomic_dec(&eb->refs);
5231
5232         /*
5233          * I know this is terrible, but it's temporary until we stop tracking
5234          * the uptodate bits and such for the extent buffers.
5235          */
5236         release_extent_buffer(eb);
5237 }
5238
5239 void free_extent_buffer_stale(struct extent_buffer *eb)
5240 {
5241         if (!eb)
5242                 return;
5243
5244         spin_lock(&eb->refs_lock);
5245         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5246
5247         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5248             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5249                 atomic_dec(&eb->refs);
5250         release_extent_buffer(eb);
5251 }
5252
5253 void clear_extent_buffer_dirty(struct extent_buffer *eb)
5254 {
5255         int i;
5256         int num_pages;
5257         struct page *page;
5258
5259         num_pages = num_extent_pages(eb);
5260
5261         for (i = 0; i < num_pages; i++) {
5262                 page = eb->pages[i];
5263                 if (!PageDirty(page))
5264                         continue;
5265
5266                 lock_page(page);
5267                 WARN_ON(!PagePrivate(page));
5268
5269                 clear_page_dirty_for_io(page);
5270                 xa_lock_irq(&page->mapping->i_pages);
5271                 if (!PageDirty(page))
5272                         __xa_clear_mark(&page->mapping->i_pages,
5273                                         page_index(page), PAGECACHE_TAG_DIRTY);
5274                 xa_unlock_irq(&page->mapping->i_pages);
5275                 ClearPageError(page);
5276                 unlock_page(page);
5277         }
5278         WARN_ON(atomic_read(&eb->refs) == 0);
5279 }
5280
5281 bool set_extent_buffer_dirty(struct extent_buffer *eb)
5282 {
5283         int i;
5284         int num_pages;
5285         bool was_dirty;
5286
5287         check_buffer_tree_ref(eb);
5288
5289         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5290
5291         num_pages = num_extent_pages(eb);
5292         WARN_ON(atomic_read(&eb->refs) == 0);
5293         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5294
5295         if (!was_dirty)
5296                 for (i = 0; i < num_pages; i++)
5297                         set_page_dirty(eb->pages[i]);
5298
5299 #ifdef CONFIG_BTRFS_DEBUG
5300         for (i = 0; i < num_pages; i++)
5301                 ASSERT(PageDirty(eb->pages[i]));
5302 #endif
5303
5304         return was_dirty;
5305 }
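/*
 * Editor's note (illustration, not part of the original source): a sketch of
 * the usual way these primitives compose when a tree block is modified.  The
 * offset and value below are placeholders; real callers normally go through
 * higher level setters and btrfs_mark_buffer_dirty().
 *
 *	write_extent_buffer(eb, &val, offset, sizeof(val));
 *	set_extent_buffer_dirty(eb);
 */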
5306
5307 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5308 {
5309         int i;
5310         struct page *page;
5311         int num_pages;
5312
5313         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5314         num_pages = num_extent_pages(eb);
5315         for (i = 0; i < num_pages; i++) {
5316                 page = eb->pages[i];
5317                 if (page)
5318                         ClearPageUptodate(page);
5319         }
5320 }
5321
5322 void set_extent_buffer_uptodate(struct extent_buffer *eb)
5323 {
5324         int i;
5325         struct page *page;
5326         int num_pages;
5327
5328         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5329         num_pages = num_extent_pages(eb);
5330         for (i = 0; i < num_pages; i++) {
5331                 page = eb->pages[i];
5332                 SetPageUptodate(page);
5333         }
5334 }
5335
5336 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
5337 {
5338         int i;
5339         struct page *page;
5340         int err;
5341         int ret = 0;
5342         int locked_pages = 0;
5343         int all_uptodate = 1;
5344         int num_pages;
5345         unsigned long num_reads = 0;
5346         struct bio *bio = NULL;
5347         unsigned long bio_flags = 0;
5348         struct extent_io_tree *tree = &BTRFS_I(eb->fs_info->btree_inode)->io_tree;
5349
5350         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5351                 return 0;
5352
5353         num_pages = num_extent_pages(eb);
5354         for (i = 0; i < num_pages; i++) {
5355                 page = eb->pages[i];
5356                 if (wait == WAIT_NONE) {
5357                         if (!trylock_page(page))
5358                                 goto unlock_exit;
5359                 } else {
5360                         lock_page(page);
5361                 }
5362                 locked_pages++;
5363         }
5364         /*
5365          * We need to lock all pages first to make sure that
5366          * the uptodate bit of our pages won't be affected by
5367          * clear_extent_buffer_uptodate().
5368          */
5369         for (i = 0; i < num_pages; i++) {
5370                 page = eb->pages[i];
5371                 if (!PageUptodate(page)) {
5372                         num_reads++;
5373                         all_uptodate = 0;
5374                 }
5375         }
5376
5377         if (all_uptodate) {
5378                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5379                 goto unlock_exit;
5380         }
5381
5382         clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5383         eb->read_mirror = 0;
5384         atomic_set(&eb->io_pages, num_reads);
5385         for (i = 0; i < num_pages; i++) {
5386                 page = eb->pages[i];
5387
5388                 if (!PageUptodate(page)) {
5389                         if (ret) {
5390                                 atomic_dec(&eb->io_pages);
5391                                 unlock_page(page);
5392                                 continue;
5393                         }
5394
5395                         ClearPageError(page);
5396                         err = __extent_read_full_page(tree, page,
5397                                                       btree_get_extent, &bio,
5398                                                       mirror_num, &bio_flags,
5399                                                       REQ_META);
5400                         if (err) {
5401                                 ret = err;
5402                                 /*
5403                                  * We used &bio in __extent_read_full_page()
5404                                  * above, so if it returned an error the
5405                                  * current page failed to add itself to the
5406                                  * bio and has already been unlocked.
5407                                  *
5408                                  * We must decrement io_pages ourselves.
5409                                  */
5410                                 atomic_dec(&eb->io_pages);
5411                         }
5412                 } else {
5413                         unlock_page(page);
5414                 }
5415         }
5416
5417         if (bio) {
5418                 err = submit_one_bio(bio, mirror_num, bio_flags);
5419                 if (err)
5420                         return err;
5421         }
5422
5423         if (ret || wait != WAIT_COMPLETE)
5424                 return ret;
5425
5426         for (i = 0; i < num_pages; i++) {
5427                 page = eb->pages[i];
5428                 wait_on_page_locked(page);
5429                 if (!PageUptodate(page))
5430                         ret = -EIO;
5431         }
5432
5433         return ret;
5434
5435 unlock_exit:
5436         while (locked_pages > 0) {
5437                 locked_pages--;
5438                 page = eb->pages[locked_pages];
5439                 unlock_page(page);
5440         }
5441         return ret;
5442 }
5443
5444 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5445                         unsigned long start, unsigned long len)
5446 {
5447         size_t cur;
5448         size_t offset;
5449         struct page *page;
5450         char *kaddr;
5451         char *dst = (char *)dstv;
5452         size_t start_offset = offset_in_page(eb->start);
5453         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5454
5455         if (start + len > eb->len) {
5456                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5457                      eb->start, eb->len, start, len);
5458                 memset(dst, 0, len);
5459                 return;
5460         }
5461
5462         offset = offset_in_page(start_offset + start);
5463
5464         while (len > 0) {
5465                 page = eb->pages[i];
5466
5467                 cur = min(len, (PAGE_SIZE - offset));
5468                 kaddr = page_address(page);
5469                 memcpy(dst, kaddr + offset, cur);
5470
5471                 dst += cur;
5472                 len -= cur;
5473                 offset = 0;
5474                 i++;
5475         }
5476 }
5477
5478 int read_extent_buffer_to_user(const struct extent_buffer *eb,
5479                                void __user *dstv,
5480                                unsigned long start, unsigned long len)
5481 {
5482         size_t cur;
5483         size_t offset;
5484         struct page *page;
5485         char *kaddr;
5486         char __user *dst = (char __user *)dstv;
5487         size_t start_offset = offset_in_page(eb->start);
5488         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5489         int ret = 0;
5490
5491         WARN_ON(start > eb->len);
5492         WARN_ON(start + len > eb->start + eb->len);
5493
5494         offset = offset_in_page(start_offset + start);
5495
5496         while (len > 0) {
5497                 page = eb->pages[i];
5498
5499                 cur = min(len, (PAGE_SIZE - offset));
5500                 kaddr = page_address(page);
5501                 if (copy_to_user(dst, kaddr + offset, cur)) {
5502                         ret = -EFAULT;
5503                         break;
5504                 }
5505
5506                 dst += cur;
5507                 len -= cur;
5508                 offset = 0;
5509                 i++;
5510         }
5511
5512         return ret;
5513 }
5514
5515 /*
5516  * return 0 if the item is found within a page.
5517  * return 1 if the item spans two pages.
5518  * return -EINVAL otherwise.
5519  */
5520 int map_private_extent_buffer(const struct extent_buffer *eb,
5521                               unsigned long start, unsigned long min_len,
5522                               char **map, unsigned long *map_start,
5523                               unsigned long *map_len)
5524 {
5525         size_t offset;
5526         char *kaddr;
5527         struct page *p;
5528         size_t start_offset = offset_in_page(eb->start);
5529         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5530         unsigned long end_i = (start_offset + start + min_len - 1) >>
5531                 PAGE_SHIFT;
5532
5533         if (start + min_len > eb->len) {
5534                 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5535                        eb->start, eb->len, start, min_len);
5536                 return -EINVAL;
5537         }
5538
5539         if (i != end_i)
5540                 return 1;
5541
5542         if (i == 0) {
5543                 offset = start_offset;
5544                 *map_start = 0;
5545         } else {
5546                 offset = 0;
5547                 *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
5548         }
5549
5550         p = eb->pages[i];
5551         kaddr = page_address(p);
5552         *map = kaddr + offset;
5553         *map_len = PAGE_SIZE - offset;
5554         return 0;
5555 }
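/*
 * Editor's note (illustration, not part of the original source): a sketch of
 * how a caller can use the return convention above, falling back to a copy
 * when the requested item straddles a page boundary.  The helper name
 * read_eb_member_sketch() is hypothetical.
 */
static void read_eb_member_sketch(const struct extent_buffer *eb,
				  unsigned long start, unsigned long len,
				  void *dst)
{
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;

	if (map_private_extent_buffer(eb, start, len, &kaddr,
				      &map_start, &map_len) == 0) {
		/* The whole item lives in one page: copy it straight out. */
		memcpy(dst, kaddr + (start - map_start), len);
	} else {
		/* Spans two pages (or a bad range): use the generic copy. */
		read_extent_buffer(eb, dst, start, len);
	}
}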
5556
5557 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5558                          unsigned long start, unsigned long len)
5559 {
5560         size_t cur;
5561         size_t offset;
5562         struct page *page;
5563         char *kaddr;
5564         char *ptr = (char *)ptrv;
5565         size_t start_offset = offset_in_page(eb->start);
5566         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5567         int ret = 0;
5568
5569         WARN_ON(start > eb->len);
5570         WARN_ON(start + len > eb->start + eb->len);
5571
5572         offset = offset_in_page(start_offset + start);
5573
5574         while (len > 0) {
5575                 page = eb->pages[i];
5576
5577                 cur = min(len, (PAGE_SIZE - offset));
5578
5579                 kaddr = page_address(page);
5580                 ret = memcmp(ptr, kaddr + offset, cur);
5581                 if (ret)
5582                         break;
5583
5584                 ptr += cur;
5585                 len -= cur;
5586                 offset = 0;
5587                 i++;
5588         }
5589         return ret;
5590 }
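
/*
 * Illustrative use of memcmp_extent_buffer() (hypothetical, not from this
 * file): check that the fsid recorded in a tree block's header matches an
 * expected value, where 'expected_fsid' is a placeholder u8 array of
 * BTRFS_FSID_SIZE bytes:
 *
 *	if (memcmp_extent_buffer(eb, expected_fsid,
 *				 offsetof(struct btrfs_header, fsid),
 *				 BTRFS_FSID_SIZE))
 *		return -EUCLEAN;
 *
 * Like read_extent_buffer(), the comparison walks transparently across page
 * boundaries, so callers never need to know how the buffer is split into
 * pages.
 */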
5591
5592 void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
5593                 const void *srcv)
5594 {
5595         char *kaddr;
5596
5597         WARN_ON(!PageUptodate(eb->pages[0]));
5598         kaddr = page_address(eb->pages[0]);
5599         memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5600                         BTRFS_FSID_SIZE);
5601 }
5602
5603 void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
5604 {
5605         char *kaddr;
5606
5607         WARN_ON(!PageUptodate(eb->pages[0]));
5608         kaddr = page_address(eb->pages[0]);
5609         memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5610                         BTRFS_FSID_SIZE);
5611 }
5612
5613 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5614                          unsigned long start, unsigned long len)
5615 {
5616         size_t cur;
5617         size_t offset;
5618         struct page *page;
5619         char *kaddr;
5620         char *src = (char *)srcv;
5621         size_t start_offset = offset_in_page(eb->start);
5622         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5623
5624         WARN_ON(start > eb->len);
5625         WARN_ON(start + len > eb->start + eb->len);
5626
5627         offset = offset_in_page(start_offset + start);
5628
5629         while (len > 0) {
5630                 page = eb->pages[i];
5631                 WARN_ON(!PageUptodate(page));
5632
5633                 cur = min(len, PAGE_SIZE - offset);
5634                 kaddr = page_address(page);
5635                 memcpy(kaddr + offset, src, cur);
5636
5637                 src += cur;
5638                 len -= cur;
5639                 offset = 0;
5640                 i++;
5641         }
5642 }
5643
5644 void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
5645                 unsigned long len)
5646 {
5647         size_t cur;
5648         size_t offset;
5649         struct page *page;
5650         char *kaddr;
5651         size_t start_offset = offset_in_page(eb->start);
5652         unsigned long i = (start_offset + start) >> PAGE_SHIFT;
5653
5654         WARN_ON(start > eb->len);
5655         WARN_ON(start + len > eb->start + eb->len);
5656
5657         offset = offset_in_page(start_offset + start);
5658
5659         while (len > 0) {
5660                 page = eb->pages[i];
5661                 WARN_ON(!PageUptodate(page));
5662
5663                 cur = min(len, PAGE_SIZE - offset);
5664                 kaddr = page_address(page);
5665                 memset(kaddr + offset, 0, cur);
5666
5667                 len -= cur;
5668                 offset = 0;
5669                 i++;
5670         }
5671 }
5672
5673 void copy_extent_buffer_full(struct extent_buffer *dst,
5674                              struct extent_buffer *src)
5675 {
5676         int i;
5677         int num_pages;
5678
5679         ASSERT(dst->len == src->len);
5680
5681         num_pages = num_extent_pages(dst);
5682         for (i = 0; i < num_pages; i++)
5683                 copy_page(page_address(dst->pages[i]),
5684                                 page_address(src->pages[i]));
5685 }
5686
5687 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5688                         unsigned long dst_offset, unsigned long src_offset,
5689                         unsigned long len)
5690 {
5691         u64 dst_len = dst->len;
5692         size_t cur;
5693         size_t offset;
5694         struct page *page;
5695         char *kaddr;
5696         size_t start_offset = offset_in_page(dst->start);
5697         unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
5698
5699         WARN_ON(src->len != dst_len);
5700
5701         offset = offset_in_page(start_offset + dst_offset);
5702
5703         while (len > 0) {
5704                 page = dst->pages[i];
5705                 WARN_ON(!PageUptodate(page));
5706
5707                 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5708
5709                 kaddr = page_address(page);
5710                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5711
5712                 src_offset += cur;
5713                 len -= cur;
5714                 offset = 0;
5715                 i++;
5716         }
5717 }
5718
5719 /**
5720  * eb_bitmap_offset() - calculate the page and offset of the byte containing
5721  * the given bit number
5722  * @eb: the extent buffer
5723  * @start: offset of the bitmap item in the extent buffer
5724  * @nr: bit number
5725  * @page_index: return index of the page in the extent buffer that contains the
5726  * given bit number
5727  * @page_offset: return offset into the page given by page_index
5728  *
5729  * This helper hides the ugliness of finding the byte in an extent buffer which
5730  * contains a given bit.
5731  */
5732 static inline void eb_bitmap_offset(struct extent_buffer *eb,
5733                                     unsigned long start, unsigned long nr,
5734                                     unsigned long *page_index,
5735                                     size_t *page_offset)
5736 {
5737         size_t start_offset = offset_in_page(eb->start);
5738         size_t byte_offset = BIT_BYTE(nr);
5739         size_t offset;
5740
5741         /*
5742          * The byte we want is the offset of the extent buffer + the offset of
5743          * the bitmap item in the extent buffer + the offset of the byte in the
5744          * bitmap item.
5745          */
5746         offset = start_offset + start + byte_offset;
5747
5748         *page_index = offset >> PAGE_SHIFT;
5749         *page_offset = offset_in_page(offset);
5750 }
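
/*
 * Worked example for eb_bitmap_offset() (illustrative numbers, assuming 4K
 * pages and an extent buffer that starts on a page boundary, i.e.
 * start_offset == 0): for a bitmap item at byte offset start == 5000 and bit
 * nr == 70, byte_offset is 70 / 8 == 8, so offset == 5000 + 8 == 5008, which
 * gives *page_index == 5008 >> 12 == 1 and *page_offset == 5008 & 4095 == 912.
 * Bit 70 therefore lives in bit (70 & 7) == 6 of byte 912 of eb->pages[1].
 */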
5751
5752 /**
5753  * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5754  * @eb: the extent buffer
5755  * @start: offset of the bitmap item in the extent buffer
5756  * @nr: bit number to test
5757  */
5758 int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
5759                            unsigned long nr)
5760 {
5761         u8 *kaddr;
5762         struct page *page;
5763         unsigned long i;
5764         size_t offset;
5765
5766         eb_bitmap_offset(eb, start, nr, &i, &offset);
5767         page = eb->pages[i];
5768         WARN_ON(!PageUptodate(page));
5769         kaddr = page_address(page);
5770         return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5771 }
5772
5773 /**
5774  * extent_buffer_bitmap_set - set an area of a bitmap
5775  * @eb: the extent buffer
5776  * @start: offset of the bitmap item in the extent buffer
5777  * @pos: bit number of the first bit
5778  * @len: number of bits to set
5779  */
5780 void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
5781                               unsigned long pos, unsigned long len)
5782 {
5783         u8 *kaddr;
5784         struct page *page;
5785         unsigned long i;
5786         size_t offset;
5787         const unsigned int size = pos + len;
5788         int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5789         u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5790
5791         eb_bitmap_offset(eb, start, pos, &i, &offset);
5792         page = eb->pages[i];
5793         WARN_ON(!PageUptodate(page));
5794         kaddr = page_address(page);
5795
5796         while (len >= bits_to_set) {
5797                 kaddr[offset] |= mask_to_set;
5798                 len -= bits_to_set;
5799                 bits_to_set = BITS_PER_BYTE;
5800                 mask_to_set = ~0;
5801                 if (++offset >= PAGE_SIZE && len > 0) {
5802                         offset = 0;
5803                         page = eb->pages[++i];
5804                         WARN_ON(!PageUptodate(page));
5805                         kaddr = page_address(page);
5806                 }
5807         }
5808         if (len) {
5809                 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5810                 kaddr[offset] |= mask_to_set;
5811         }
5812 }
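
/*
 * Worked example for extent_buffer_bitmap_set() (illustrative numbers): with
 * pos == 3 and len == 10, size == 13, bits_to_set starts at 5 and mask_to_set
 * is BITMAP_FIRST_BYTE_MASK(3) == 0xf8.  The first loop iteration ORs 0xf8
 * into the first byte (bits 3-7) and advances to the next byte, leaving
 * len == 5, which is less than a full byte, so the loop exits.  The tail then
 * ORs 0xff & BITMAP_LAST_BYTE_MASK(13) == 0x1f into that next byte
 * (bits 0-4), for ten bits set in total.  Callers elsewhere in btrfs, e.g.
 * the free space tree code, use this helper and its clearing counterpart
 * below to update free space bitmap items in place.
 */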
5813
5814
5815 /**
5816  * extent_buffer_bitmap_clear - clear an area of a bitmap
5817  * @eb: the extent buffer
5818  * @start: offset of the bitmap item in the extent buffer
5819  * @pos: bit number of the first bit
5820  * @len: number of bits to clear
5821  */
5822 void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
5823                                 unsigned long pos, unsigned long len)
5824 {
5825         u8 *kaddr;
5826         struct page *page;
5827         unsigned long i;
5828         size_t offset;
5829         const unsigned int size = pos + len;
5830         int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5831         u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5832
5833         eb_bitmap_offset(eb, start, pos, &i, &offset);
5834         page = eb->pages[i];
5835         WARN_ON(!PageUptodate(page));
5836         kaddr = page_address(page);
5837
5838         while (len >= bits_to_clear) {
5839                 kaddr[offset] &= ~mask_to_clear;
5840                 len -= bits_to_clear;
5841                 bits_to_clear = BITS_PER_BYTE;
5842                 mask_to_clear = ~0;
5843                 if (++offset >= PAGE_SIZE && len > 0) {
5844                         offset = 0;
5845                         page = eb->pages[++i];
5846                         WARN_ON(!PageUptodate(page));
5847                         kaddr = page_address(page);
5848                 }
5849         }
5850         if (len) {
5851                 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5852                 kaddr[offset] &= ~mask_to_clear;
5853         }
5854 }
5855
5856 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5857 {
5858         unsigned long distance = (src > dst) ? src - dst : dst - src;
5859         return distance < len;
5860 }
5861
5862 static void copy_pages(struct page *dst_page, struct page *src_page,
5863                        unsigned long dst_off, unsigned long src_off,
5864                        unsigned long len)
5865 {
5866         char *dst_kaddr = page_address(dst_page);
5867         char *src_kaddr;
5868         int must_memmove = 0;
5869
5870         if (dst_page != src_page) {
5871                 src_kaddr = page_address(src_page);
5872         } else {
5873                 src_kaddr = dst_kaddr;
5874                 if (areas_overlap(src_off, dst_off, len))
5875                         must_memmove = 1;
5876         }
5877
5878         if (must_memmove)
5879                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5880         else
5881                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5882 }
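
/*
 * Illustrative numbers for the memmove-vs-memcpy decision above: when source
 * and destination live in the same page, copy_pages() only needs memmove()
 * if the two byte ranges actually overlap.  With src_off == 100,
 * dst_off == 150 and len == 80 the distance is 50, which is smaller than the
 * length, so the ranges overlap and memmove() is used; with len == 40 the
 * distance is no smaller than the length, the ranges are disjoint, and the
 * cheaper memcpy() is safe.  Copies between two different pages can never
 * overlap, so they always take the memcpy() path.
 */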
5883
5884 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5885                            unsigned long src_offset, unsigned long len)
5886 {
5887         struct btrfs_fs_info *fs_info = dst->fs_info;
5888         size_t cur;
5889         size_t dst_off_in_page;
5890         size_t src_off_in_page;
5891         size_t start_offset = offset_in_page(dst->start);
5892         unsigned long dst_i;
5893         unsigned long src_i;
5894
5895         if (src_offset + len > dst->len) {
5896                 btrfs_err(fs_info,
5897                         "memcpy bogus src_offset %lu move len %lu dst len %lu",
5898                          src_offset, len, dst->len);
5899                 BUG();
5900         }
5901         if (dst_offset + len > dst->len) {
5902                 btrfs_err(fs_info,
5903                         "memcpy bogus dst_offset %lu move len %lu dst len %lu",
5904                          dst_offset, len, dst->len);
5905                 BUG();
5906         }
5907
5908         while (len > 0) {
5909                 dst_off_in_page = offset_in_page(start_offset + dst_offset);
5910                 src_off_in_page = offset_in_page(start_offset + src_offset);
5911
5912                 dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
5913                 src_i = (start_offset + src_offset) >> PAGE_SHIFT;
5914
5915                 cur = min(len, (unsigned long)(PAGE_SIZE -
5916                                                src_off_in_page));
5917                 cur = min_t(unsigned long, cur,
5918                         (unsigned long)(PAGE_SIZE - dst_off_in_page));
5919
5920                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5921                            dst_off_in_page, src_off_in_page, cur);
5922
5923                 src_offset += cur;
5924                 dst_offset += cur;
5925                 len -= cur;
5926         }
5927 }
5928
5929 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5930                            unsigned long src_offset, unsigned long len)
5931 {
5932         struct btrfs_fs_info *fs_info = dst->fs_info;
5933         size_t cur;
5934         size_t dst_off_in_page;
5935         size_t src_off_in_page;
5936         unsigned long dst_end = dst_offset + len - 1;
5937         unsigned long src_end = src_offset + len - 1;
5938         size_t start_offset = offset_in_page(dst->start);
5939         unsigned long dst_i;
5940         unsigned long src_i;
5941
5942         if (src_offset + len > dst->len) {
5943                 btrfs_err(fs_info,
5944                           "memmove bogus src_offset %lu move len %lu dst len %lu",
5945                           src_offset, len, dst->len);
5946                 BUG();
5947         }
5948         if (dst_offset + len > dst->len) {
5949                 btrfs_err(fs_info,
5950                           "memmove bogus dst_offset %lu move len %lu dst len %lu",
5951                           dst_offset, len, dst->len);
5952                 BUG();
5953         }
5954         if (dst_offset < src_offset) {
5955                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5956                 return;
5957         }
5958         while (len > 0) {
5959                 dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
5960                 src_i = (start_offset + src_end) >> PAGE_SHIFT;
5961
5962                 dst_off_in_page = offset_in_page(start_offset + dst_end);
5963                 src_off_in_page = offset_in_page(start_offset + src_end);
5964
5965                 cur = min_t(unsigned long, len, src_off_in_page + 1);
5966                 cur = min(cur, dst_off_in_page + 1);
5967                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
5968                            dst_off_in_page - cur + 1,
5969                            src_off_in_page - cur + 1, cur);
5970
5971                 dst_end -= cur;
5972                 src_end -= cur;
5973                 len -= cur;
5974         }
5975 }
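
/*
 * Illustrative call (hypothetical, not from this file): shifting 100 bytes of
 * item data up by 20 bytes inside the same leaf,
 *
 *	memmove_extent_buffer(leaf, 25, 5, 100);
 *
 * has dst_offset > src_offset with overlapping ranges, so the loop above
 * copies page-sized (or smaller) chunks starting from the tail of the range
 * and works backwards; copying chunk by chunk from the front would overwrite
 * parts of the source that later chunks still need.  When
 * dst_offset < src_offset the ranges can safely be copied front to back,
 * which is why the function simply defers to memcpy_extent_buffer() in that
 * case.
 */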
5976
5977 int try_release_extent_buffer(struct page *page)
5978 {
5979         struct extent_buffer *eb;
5980
5981         /*
5982          * We need to make sure nobody is attaching this page to an eb right
5983          * now.
5984          */
5985         spin_lock(&page->mapping->private_lock);
5986         if (!PagePrivate(page)) {
5987                 spin_unlock(&page->mapping->private_lock);
5988                 return 1;
5989         }
5990
5991         eb = (struct extent_buffer *)page->private;
5992         BUG_ON(!eb);
5993
5994         /*
5995          * This is a little awful but should be ok; we need to make sure that
5996          * the eb doesn't disappear out from under us while we're looking at
5997          * this page.
5998          */
5999         spin_lock(&eb->refs_lock);
6000         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
6001                 spin_unlock(&eb->refs_lock);
6002                 spin_unlock(&page->mapping->private_lock);
6003                 return 0;
6004         }
6005         spin_unlock(&page->mapping->private_lock);
6006
6007         /*
6008          * If tree ref isn't set then we know the ref on this eb is a real ref,
6009          * so just return; this page will likely be freed soon anyway.
6010          */
6011         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6012                 spin_unlock(&eb->refs_lock);
6013                 return 0;
6014         }
6015
6016         return release_extent_buffer(eb);
6017 }
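
/*
 * Sketch of the expected calling context (hypothetical shape, not copied from
 * this file): try_release_extent_buffer() is meant to back a releasepage
 * style callback for the btree inode's address space, along the lines of
 *
 *	static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 *	{
 *		if (PageWriteback(page) || PageDirty(page))
 *			return 0;
 *		return try_release_extent_buffer(page);
 *	}
 *
 * so that memory reclaim can drop clean, unused tree blocks while the checks
 * above keep pages that still belong to a live or in-flight extent buffer.
 */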