#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "transaction.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}
#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static DEFINE_SPINLOCK(leak_lock);

static inline
void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_del(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline
void btrfs_leak_debug_check(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
}
#define btrfs_debug_check_extent_io_range(tree, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode;
	u64 isize;

	if (!tree->mapping)
		return;

	inode = tree->mapping->host;
	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(inode), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(new, head)	do {} while (0)
#define btrfs_leak_debug_del(entry)	do {} while (0)
#define btrfs_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};
struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
	unsigned long bio_flags;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};
static void add_extent_changeset(struct extent_state *state, unsigned bits,
				 struct extent_changeset *changeset,
				 int set)
{
	int ret;

	if (!changeset)
		return;
	if (set && (state->state & bits) == bits)
		return;
	if (!set && (state->state & bits) == 0)
		return;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	/* ENOMEM */
	BUG_ON(ret < 0);
}
static noinline void flush_write_bio(void *data);

static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
	if (!tree->mapping)
		return NULL;
	return btrfs_sb(tree->mapping->host->i_sb);
}
int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;

	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
				     offsetof(struct btrfs_io_bio, bio));
	if (!btrfs_bioset)
		goto free_buffer_cache;

	if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_free(btrfs_bioset);
	btrfs_bioset = NULL;

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	extent_state_cache = NULL;
	return -ENOMEM;
}
void extent_io_exit(void)
{
	btrfs_leak_debug_check();

	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_state_cache);
	kmem_cache_destroy(extent_buffer_cache);
	if (btrfs_bioset)
		bioset_free(btrfs_bioset);
}
void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->mapping = mapping;
}
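
/*
 * Usage sketch (illustrative only, not part of this file): inode setup
 * initializes its two per-inode trees roughly like
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 *	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
 *			    inode->i_mapping);
 */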
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&state->leak_list, &states);
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}
void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
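
/*
 * __etree_search - walk the tree for the entry containing 'offset'.  Returns
 * the matching node, or NULL after filling in (when the caller asked for
 * them) prev_ret/next_ret with the entries around the hole and
 * p_ret/parent_ret with the rb-tree insertion point for that offset.
 */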
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
	if (!ret)
		return prev;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}
static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
		        struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}
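
/*
 * Illustrative sketch (not part of the original file): with two adjacent
 * states carrying identical bits,
 *
 *	[0, 4095] EXTENT_DIRTY	[4096, 8191] EXTENT_DIRTY
 *
 * a call to merge_state() on either of them leaves a single state
 *
 *	[0, 8191] EXTENT_DIRTY
 *
 * in the tree; the absorbed extent_state is erased and freed.
 */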
static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, unsigned *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits,
			   struct extent_changeset *changeset);
/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			unsigned *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start)
		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
		       end, start);
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of %llu %llu\n",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}
static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}
/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}
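
/*
 * Worked example (illustrative only): for orig = [0, 8191] and split = 4096,
 * split_state() leaves prealloc = [0, 4095] and orig = [4096, 8191] in the
 * tree, both carrying orig's state bits.
 */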
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	add_extent_changeset(state, bits_to_clear, changeset, 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
		    "Extent tree was modified by another "
		    "thread while locked.");
}
/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			      unsigned bits, int wake, int delete,
			      struct extent_state **cached_state,
			      gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover too any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;
}
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}
/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned *bits, struct extent_changeset *changeset)
{
	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	add_extent_changeset(state, bits_to_set, changeset, 1);
	state->state |= bits_to_set;
}
static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_IOBITS | EXTENT_BOUNDARY);
}
/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive This takes the tree lock.
 */

static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned bits, unsigned exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover too any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 * failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
				cached_state, mask, NULL);
}
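
/*
 * Usage sketch (illustrative only, not from this file): marking a range
 * dirty with no exclusive bits, so failed_start is unused and may be NULL:
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_bit(tree, 0, 16383, EXTENT_DIRTY, NULL, &cached, GFP_NOFS);
 *	free_extent_state(cached);
 */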
/**
 * convert_extent_bit - convert all bits in a given range from one bit to
 * 			another
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like LOCK.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, &bits, NULL);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, NULL);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, &clear_bits, 0,
						NULL);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, NULL);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	cond_resched();
	first_iteration = false;
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}
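
/*
 * Usage sketch (illustrative only): converting a delalloc range to dirty in
 * one pass, as described in the kernel-doc above:
 *
 *	struct extent_state *cached = NULL;
 *
 *	convert_extent_bit(tree, start, end, EXTENT_DIRTY, EXTENT_DELALLOC,
 *			   &cached);
 *	free_extent_state(cached);
 */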
/* wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset)
{
	/*
	 * We don't support EXTENT_LOCKED yet, as current changeset will
	 * record any bits changed, so for EXTENT_LOCKED case, it will
	 * either fail with -EEXIST or changeset will record the whole
	 * range.
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
				changeset);
}

int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask)
{
	return __clear_extent_bit(tree, start, end, bits, wake, delete,
				  cached, mask, NULL);
}

int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset)
{
	/*
	 * Don't support EXTENT_LOCKED case, same reason as
	 * set_record_extent_bits().
	 */
	BUG_ON(bits & EXTENT_LOCKED);

	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
				  changeset);
}
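
/*
 * Usage sketch (illustrative only, field names assume the extent_changeset
 * layout with bytes_changed and a range_changed ulist): the *_record_*
 * wrappers thread a changeset through so the caller can see how many bytes
 * actually changed state, e.g. for qgroup reserve accounting:
 *
 *	struct extent_changeset changeset;
 *
 *	changeset.bytes_changed = 0;
 *	changeset.range_changed = ulist_alloc(GFP_NOFS);
 *	ret = set_record_extent_bits(tree, start, end,
 *				     EXTENT_QGROUP_RESERVED, &changeset);
 *	... changeset.bytes_changed now holds the newly set byte count ...
 *	ulist_free(changeset.range_changed);
 */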
/*
 * either insert or lock state struct between start and end use mask to tell
 * us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached_state)
{
	int err;
	u64 failed_start;

	while (1) {
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
				       EXTENT_LOCKED, &failed_start,
				       cached_state, GFP_NOFS, NULL);
		if (err == -EEXIST) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else
			break;
		WARN_ON(start > end);
	}
	return err;
}
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS, NULL);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
		return 0;
	}
	return 1;
}
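
/*
 * Usage sketch (illustrative only): the typical pattern is to lock a byte
 * range, operate on it, then unlock through the cached state:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	... read or modify the range ...
 *	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
 */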
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		clear_page_dirty_for_io(page);
		put_page(page);
		index++;
	}
}

void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		__set_page_dirty_nobuffers(page);
		account_page_redirty(page);
		put_page(page);
		index++;
	}
}
/*
 * helper function to set both pages and extents in the tree writeback
 */
static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		set_page_writeback(page);
		put_page(page);
		index++;
	}
}
/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will returned if
 * nothing was found after 'start'
 */
static struct extent_state *
find_first_extent_bit_state(struct extent_io_tree *tree,
			    u64 start, unsigned bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}
/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned. If found something, return 0.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	struct rb_node *n;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && extent_state_in_tree(state)) {
			n = rb_next(&state->rb_node);
			while (n) {
				state = rb_entry(n, struct extent_state,
						 rb_node);
				if (state->state & bits)
					goto got_it;
				n = rb_next(n);
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state_if_flags(state, cached_state, 0);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
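
/*
 * Usage sketch (illustrative only): walking all dirty ranges from offset 0:
 *
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */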
/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}
static noinline void __unlock_for_delalloc(struct inode *inode,
					   struct page *locked_page,
					   u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
}
static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					put_page(pages[i]);
					goto done;
				}
			}
			put_page(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_SHIFT);
	}
	return ret;
}
/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
STATIC u64 find_lock_delalloc_range(struct inode *inode,
				    struct extent_io_tree *tree,
				    struct page *locked_page, u64 *start,
				    u64 *end, u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return 0;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes)
		delalloc_end = delalloc_start + max_bytes - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		cached_state = NULL;
		if (!loops) {
			max_bytes = PAGE_SIZE;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
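
/*
 * Note for callers (summary of the above, not from the original file): on
 * success the pages in [*start, *end] are locked, EXTENT_LOCKED is held on
 * that byte range, and the range is known to still be delalloc;
 * writepage-style callers typically repeat the call with
 * *start = *end + 1 until it returns 0.
 */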
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  unsigned clear_bits,
				  unsigned long page_ops)
{
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (page_ops == 0)
		return;

	if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (page_ops & PAGE_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				put_page(pages[i]);
				continue;
			}
			if (page_ops & PAGE_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (page_ops & PAGE_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (page_ops & PAGE_SET_ERROR)
				SetPageError(pages[i]);
			if (page_ops & PAGE_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (page_ops & PAGE_UNLOCK)
				unlock_page(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
}
/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (WARN_ON(search_end <= cur_start))
		return 0;

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
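
/*
 * Usage sketch (illustrative only): counting up to SZ_1M of contiguous
 * delalloc bytes starting at 'offset' (contig == 1 stops at the first gap):
 *
 *	u64 start = offset;
 *	u64 bytes;
 *
 *	bytes = count_range_bits(tree, &start, (u64)-1, SZ_1M,
 *				 EXTENT_DELALLOC, 1);
 */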
/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state there already, this does nothing.
 */
static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
				      struct io_failure_record *failrec)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->failrec = failrec;
out:
	spin_unlock(&tree->lock);
	return ret;
}

static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
				      struct io_failure_record **failrec)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*failrec = state->failrec;
out:
	spin_unlock(&tree->lock);
	return ret;
}
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
	    cached->end > start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}
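
/*
 * Usage sketch (illustrative only): with filled == 1 this answers "does the
 * whole range carry the bit?", with filled == 0 it answers "does any byte
 * in the range carry it?":
 *
 *	if (test_range_bit(tree, start, end, EXTENT_DELALLOC, 1, NULL))
 *		... every byte of [start, end] is delalloc ...
 */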
/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
}
int free_io_failure(struct inode *inode, struct io_failure_record *rec)
{
	int ret;
	int err = 0;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;

	set_state_failrec(failure_tree, rec->start, NULL);
	ret = clear_extent_bits(failure_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_LOCKED | EXTENT_DIRTY);
	if (ret)
		err = ret;

	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
				rec->start + rec->len - 1,
				EXTENT_DAMAGED);
	if (ret && !err)
		err = ret;

	kfree(rec);
	return err;
}
/*
 * this bypasses the standard btrfs submit functions deliberately, as
 * the standard behavior is to write all copies in a raid setup. here we only
 * want to write the one bad copy. so we do the mapping for ourselves and issue
 * submit_bio directly.
 * to avoid any synchronization issues, wait for the data after writing, which
 * actually prevents the read that triggered the error from finishing.
 * currently, there can be no more than two copies of every data bit. thus,
 * exactly one rewrite is required.
 */
int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
		      struct page *page, unsigned int pg_offset, int mirror_num)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct bio *bio;
	struct btrfs_device *dev;
	u64 map_length = 0;
	u64 sector;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	int ret;

	ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
	BUG_ON(!mirror_num);

	/* we can't repair anything in raid56 yet */
	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
		return 0;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio)
		return -EIO;
	bio->bi_iter.bi_size = 0;
	map_length = length;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are doing the
	 * read repair operation.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_block(fs_info, WRITE, logical,
			      &map_length, &bbio, mirror_num);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		bio_put(bio);
		return -EIO;
	}
	BUG_ON(mirror_num != bbio->mirror_num);
	sector = bbio->stripes[mirror_num-1].physical >> 9;
	bio->bi_iter.bi_sector = sector;
	dev = bbio->stripes[mirror_num-1].dev;
	btrfs_put_bbio(bbio);
	if (!dev || !dev->bdev || !dev->writeable) {
		btrfs_bio_counter_dec(fs_info);
		bio_put(bio);
		return -EIO;
	}
	bio->bi_bdev = dev->bdev;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
	bio_add_page(bio, page, length, pg_offset);

	if (btrfsic_submit_bio_wait(bio)) {
		/* try to remap that extent elsewhere? */
		btrfs_bio_counter_dec(fs_info);
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	btrfs_info_rl_in_rcu(fs_info,
		"read error corrected: ino %llu off %llu (dev %s sector %llu)",
				  btrfs_ino(inode), start,
				  rcu_str_deref(dev->name), sector);
	btrfs_bio_counter_dec(fs_info);
	bio_put(bio);
	return 0;
}
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
			 int mirror_num)
{
	u64 start = eb->start;
	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
	int ret = 0;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	for (i = 0; i < num_pages; i++) {
		struct page *p = eb->pages[i];

		ret = repair_io_failure(root->fs_info->btree_inode, start,
					PAGE_SIZE, start, p,
					start - page_offset(p), mirror_num);
		if (ret)
			break;
		start += PAGE_SIZE;
	}

	return ret;
}
/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
		     unsigned int pg_offset)
{
	u64 private;
	struct io_failure_record *failrec;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct extent_state *state;
	int num_copies;
	int ret;

	private = 0;
	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
				(u64)-1, 1, EXTENT_DIRTY, 0);
	if (!ret)
		return 0;

	ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start,
				&failrec);
	if (ret)
		return 0;

	BUG_ON(!failrec->this_mirror);

	if (failrec->in_validation) {
		/* there was no real error, just free the record */
		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
			 failrec->start);
		goto out;
	}
	if (fs_info->sb->s_flags & MS_RDONLY)
		goto out;

	spin_lock(&BTRFS_I(inode)->io_tree.lock);
	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
					    failrec->start,
					    EXTENT_LOCKED);
	spin_unlock(&BTRFS_I(inode)->io_tree.lock);

	if (state && state->start <= failrec->start &&
	    state->end >= failrec->start + failrec->len - 1) {
		num_copies = btrfs_num_copies(fs_info, failrec->logical,
					      failrec->len);
		if (num_copies > 1)  {
			repair_io_failure(inode, start, failrec->len,
					  failrec->logical, page,
					  pg_offset, failrec->failed_mirror);
		}
	}

out:
	free_io_failure(inode, failrec);

	return 0;
}
/*
 * Can be called when
 * - hold extent lock
 * - under ordered extent
 * - the inode is freeing
 */
void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
{
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct io_failure_record *failrec;
	struct extent_state *state, *next;

	if (RB_EMPTY_ROOT(&failure_tree->state))
		return;

	spin_lock(&failure_tree->lock);
	state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
	while (state) {
		if (state->start > end)
			break;

		ASSERT(state->end <= end);

		next = next_state(state);

		failrec = state->failrec;
		free_extent_state(state);
		kfree(failrec);

		state = next;
	}
	spin_unlock(&failure_tree->lock);
}
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret)
{
	struct io_failure_record *failrec;
	struct extent_map *em;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret;
	u64 logical;

	ret = get_state_failrec(failure_tree, start, &failrec);
	if (ret) {
		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;

		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->this_mirror = 0;
		failrec->bio_flags = 0;
		failrec->in_validation = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (!em) {
			read_unlock(&em_tree->lock);
			kfree(failrec);
			return -EIO;
		}

		if (em->start > start || em->start + em->len <= start) {
			free_extent_map(em);
			em = NULL;
		}
		read_unlock(&em_tree->lock);
		if (!em) {
			kfree(failrec);
			return -EIO;
		}

		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&failrec->bio_flags,
						 em->compress_type);
		}

		pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
			 logical, start, failrec->len);

		failrec->logical = logical;
		free_extent_map(em);

		/* set the bits in the private failure tree */
		ret = set_extent_bits(failure_tree, start, end,
					EXTENT_LOCKED | EXTENT_DIRTY);
		if (ret >= 0)
			ret = set_state_failrec(failure_tree, start, failrec);
		/* set the bits in the inode's tree */
		if (ret >= 0)
			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
		if (ret < 0) {
			kfree(failrec);
			return ret;
		}
	} else {
		pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
			 failrec->logical, failrec->start, failrec->len,
			 failrec->in_validation);
		/*
		 * when data can be on disk more than twice, add to failrec here
		 * (e.g. with a list for failed_mirror) to make
		 * clean_io_failure() clean all those errors at once.
		 */
	}

	*failrec_ret = failrec;

	return 0;
}
int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
			   struct io_failure_record *failrec, int failed_mirror)
{
	int num_copies;

	num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
				      failrec->logical, failrec->len);
	if (num_copies == 1) {
		/*
		 * we only have a single copy of the data, so don't bother with
		 * all the retry and error correction code that follows. no
		 * matter what the error is, it is very likely to persist.
		 */
		pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
			 num_copies, failrec->this_mirror, failed_mirror);
		return 0;
	}

	/*
	 * there are two premises:
	 *	a) deliver good data to the caller
	 *	b) correct the bad sectors on disk
	 */
	if (failed_bio->bi_vcnt > 1) {
		/*
		 * to fulfill b), we need to know the exact failing sectors, as
		 * we don't want to rewrite any more than the failed ones. thus,
		 * we need separate read requests for the failed bio
		 *
		 * if the following BUG_ON triggers, our validation request got
		 * merged. we need separate requests for our algorithm to work.
		 */
		BUG_ON(failrec->in_validation);
		failrec->in_validation = 1;
		failrec->this_mirror = failed_mirror;
	} else {
		/*
		 * we're ready to fulfill a) and b) alongside. get a good copy
		 * of the failed sector and if we succeed, we have setup
		 * everything for repair_io_failure to do the rest for us.
		 */
		if (failrec->in_validation) {
			BUG_ON(failrec->this_mirror != failed_mirror);
			failrec->in_validation = 0;
			failrec->this_mirror = 0;
		}
		failrec->failed_mirror = failed_mirror;
		failrec->this_mirror++;
		if (failrec->this_mirror == failed_mirror)
			failrec->this_mirror++;
	}

	if (failrec->this_mirror > num_copies) {
		pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
			 num_copies, failrec->this_mirror, failed_mirror);
		return 0;
	}

	return 1;
}
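
/*
 * Worked example (illustrative only): with num_copies == 2 and
 * failed_mirror == 1 on a fresh record, the rotation above bumps
 * this_mirror to 1, sees it equals failed_mirror, and bumps it to 2.  If
 * mirror 2 then fails as well, the next call pushes this_mirror to 3,
 * which exceeds num_copies, so the function returns 0: nothing left to try.
 */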
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data)
{
	struct bio *bio;
	struct btrfs_io_bio *btrfs_failed_bio;
	struct btrfs_io_bio *btrfs_bio;

	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio)
		return NULL;

	bio->bi_end_io = endio_func;
	bio->bi_iter.bi_sector = failrec->logical >> 9;
	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
	bio->bi_iter.bi_size = 0;
	bio->bi_private = data;

	btrfs_failed_bio = btrfs_io_bio(failed_bio);
	if (btrfs_failed_bio->csum) {
		struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
		u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

		btrfs_bio = btrfs_io_bio(bio);
		btrfs_bio->csum = btrfs_bio->csum_inline;
		icsum *= csum_size;
		memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
		       csum_size);
	}

	bio_add_page(bio, page, failrec->len, pg_offset);

	return bio;
}
/*
 * this is a generic handler for readpage errors (default
 * readpage_io_failed_hook). if other copies exist, read those and write back
 * good data to the failed position. does not investigate in remapping the
 * failed extent elsewhere, hoping the device will be smart enough to do this as
 * needed
 */

static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
			      struct page *page, u64 start, u64 end,
			      int failed_mirror)
{
	struct io_failure_record *failrec;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	struct bio *bio;
	int read_mode;
	int ret;

	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
	if (ret)
		return ret;

	ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
	if (!ret) {
		free_io_failure(inode, failrec);
		return -EIO;
	}

	if (failed_bio->bi_vcnt > 1)
		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
	else
		read_mode = READ_SYNC;

	phy_offset >>= inode->i_sb->s_blocksize_bits;
	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
				      start - page_offset(page),
				      (int)phy_offset, failed_bio->bi_end_io,
				      NULL);
	if (!bio) {
		free_io_failure(inode, failrec);
		return -EIO;
	}
	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);

	pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
		 read_mode, failrec->this_mirror, failrec->in_validation);

	ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
					 failrec->bio_flags, 0);
	if (ret) {
		free_io_failure(inode, failrec);
		bio_put(bio);
	}

	return ret;
}
/* lots and lots of room for performance fixes in the end_bio funcs */

void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
	int uptodate = (err == 0);
	struct extent_io_tree *tree;
	int ret = 0;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (tree->ops && tree->ops->writepage_end_io_hook) {
		ret = tree->ops->writepage_end_io_hook(page, start,
					       end, NULL, uptodate);
		if (ret)
			uptodate = 0;
	}

	if (!uptodate) {
		ClearPageUptodate(page);
		SetPageError(page);
		ret = ret < 0 ? ret : -EIO;
		mapping_set_error(page->mapping, ret);
	}
}
/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio)
{
	struct bio_vec *bvec;
	u64 start;
	u64 end;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		/* We always issue full-page reads, but if some block
		 * in a page fails to read, blk_update_request() will
		 * advance bv_offset and adjust bv_len to compensate.
		 * Print a warning for nonzero offsets, and an error
		 * if they don't add up to a full page.  */
		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
				   "partial page write in btrfs with offset %u and length %u",
					bvec->bv_offset, bvec->bv_len);
			else
				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
				   "incomplete page write in btrfs with offset %u and length %u",
					bvec->bv_offset, bvec->bv_len);
		}

		start = page_offset(page);
		end = start + bvec->bv_offset + bvec->bv_len - 1;

		end_extent_writepage(page, bio->bi_error, start, end);
		end_page_writeback(page);
	}

	bio_put(bio);
}
static void
endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
			      int uptodate)
{
	struct extent_state *cached = NULL;
	u64 end = start + len - 1;

	if (uptodate && tree->track_uptodate)
		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
	unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
}
/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio)
{
	struct bio_vec *bvec;
	int uptodate = !bio->bi_error;
	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
	struct extent_io_tree *tree;
	u64 offset = 0;
	u64 start;
	u64 end;
	u64 len;
	u64 extent_start = 0;
	u64 extent_len = 0;
	int mirror;
	int ret;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;

		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u\n",
			 (u64)bio->bi_iter.bi_sector, bio->bi_error,
			 io_bio->mirror_num);
		tree = &BTRFS_I(inode)->io_tree;

		/* We always issue full-page reads, but if some block
		 * in a page fails to read, blk_update_request() will
		 * advance bv_offset and adjust bv_len to compensate.
		 * Print a warning for nonzero offsets, and an error
		 * if they don't add up to a full page.
		 */
		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
					  "partial page read in btrfs with offset %u and length %u",
					  bvec->bv_offset, bvec->bv_len);
			else
				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
					   "incomplete page read in btrfs with offset %u and length %u",
					   bvec->bv_offset, bvec->bv_len);
		}

		start = page_offset(page);
		end = start + bvec->bv_offset + bvec->bv_len - 1;
		len = bvec->bv_len;

		mirror = io_bio->mirror_num;
		if (likely(uptodate && tree->ops &&
			   tree->ops->readpage_end_io_hook)) {
			ret = tree->ops->readpage_end_io_hook(io_bio, offset,
							      page, start, end,
							      mirror);
			if (ret)
				uptodate = 0;
			else
				clean_io_failure(inode, start, page, 0);
		}

		if (likely(uptodate))
			goto readpage_ok;

		if (tree->ops && tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(page, mirror);
			if (!ret && !bio->bi_error)
				uptodate = 1;
		} else {
			/*
			 * The generic bio_readpage_error handles errors the
			 * following way: If possible, new read requests are
			 * created and submitted and will end up in
			 * end_bio_extent_readpage as well (if we're lucky, not
			 * in the !uptodate case). In that case it returns 0 and
			 * we just go on with the next page in our bio. If it
			 * can't handle the error it will return -EIO and we
			 * remain responsible for that page.
			 */
			ret = bio_readpage_error(bio, offset, page, start, end,
						 mirror);
			if (ret == 0) {
				uptodate = !bio->bi_error;
				offset += len;
				continue;
			}
		}
readpage_ok:
		if (likely(uptodate)) {
			loff_t i_size = i_size_read(inode);
			pgoff_t end_index = i_size >> PAGE_SHIFT;
			unsigned off;

			/* Zero out the end if this page straddles i_size */
			off = i_size & (PAGE_SIZE-1);
			if (page->index == end_index && off)
				zero_user_segment(page, off, PAGE_SIZE);
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
		offset += len;

		if (unlikely(!uptodate)) {
			if (extent_len) {
				endio_readpage_release_extent(tree,
							      extent_start,
							      extent_len, 1);
				extent_start = 0;
				extent_len = 0;
			}
			endio_readpage_release_extent(tree, start,
						      end - start + 1, 0);
		} else if (!extent_len) {
			extent_start = start;
			extent_len = end + 1 - start;
		} else if (extent_start + extent_len == start) {
			extent_len += end + 1 - start;
		} else {
			endio_readpage_release_extent(tree, extent_start,
						      extent_len, uptodate);
			extent_start = start;
			extent_len = end + 1 - start;
		}
	}

	if (extent_len)
		endio_readpage_release_extent(tree, extent_start, extent_len,
					      uptodate);
	if (io_bio->end_io)
		io_bio->end_io(io_bio, bio->bi_error);
	bio_put(bio);
}
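/*
 * Editor's sketch (not in the original source): the extent_start/extent_len
 * bookkeeping above in isolation.  Contiguous uptodate ranges are merged
 * into one run so endio_readpage_release_extent() fires once per run
 * instead of once per page.  example_coalesce_runs() and its parameters
 * are hypothetical and exist only for illustration.
 */
static __maybe_unused void example_coalesce_runs(struct extent_io_tree *tree,
						 const u64 *starts,
						 const u64 *ends, int nr)
{
	u64 run_start = 0;
	u64 run_len = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (!run_len) {
			/* first range opens a run */
			run_start = starts[i];
			run_len = ends[i] + 1 - starts[i];
		} else if (run_start + run_len == starts[i]) {
			/* adjacent range extends the current run */
			run_len += ends[i] + 1 - starts[i];
		} else {
			/* discontiguity: release the run, start a new one */
			endio_readpage_release_extent(tree, run_start,
						      run_len, 1);
			run_start = starts[i];
			run_len = ends[i] + 1 - starts[i];
		}
	}
	if (run_len)
		endio_readpage_release_extent(tree, run_start, run_len, 1);
}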
/*
 * this allocates from the btrfs_bioset. We're returning a bio right now
 * but you can call btrfs_io_bio for the appropriate container_of magic
 */
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct btrfs_io_bio *btrfs_bio;
	struct bio *bio;

	bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
	/* under memory pressure, retry with progressively smaller bios */
	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2)) {
			bio = bio_alloc_bioset(gfp_flags,
					       nr_vecs, btrfs_bioset);
		}
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = first_sector;
		btrfs_bio = btrfs_io_bio(bio);
		btrfs_bio->csum = NULL;
		btrfs_bio->csum_allocated = NULL;
		btrfs_bio->end_io = NULL;
	}
	return bio;
}
struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	struct btrfs_io_bio *btrfs_bio;
	struct bio *new;

	new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
	if (new) {
		btrfs_bio = btrfs_io_bio(new);
		btrfs_bio->csum = NULL;
		btrfs_bio->csum_allocated = NULL;
		btrfs_bio->end_io = NULL;
	}
	return new;
}
/* this also allocates from the btrfs_bioset */
struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	struct btrfs_io_bio *btrfs_bio;
	struct bio *bio;

	bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
	if (bio) {
		btrfs_bio = btrfs_io_bio(bio);
		btrfs_bio->csum = NULL;
		btrfs_bio->csum_allocated = NULL;
		btrfs_bio->end_io = NULL;
	}
	return bio;
}
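/*
 * Editor's example (not in the original file): the container_of magic
 * mentioned above.  Every bio allocated from btrfs_bioset carries front
 * padding for a struct btrfs_io_bio, so btrfs_io_bio(bio) recovers the
 * wrapper and the fields the three allocators initialize.  The helper
 * name below is hypothetical.
 */
static inline void example_stash_end_io(struct bio *bio,
					btrfs_io_bio_end_io_t *end_io)
{
	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);

	/* safe for any bio returned by the helpers above */
	io_bio->end_io = end_io;
}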
static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
				       unsigned long bio_flags)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	u64 start;

	start = page_offset(page) + bvec->bv_offset;

	bio->bi_private = NULL;
	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
						 mirror_num, bio_flags, start);
	else
		btrfsic_submit_bio(bio);

	bio_put(bio);
	return ret;
}
static int merge_bio(struct extent_io_tree *tree, struct page *page,
		     unsigned long offset, size_t size, struct bio *bio,
		     unsigned long bio_flags)
{
	int ret = 0;

	if (tree->ops && tree->ops->merge_bio_hook)
		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
						bio_flags);
	BUG_ON(ret < 0);
	return ret;
}
static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
			      struct writeback_control *wbc,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num,
			      unsigned long prev_bio_flags,
			      unsigned long bio_flags,
			      bool force_bio_submit)
{
	int ret = 0;
	struct bio *bio;
	int contig = 0;
	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
	size_t page_size = min_t(size_t, size, PAGE_SIZE);

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (old_compressed)
			contig = bio->bi_iter.bi_sector == sector;
		else
			contig = bio_end_sector(bio) == sector;

		if (prev_bio_flags != bio_flags || !contig ||
		    force_bio_submit ||
		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
		    bio_add_page(bio, page, page_size, offset) < page_size) {
			ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
			if (ret < 0) {
				*bio_ret = NULL;
				return ret;
			}
			bio = NULL;
		} else {
			if (wbc)
				wbc_account_io(wbc, page, page_size);
			return 0;
		}
	}

	bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
			      GFP_NOFS | __GFP_HIGH);
	if (!bio)
		return -ENOMEM;

	bio_add_page(bio, page, page_size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	bio_set_op_attrs(bio, op, op_flags);
	if (wbc) {
		wbc_init_bio(wbc, bio);
		wbc_account_io(wbc, page, page_size);
	}

	if (bio_ret)
		*bio_ret = bio;
	else
		ret = submit_one_bio(bio, mirror_num, bio_flags);

	return ret;
}
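/*
 * Editor's sketch (not in the original source): how callers typically
 * drive submit_extent_page().  One struct bio pointer survives across
 * calls so physically contiguous blocks keep merging into a single bio,
 * and whatever remains open at the end must be flushed through
 * submit_one_bio().  example_chained_read() and its fixed parameters are
 * hypothetical; real callers derive sector/bdev from extent maps.
 */
static __maybe_unused int example_chained_read(struct extent_io_tree *tree,
					       struct page *page,
					       struct block_device *bdev,
					       sector_t sector)
{
	struct bio *bio = NULL;		/* shared across calls */
	unsigned long bio_flags = 0;
	int ret;

	/* queue one page; the bio stays open in 'bio' for later merging */
	ret = submit_extent_page(REQ_OP_READ, 0, tree, NULL, page, sector,
				 PAGE_SIZE, 0, bdev, &bio, 1,
				 end_bio_extent_readpage, 0, 0, bio_flags,
				 false);
	/* ... further calls could extend 'bio' with contiguous pages ... */
	if (!ret && bio)
		ret = submit_one_bio(bio, 0, bio_flags);
	return ret;
}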
static void attach_extent_buffer_page(struct extent_buffer *eb,
				      struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		get_page(page);
		set_page_private(page, (unsigned long)eb);
	} else {
		WARN_ON(page->private != (unsigned long)eb);
	}
}
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		get_page(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}
static struct extent_map *
__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
		 u64 start, u64 len, get_extent_t *get_extent,
		 struct extent_map **em_cached)
{
	struct extent_map *em;

	if (em_cached && *em_cached) {
		em = *em_cached;
		if (extent_map_in_tree(em) && start >= em->start &&
		    start < extent_map_end(em)) {
			atomic_inc(&em->refs);
			return em;
		}
		free_extent_map(em);
		*em_cached = NULL;
	}

	em = get_extent(inode, page, pg_offset, start, len, 0);
	if (em_cached && !IS_ERR_OR_NULL(em)) {
		BUG_ON(*em_cached);
		atomic_inc(&em->refs);
		*em_cached = em;
	}
	return em;
}
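/*
 * Editor's sketch (not in the original file): why em_cached exists.  When
 * reading many contiguous pages, consecutive lookups usually land in the
 * same extent map, and the cache avoids an rbtree search per page.  The
 * walker below is hypothetical; passing a NULL page mirrors other
 * page-less get_extent callers.
 */
static __maybe_unused void example_walk_extent_maps(struct inode *inode,
						    get_extent_t *get_extent,
						    u64 start, u64 end)
{
	struct extent_map *em_cached = NULL;
	struct extent_map *em;
	u64 cur = start;

	while (cur <= end) {
		em = __get_extent_map(inode, NULL, 0, cur, end - cur + 1,
				      get_extent, &em_cached);
		if (IS_ERR_OR_NULL(em))
			break;
		cur = extent_map_end(em);	/* first byte past this map */
		free_extent_map(em);		/* drop our lookup reference */
	}
	if (em_cached)
		free_extent_map(em_cached);
}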
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 * XXX JDM: This needs looking at to ensure proper page locking
 * return 0 on success, otherwise return error
 */
static int __do_readpage(struct extent_io_tree *tree,
			 struct page *page,
			 get_extent_t *get_extent,
			 struct extent_map **em_cached,
			 struct bio **bio, int mirror_num,
			 unsigned long *bio_flags, int read_flags,
			 u64 *prev_em_start)
{
	struct inode *inode = page->mapping->host;
	u64 start = page_offset(page);
	u64 page_end = start + PAGE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret = 0;
	int nr = 0;
	size_t pg_offset = 0;
	size_t iosize;
	size_t disk_io_size;
	size_t blocksize = inode->i_sb->s_blocksize;
	unsigned long this_bio_flag = 0;

	set_page_extent_mapped(page);

	end = page_end;
	if (!PageUptodate(page)) {
		if (cleancache_get_page(page) == 0) {
			BUG_ON(blocksize != PAGE_SIZE);
			unlock_extent(tree, start, end);
			goto out;
		}
	}

	if (page->index == last_byte >> PAGE_SHIFT) {
		char *userpage;
		size_t zero_offset = last_byte & (PAGE_SIZE - 1);

		if (zero_offset) {
			iosize = PAGE_SIZE - zero_offset;
			userpage = kmap_atomic(page);
			memset(userpage + zero_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage);
		}
	}
	while (cur <= end) {
		unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
		bool force_bio_submit = false;

		if (cur >= last_byte) {
			char *userpage;
			struct extent_state *cached = NULL;

			iosize = PAGE_SIZE - pg_offset;
			userpage = kmap_atomic(page);
			memset(userpage + pg_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    &cached, GFP_NOFS);
			unlock_extent_cached(tree, cur,
					     cur + iosize - 1,
					     &cached, GFP_NOFS);
			break;
		}
		em = __get_extent_map(inode, page, pg_offset, cur,
				      end - cur + 1, get_extent, em_cached);
		if (IS_ERR_OR_NULL(em)) {
			SetPageError(page);
			unlock_extent(tree, cur, end);
			break;
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			this_bio_flag |= EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&this_bio_flag,
						 em->compress_type);
		}

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = ALIGN(iosize, blocksize);
		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
			disk_io_size = em->block_len;
			sector = em->block_start >> 9;
		} else {
			sector = (em->block_start + extent_offset) >> 9;
			disk_io_size = iosize;
		}
		bdev = em->bdev;
		block_start = em->block_start;
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			block_start = EXTENT_MAP_HOLE;

		/*
		 * If we have a file range that points to a compressed extent
		 * and it's followed by a consecutive file range that points
		 * to the same compressed extent (possibly with a different
		 * offset and/or length, so it either points to the whole extent
		 * or only part of it), we must make sure we do not submit a
		 * single bio to populate the pages for the 2 ranges because
		 * this makes the compressed extent read zero out the pages
		 * belonging to the 2nd range. Imagine the following scenario:
		 *
		 *  File layout
		 *  [0 - 8K]                     [8K - 24K]
		 *    |                               |
		 *    |                               |
		 * points to extent X,         points to extent X,
		 * offset 4K, length of 8K     offset 0, length 16K
		 *
		 * [extent X, compressed length = 4K uncompressed length = 16K]
		 *
		 * If the bio to read the compressed extent covers both ranges,
		 * it will decompress extent X into the pages belonging to the
		 * first range and then it will stop, zeroing out the remaining
		 * pages that belong to the other range that points to extent X.
		 * So here we make sure we submit 2 bios, one for the first
		 * range and another one for the second range. Both will target
		 * the same physical extent from disk, but we can't currently
		 * make the compressed bio endio callback populate the pages
		 * for both ranges because each compressed bio is tightly
		 * coupled with a single extent map, and each range can have
		 * an extent map with a different offset value relative to the
		 * uncompressed data of our extent and different lengths. This
		 * is a corner case so we prioritize correctness over
		 * non-optimal behavior (submitting 2 bios for the same extent).
		 */
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
		    prev_em_start && *prev_em_start != (u64)-1 &&
		    *prev_em_start != em->orig_start)
			force_bio_submit = true;

		if (prev_em_start)
			*prev_em_start = em->orig_start;

		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			struct extent_state *cached = NULL;

			userpage = kmap_atomic(page);
			memset(userpage + pg_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage);

			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    &cached, GFP_NOFS);
			unlock_extent_cached(tree, cur,
					     cur + iosize - 1,
					     &cached, GFP_NOFS);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end,
				   EXTENT_UPTODATE, 1, NULL)) {
			check_page_uptodate(tree, page);
			unlock_extent(tree, cur, cur + iosize - 1);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}
		/* we have an inline extent but it didn't get marked up
		 * to date.  Error out
		 */
		if (block_start == EXTENT_MAP_INLINE) {
			SetPageError(page);
			unlock_extent(tree, cur, cur + iosize - 1);
			cur = cur + iosize;
			pg_offset += iosize;
			continue;
		}

		pnr -= page->index;
		ret = submit_extent_page(REQ_OP_READ, read_flags, tree, NULL,
					 page, sector, disk_io_size, pg_offset,
					 bdev, bio, pnr,
					 end_bio_extent_readpage, mirror_num,
					 *bio_flags,
					 this_bio_flag,
					 force_bio_submit);
		if (!ret) {
			nr++;
			*bio_flags = this_bio_flag;
		} else {
			SetPageError(page);
			unlock_extent(tree, cur, cur + iosize - 1);
			goto out;
		}
		cur = cur + iosize;
		pg_offset += iosize;
	}
out:
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return ret;
}
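/*
 * Editor's worked example (not in the original source) for the sector
 * arithmetic in __do_readpage(): with em->block_start at 1 MiB (1048576)
 * and cur 8K into the extent (extent_offset == 8192), the read targets
 * byte 1056768, i.e. 512-byte sector 2064 (1056768 >> 9).  For a
 * compressed extent the whole on-disk extent is read instead:
 * sector = em->block_start >> 9 and disk_io_size = em->block_len.
 */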
static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
					     struct page *pages[], int nr_pages,
					     u64 start, u64 end,
					     get_extent_t *get_extent,
					     struct extent_map **em_cached,
					     struct bio **bio, int mirror_num,
					     unsigned long *bio_flags,
					     u64 *prev_em_start)
{
	struct inode *inode;
	struct btrfs_ordered_extent *ordered;
	int index;

	inode = pages[0]->mapping->host;
	while (1) {
		lock_extent(tree, start, end);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered)
			break;
		unlock_extent(tree, start, end);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	for (index = 0; index < nr_pages; index++) {
		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
			      mirror_num, bio_flags, 0, prev_em_start);
		put_page(pages[index]);
	}
}
static void __extent_readpages(struct extent_io_tree *tree,
			       struct page *pages[],
			       int nr_pages, get_extent_t *get_extent,