return tree_search_for_insert(tree, offset, NULL, NULL);
}
-static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
- struct extent_state *other)
-{
- if (tree->ops && tree->ops->merge_extent_hook)
- tree->ops->merge_extent_hook(tree->private_data, new, other);
-}
-
/*
* utility function to look for merge candidates inside a given range.
* Any extents with matching state are merged together into a single
other = rb_entry(other_node, struct extent_state, rb_node);
if (other->end == state->start - 1 &&
other->state == state->state) {
- merge_cb(tree, state, other);
+ if (tree->private_data &&
+ is_data_inode(tree->private_data))
+ btrfs_merge_delalloc_extent(tree->private_data,
+ state, other);
state->start = other->start;
rb_erase(&other->rb_node, &tree->state);
RB_CLEAR_NODE(&other->rb_node);
other = rb_entry(other_node, struct extent_state, rb_node);
if (other->start == state->end + 1 &&
other->state == state->state) {
- merge_cb(tree, state, other);
+ if (tree->private_data &&
+ is_data_inode(tree->private_data))
+ btrfs_merge_delalloc_extent(tree->private_data,
+ state, other);
state->end = other->end;
rb_erase(&other->rb_node, &tree->state);
RB_CLEAR_NODE(&other->rb_node);
}
}
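
The merge_cb() indirection is gone: merge_state() now calls btrfs_merge_delalloc_extent() directly, guarded by is_data_inode(). A minimal user-space sketch of the adjacent-range merge this code performs, with stand-in types and a printf standing in for the delalloc callback (everything below is illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel types, trimmed to what the sketch needs. */
struct extent_state { unsigned long start, end; unsigned state; };
struct toy_inode { bool is_data; };

static bool is_data_inode(void *private_data)
{
	return ((struct toy_inode *)private_data)->is_data;
}

/* Stands in for btrfs_merge_delalloc_extent(). */
static void merge_delalloc_extent(void *inode, struct extent_state *state,
				  struct extent_state *other)
{
	printf("absorbing [%lu, %lu] into [%lu, %lu]\n",
	       other->start, other->end, state->start, state->end);
}

/* Merge 'other' into 'state' when adjacent on the left with equal bits. */
static void try_merge_left(void *private_data, struct extent_state *state,
			   struct extent_state *other)
{
	if (other->end == state->start - 1 && other->state == state->state) {
		if (private_data && is_data_inode(private_data))
			merge_delalloc_extent(private_data, state, other);
		state->start = other->start;
		/* the kernel also erases 'other' from the rb-tree here */
	}
}

int main(void)
{
	struct toy_inode ino = { .is_data = true };
	struct extent_state state = { .start = 4096, .end = 8191, .state = 1 };
	struct extent_state other = { .start = 0, .end = 4095, .state = 1 };

	try_merge_left(&ino, &state, &other);
	printf("result: [%lu, %lu]\n", state.start, state.end);
	return 0;
}
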
-static void clear_state_cb(struct extent_io_tree *tree,
- struct extent_state *state, unsigned *bits)
-{
- if (tree->ops && tree->ops->clear_bit_hook)
- tree->ops->clear_bit_hook(tree->private_data, state, bits);
-}
-
static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state, unsigned *bits,
struct extent_changeset *changeset);
return 0;
}
-static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
- u64 split)
-{
- if (tree->ops && tree->ops->split_extent_hook)
- tree->ops->split_extent_hook(tree->private_data, orig, split);
-}
-
/*
* split a given extent state struct in two, inserting the preallocated
* struct 'prealloc' as the newly created second half. 'split' indicates an
{
struct rb_node *node;
- split_cb(tree, orig, split);
+ if (tree->private_data && is_data_inode(tree->private_data))
+ btrfs_split_delalloc_extent(tree->private_data, orig, split);
prealloc->start = orig->start;
prealloc->end = split - 1;
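
The split path gets the same direct-call conversion; the arithmetic itself is short. A plain-C sketch of what split_state() does with the preallocated node, under the assumption that 'orig' then shrinks to [split, orig->end] in the lines not shown (rb-tree insertion and the delalloc callback omitted):

#include <assert.h>
#include <stdio.h>

struct extent_state { unsigned long start, end; };

/*
 * Split 'orig' at 'split': 'prealloc' takes [orig->start, split - 1]
 * and 'orig' keeps [split, orig->end].
 */
static void split_state(struct extent_state *orig,
			struct extent_state *prealloc, unsigned long split)
{
	assert(split > orig->start && split <= orig->end);
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	orig->start = split;
}

int main(void)
{
	struct extent_state orig = { .start = 0, .end = 16383 };
	struct extent_state prealloc;

	split_state(&orig, &prealloc, 4096);
	printf("first  [%lu, %lu]\n", prealloc.start, prealloc.end);
	printf("second [%lu, %lu]\n", orig.start, orig.end);
	return 0;
}
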
WARN_ON(range > tree->dirty_bytes);
tree->dirty_bytes -= range;
}
- clear_state_cb(tree, state, bits);
+
+ if (tree->private_data && is_data_inode(tree->private_data))
+ btrfs_clear_delalloc_extent(tree->private_data, state, bits);
+
ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
BUG_ON(ret < 0);
state->state &= ~bits_to_clear;
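
clear_state_bit() keeps the same shape after the conversion: notify data inodes, record the change, clear the bits. A toy model of the last two steps, with the changeset reduced to a byte counter (the real add_extent_changeset() takes more arguments, and its failure is treated as fatal by the BUG_ON above):

#include <stdio.h>

#define EXTENT_DIRTY	(1U << 0)
#define EXTENT_DELALLOC	(1U << 1)

struct extent_state { unsigned long start, end; unsigned state; };

/* Toy changeset: just count bytes whose bits get cleared. */
struct changeset { unsigned long bytes_changed; };

static int add_extent_changeset(struct extent_state *state, unsigned bits,
				struct changeset *cs)
{
	if (state->state & bits)
		cs->bytes_changed += state->end - state->start + 1;
	return 0;
}

int main(void)
{
	struct extent_state state = {
		.start = 0, .end = 4095,
		.state = EXTENT_DIRTY | EXTENT_DELALLOC,
	};
	struct changeset cs = { 0 };
	unsigned bits_to_clear = EXTENT_DELALLOC;
	int ret;

	ret = add_extent_changeset(&state, bits_to_clear, &cs);
	state.state &= ~bits_to_clear;		/* as in clear_state_bit() */
	printf("ret=%d state=0x%x changed=%lu bytes\n",
	       ret, state.state, cs.bytes_changed);
	return 0;
}
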
*
* 1 is returned if we find something, 0 if nothing was in the tree
*/
-static noinline_for_stack u64 find_lock_delalloc_range(struct inode *inode,
+EXPORT_FOR_TESTS
+noinline_for_stack u64 find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree,
struct page *locked_page, u64 *start,
- u64 *end, u64 max_bytes)
+ u64 *end)
{
+ u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
u64 delalloc_start;
u64 delalloc_end;
u64 found;
/* step two, lock all the pages after the page that has start */
ret = lock_delalloc_pages(inode, locked_page,
delalloc_start, delalloc_end);
+ ASSERT(!ret || ret == -EAGAIN);
if (ret == -EAGAIN) {
/* some of the pages are gone, let's avoid looping by
* shortening the size of the delalloc range we're searching
goto out_failed;
}
}
- BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
/* step three, lock the state bits for the whole range */
lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
return found;
}
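
Replacing the tail BUG_ON() with ASSERT(!ret || ret == -EAGAIN) right after the call documents lock_delalloc_pages()'s error domain where ret is produced. The same idea as a user-space stub (ASSERT here is plain assert(); the kernel macro compiles out unless btrfs assertions are configured):

#include <assert.h>
#include <errno.h>

#define ASSERT(expr) assert(expr)

/* Hypothetical pager: 0 on success, -EAGAIN if pages went away. */
static int lock_delalloc_pages_stub(void)
{
	return -EAGAIN;
}

int main(void)
{
	int ret = lock_delalloc_pages_stub();

	/* Only 0 and -EAGAIN are valid, checked where ret appears. */
	ASSERT(!ret || ret == -EAGAIN);
	if (ret == -EAGAIN)
		ret = 0;	/* shrink the range and retry in the kernel */
	return ret;
}
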
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-u64 btrfs_find_lock_delalloc_range(struct inode *inode,
- struct extent_io_tree *tree,
- struct page *locked_page, u64 *start,
- u64 *end, u64 max_bytes)
-{
- return find_lock_delalloc_range(inode, tree, locked_page, start, end,
- max_bytes);
-}
-#endif
-
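
The deleted wrapper is exactly what EXPORT_FOR_TESTS replaces: under the sanity-test config the annotation expands to nothing, giving find_lock_delalloc_range() external linkage for the self-tests, and otherwise it expands to static. A self-contained sketch of the pattern (the config symbol below is illustrative):

#include <stdio.h>

#ifdef CONFIG_RUN_SANITY_TESTS		/* illustrative config name */
#define EXPORT_FOR_TESTS		/* external linkage for tests */
#else
#define EXPORT_FOR_TESTS static		/* private otherwise */
#endif

EXPORT_FOR_TESTS
unsigned long find_something(unsigned long start)
{
	return start + 1;
}

int main(void)
{
	printf("%lu\n", find_something(41));
	return 0;
}

Compile with -DCONFIG_RUN_SANITY_TESTS to flip the linkage; test code can then declare the symbol extern instead of going through a wrapper.
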
static int __process_pages_contig(struct address_space *mapping,
struct page *locked_page,
pgoff_t start_index, pgoff_t end_index,
int uptodate = (err == 0);
int ret = 0;
- btrfs_writepage_endio_finish_ordered(page, start, end, NULL, uptodate);
+ btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
if (!uptodate) {
ClearPageUptodate(page);
* This returns < 0 if there were errors (page still locked)
*/
static noinline_for_stack int writepage_delalloc(struct inode *inode,
- struct page *page, struct writeback_control *wbc,
- struct extent_page_data *epd,
- u64 delalloc_start,
- unsigned long *nr_written)
+ struct page *page, struct writeback_control *wbc,
+ u64 delalloc_start, unsigned long *nr_written)
{
- struct extent_io_tree *tree = epd->tree;
+ struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
u64 page_end = delalloc_start + PAGE_SIZE - 1;
u64 nr_delalloc;
u64 delalloc_to_write = 0;
int ret;
int page_started = 0;
- if (epd->extent_locked)
- return 0;
while (delalloc_end < page_end) {
nr_delalloc = find_lock_delalloc_range(inode, tree,
page,
&delalloc_start,
- &delalloc_end,
- BTRFS_MAX_EXTENT_SIZE);
+ &delalloc_end);
if (nr_delalloc == 0) {
delalloc_start = delalloc_end + 1;
continue;
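
With max_bytes now fixed to BTRFS_MAX_EXTENT_SIZE inside the callee, writepage_delalloc() simply walks the page's byte range, resuming just past whatever each lookup reports. A loose user-space model of that advance-or-consume iteration, with an invented stub in place of find_lock_delalloc_range():

#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Invented stub: pretend only [1024, 2047] of the page is delalloc.
 * Returns the byte count found and updates *start/*end, loosely
 * modeling find_lock_delalloc_range().
 */
static unsigned long find_delalloc(unsigned long *start, unsigned long *end)
{
	if (*start > 2047) {
		*end = PAGE_SIZE - 1;	/* nothing left in this page */
		return 0;
	}
	if (*start < 1024)
		*start = 1024;
	*end = 2047;
	return *end - *start + 1;
}

int main(void)
{
	unsigned long delalloc_start = 0;
	unsigned long delalloc_end = 0;
	unsigned long page_end = delalloc_start + PAGE_SIZE - 1;

	while (delalloc_end < page_end) {
		unsigned long nr = find_delalloc(&delalloc_start,
						 &delalloc_end);
		if (nr == 0) {
			delalloc_start = delalloc_end + 1;
			continue;
		}
		printf("delalloc [%lu, %lu], %lu bytes\n",
		       delalloc_start, delalloc_end, nr);
		delalloc_start = delalloc_end + 1;
	}
	return 0;
}
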
end = page_end;
if (i_size <= start) {
- btrfs_writepage_endio_finish_ordered(page, start, page_end,
- NULL, 1);
+ btrfs_writepage_endio_finish_ordered(page, start, page_end, 1);
goto done;
}
if (cur >= i_size) {
btrfs_writepage_endio_finish_ordered(page, cur,
- page_end, NULL, 1);
+ page_end, 1);
break;
}
em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
if (!compressed)
btrfs_writepage_endio_finish_ordered(page, cur,
cur + iosize - 1,
- NULL, 1);
+ 1);
else if (compressed) {
/* we don't want to end_page_writeback on
* a compressed extent. this happens
set_page_extent_mapped(page);
- ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
- if (ret == 1)
- goto done_unlocked;
- if (ret)
- goto done;
+ if (!epd->extent_locked) {
+ ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
+ if (ret == 1)
+ goto done_unlocked;
+ if (ret)
+ goto done;
+ }
ret = __extent_writepage_io(inode, page, wbc, epd,
i_size, nr_written, write_flags, &nr);
range_whole = 1;
scanned = 1;
}
- if (wbc->sync_mode == WB_SYNC_ALL)
+
+ /*
+ * We do the tagged writepage as long as the snapshot flush bit is set
+ * and we are the first one to do the filemap_flush() on this inode.
+ *
+ * The nr_to_write == LONG_MAX is needed to make sure other flushers do
+ * not race in and drop the bit.
+ */
+ if (range_whole && wbc->nr_to_write == LONG_MAX &&
+ test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+ &BTRFS_I(inode)->runtime_flags))
+ wbc->tagged_writepages = 1;
+
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
retry:
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
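
The comment above is the heart of the change: only the flusher that actually clears BTRFS_INODE_SNAPSHOT_FLUSH tags pages, so a racing flusher cannot drop the bit and leave nobody tagging. A user-space model of that first-flusher-wins gate, using GCC/Clang atomic builtins with stand-in names:

#include <stdbool.h>
#include <stdio.h>

#define SNAPSHOT_FLUSH	(1UL << 0)

static unsigned long runtime_flags;

/* Atomically clear the bit; true only for the caller that saw it set. */
static bool test_and_clear_flush_bit(void)
{
	unsigned long old = __atomic_fetch_and(&runtime_flags,
					       ~SNAPSHOT_FLUSH,
					       __ATOMIC_SEQ_CST);
	return old & SNAPSHOT_FLUSH;
}

int main(void)
{
	__atomic_or_fetch(&runtime_flags, SNAPSHOT_FLUSH, __ATOMIC_SEQ_CST);

	/* First flusher wins and would set wbc->tagged_writepages... */
	printf("flusher 1 tags: %d\n", test_and_clear_flush_bit());
	/* ...any racing flusher sees the bit already clear. */
	printf("flusher 2 tags: %d\n", test_and_clear_flush_bit());
	return 0;
}
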
done_index = index;
while (!done && !nr_to_write_done && (index <= end) &&
ret = __extent_writepage(page, &wbc_writepages, &epd);
else {
btrfs_writepage_endio_finish_ordered(page, start,
- start + PAGE_SIZE - 1,
- NULL, 1);
+ start + PAGE_SIZE - 1, 1);
unlock_page(page);
}
put_page(page);
check_buffer_tree_ref(eb);
set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
- /*
- * We will free dummy extent buffer's if they come into
- * free_extent_buffer with a ref count of 2, but if we are using this we
- * want the buffers to stay in memory until we're done with them, so
- * bump the ref count again.
- */
- atomic_inc(&eb->refs);
return eb;
free_eb:
btrfs_release_extent_buffer(eb);
while (1) {
refs = atomic_read(&eb->refs);
- if (refs <= 3)
+ if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) ||
+     (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs == 1))
break;
old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
if (old == refs)
}
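
The reworked loop is a standard compare-and-swap retry: pick the floor that applies to this buffer flavor (3 for mapped buffers, 1 for unmapped ones, now that the extra dummy ref is gone) and decrement only if nobody raced in between. A portable C11 sketch of decrement-above-a-floor, with stdatomic standing in for the kernel's atomic_cmpxchg():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Drop one reference unless *refs is already at 'floor'. */
static bool dec_refs_above_floor(atomic_int *refs, int floor)
{
	int old = atomic_load(refs);

	while (old > floor) {
		/* On failure 'old' is refreshed and we retry. */
		if (atomic_compare_exchange_weak(refs, &old, old - 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int refs = 5;

	while (dec_refs_above_floor(&refs, 3))	/* mapped-buffer floor */
		;
	printf("refs settled at %d\n", atomic_load(&refs));
	return 0;
}
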
spin_lock(&eb->refs_lock);
- if (atomic_read(&eb->refs) == 2 &&
- test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))
- atomic_dec(&eb->refs);
-
if (atomic_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
!extent_buffer_under_io(eb) &&