Btrfs: remove duplicates of filemap_ helpers
author     Christoph Hellwig <hch@lst.de>         Thu, 1 Oct 2009 16:58:30 +0000 (12:58 -0400)
committer  Chris Mason <chris.mason@oracle.com>   Thu, 1 Oct 2009 16:58:30 +0000 (12:58 -0400)
Use filemap_fdatawrite_range and filemap_fdatawait_range instead of
local copies of the functions.  For filemap_fdatawait_range this
also means replacing the awkward old wait_on_page_writeback_range
calling convention, which took page indexes, with the regular
inclusive byte offsets used by the other filemap helpers.
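
To illustrate the calling-convention change, here is a minimal sketch
(not part of this patch; the function name and the mapping/start/len
parameters are made up to mirror the extent_buffer usage in the hunks
below).  The old local helper took page indexes shifted down from the
byte range, while the generic filemap helper takes inclusive byte
offsets directly:

static int example_wait_tree_block(struct address_space *mapping,
				   u64 start, u32 len)
{
#if 0	/* old local helper: page indexes derived from the byte range */
	return btrfs_wait_on_page_writeback_range(mapping,
				start >> PAGE_CACHE_SHIFT,
				(start + len - 1) >> PAGE_CACHE_SHIFT);
#endif
	/* generic helper: inclusive byte offsets, matching
	 * filemap_fdatawrite_range, so callers drop the
	 * PAGE_CACHE_SHIFT arithmetic entirely.
	 */
	return filemap_fdatawait_range(mapping, start, start + len - 1);
}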

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/disk-io.c
fs/btrfs/file.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d20dc05208febce95e6f0753abe3ec90373ba2fc..af0435f79fa605e96f0461efd496599c576eb7e2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -822,16 +822,14 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
-       return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
-                                     buf->start + buf->len - 1, WB_SYNC_ALL);
+       return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+                                       buf->start + buf->len - 1);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-       return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
-                                 buf->start >> PAGE_CACHE_SHIFT,
-                                 (buf->start + buf->len - 1) >>
-                                  PAGE_CACHE_SHIFT);
+       return filemap_fdatawait_range(buf->first_page->mapping,
+                                      buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7351bdbca26f9eef18d024683067a80f5786c2c3..ca784a7fbeba1151939fc66a5b9c259c1788c35c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1022,9 +1022,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                }
 
                if (will_write) {
-                       btrfs_fdatawrite_range(inode->i_mapping, pos,
-                                              pos + write_bytes - 1,
-                                              WB_SYNC_ALL);
+                       filemap_fdatawrite_range(inode->i_mapping, pos,
+                                                pos + write_bytes - 1);
                } else {
                        balance_dirty_pages_ratelimited_nr(inode->i_mapping,
                                                           num_pages);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b5d6d24726b0014463f15041bbc9ddd74fdb110e..897fba835f897a0d02f1d4677447fdcf70d115d7 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -458,7 +458,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for pdflush to find them
         */
-       btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
+       filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
@@ -488,17 +488,15 @@ again:
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
-       btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
        /* The compression code will leave pages locked but return from
         * writepage without setting the page writeback.  Starting again
         * with WB_SYNC_ALL will end up waiting for the IO to actually start.
         */
-       btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
-       btrfs_wait_on_page_writeback_range(inode->i_mapping,
-                                          start >> PAGE_CACHE_SHIFT,
-                                          orig_end >> PAGE_CACHE_SHIFT);
+       filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
        end = orig_end;
        found = 0;
@@ -716,89 +714,6 @@ out:
 }
 
 
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
- * @mapping:   address space structure to write
- * @start:     offset in bytes where the range starts
- * @end:       offset in bytes where the range ends (inclusive)
- * @sync_mode: enable synchronous operation
- *
- * Start writeback against all of a mapping's dirty pages that lie
- * within the byte offsets <start, end> inclusive.
- *
- * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory cleansing writeback.  The difference between
- * these two operations is that if a dirty page/buffer is encountered, it must
- * be waited upon, and not just skipped over.
- */
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-                          loff_t end, int sync_mode)
-{
-       struct writeback_control wbc = {
-               .sync_mode = sync_mode,
-               .nr_to_write = mapping->nrpages * 2,
-               .range_start = start,
-               .range_end = end,
-       };
-       return btrfs_writepages(mapping, &wbc);
-}
-
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping:   target address_space
- * @start:     beginning page index
- * @end:       ending page index
- *
- * Wait for writeback to complete against pages indexed by start->end
- * inclusive
- */
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-                                      pgoff_t start, pgoff_t end)
-{
-       struct pagevec pvec;
-       int nr_pages;
-       int ret = 0;
-       pgoff_t index;
-
-       if (end < start)
-               return 0;
-
-       pagevec_init(&pvec, 0);
-       index = start;
-       while ((index <= end) &&
-                       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                       PAGECACHE_TAG_WRITEBACK,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
-               unsigned i;
-
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-
-                       /* until radix tree lookup accepts end_index */
-                       if (page->index > end)
-                               continue;
-
-                       wait_on_page_writeback(page);
-                       if (PageError(page))
-                               ret = -EIO;
-               }
-               pagevec_release(&pvec);
-               cond_resched();
-       }
-
-       /* Check for outstanding write errors */
-       if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
-               ret = -ENOSPC;
-       if (test_and_clear_bit(AS_EIO, &mapping->flags))
-               ret = -EIO;
-
-       return ret;
-}
-
 /*
  * add a given inode to the list of inodes that must be fully on
  * disk before a transaction commit finishes.
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 993a7ea45c702a580c784908584408684913e233..f82e87488ca8f47d158c37281a3aafa746d5031d 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -153,10 +153,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 int btrfs_ordered_update_i_size(struct inode *inode,
                                struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-                                      pgoff_t start, pgoff_t end);
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-                          loff_t end, int sync_mode);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
 int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
 int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,