Btrfs: return an error from btrfs_wait_ordered_range
authorJosef Bacik <jbacik@fusionio.com>
Fri, 25 Oct 2013 20:13:35 +0000 (16:13 -0400)
committerChris Mason <chris.mason@fusionio.com>
Tue, 12 Nov 2013 03:07:35 +0000 (22:07 -0500)
I noticed that if the free space cache has an error writing out its data it
won't actually error out, it will just carry on.  This is because it doesn't
check the return value of btrfs_wait_ordered_range, which didn't actually return
anything.  So fix this in order to keep us from making free space cache look
valid when it really isn't.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/relocation.c

index 14b41d569a3e6f1e60947403a233be280fbd21ea..3a20a12513b233f22b76f38f25656af2edeb806b 100644 (file)
@@ -1280,6 +1280,7 @@ again:
                }
                wait_on_page_writeback(pages[i]);
        }
+       faili = num_pages - 1;
        err = 0;
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
@@ -1298,8 +1299,10 @@ again:
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
-                       btrfs_wait_ordered_range(inode, start_pos,
-                                                last_pos - start_pos);
+                       err = btrfs_wait_ordered_range(inode, start_pos,
+                                                      last_pos - start_pos);
+                       if (err)
+                               goto fail;
                        goto again;
                }
                if (ordered)
@@ -1808,8 +1811,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        atomic_inc(&root->log_batch);
        full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                             &BTRFS_I(inode)->runtime_flags);
-       if (full_sync)
-               btrfs_wait_ordered_range(inode, start, end - start + 1);
+       if (full_sync) {
+               ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
+               if (ret) {
+                       mutex_unlock(&inode->i_mutex);
+                       goto out;
+               }
+       }
        atomic_inc(&root->log_batch);
 
        /*
@@ -1875,27 +1883,20 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        mutex_unlock(&inode->i_mutex);
 
        if (ret != BTRFS_NO_LOG_SYNC) {
-               if (ret > 0) {
-                       /*
-                        * If we didn't already wait for ordered extents we need
-                        * to do that now.
-                        */
-                       if (!full_sync)
-                               btrfs_wait_ordered_range(inode, start,
-                                                        end - start + 1);
-                       ret = btrfs_commit_transaction(trans, root);
-               } else {
+               if (!ret) {
                        ret = btrfs_sync_log(trans, root);
-                       if (ret == 0) {
+                       if (!ret) {
                                ret = btrfs_end_transaction(trans, root);
-                       } else {
-                               if (!full_sync)
-                                       btrfs_wait_ordered_range(inode, start,
-                                                                end -
-                                                                start + 1);
-                               ret = btrfs_commit_transaction(trans, root);
+                               goto out;
                        }
                }
+               if (!full_sync) {
+                       ret = btrfs_wait_ordered_range(inode, start,
+                                                      end - start + 1);
+                       if (ret)
+                               goto out;
+               }
+               ret = btrfs_commit_transaction(trans, root);
        } else {
                ret = btrfs_end_transaction(trans, root);
        }
@@ -2066,7 +2067,9 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
                          ((offset + len - 1) >> PAGE_CACHE_SHIFT));
 
-       btrfs_wait_ordered_range(inode, offset, len);
+       ret = btrfs_wait_ordered_range(inode, offset, len);
+       if (ret)
+               return ret;
 
        mutex_lock(&inode->i_mutex);
        /*
@@ -2135,8 +2138,12 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                        btrfs_put_ordered_extent(ordered);
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
                                     lockend, &cached_state, GFP_NOFS);
-               btrfs_wait_ordered_range(inode, lockstart,
-                                        lockend - lockstart + 1);
+               ret = btrfs_wait_ordered_range(inode, lockstart,
+                                              lockend - lockstart + 1);
+               if (ret) {
+                       mutex_unlock(&inode->i_mutex);
+                       return ret;
+               }
        }
 
        path = btrfs_alloc_path();
@@ -2307,7 +2314,10 @@ static long btrfs_fallocate(struct file *file, int mode,
         * wait for ordered IO before we have any locks.  We'll loop again
         * below with the locks held.
         */
-       btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+       ret = btrfs_wait_ordered_range(inode, alloc_start,
+                                      alloc_end - alloc_start);
+       if (ret)
+               goto out;
 
        locked_end = alloc_end - 1;
        while (1) {
@@ -2331,8 +2341,10 @@ static long btrfs_fallocate(struct file *file, int mode,
                         * we can't wait on the range with the transaction
                         * running or with the extent lock held
                         */
-                       btrfs_wait_ordered_range(inode, alloc_start,
-                                                alloc_end - alloc_start);
+                       ret = btrfs_wait_ordered_range(inode, alloc_start,
+                                                      alloc_end - alloc_start);
+                       if (ret)
+                               goto out;
                } else {
                        if (ordered)
                                btrfs_put_ordered_extent(ordered);
index 4772f3a8e7e59ef00856dc44364c2c30723160ce..d7c445c30a16270c7199f90c9251cacf35d5ea26 100644 (file)
@@ -1008,8 +1008,13 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        if (ret)
                goto out;
 
-
-       btrfs_wait_ordered_range(inode, 0, (u64)-1);
+       ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+       if (ret) {
+               clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+                                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
+                                GFP_NOFS);
+               goto out;
+       }
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
index 14c6ab74cf8cf7e934603f504a5e74a08a4f5ad4..f1fbf903bf9b491ef4e489946d6f7b5c7d8fe03e 100644 (file)
@@ -7236,7 +7236,9 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
         * outstanding dirty pages are on disk.
         */
        count = iov_length(iov, nr_segs);
-       btrfs_wait_ordered_range(inode, offset, count);
+       ret = btrfs_wait_ordered_range(inode, offset, count);
+       if (ret)
+               return ret;
 
        if (rw & WRITE) {
                /*
@@ -7577,7 +7579,10 @@ static int btrfs_truncate(struct inode *inode)
        u64 mask = root->sectorsize - 1;
        u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
 
-       btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
+       ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
+                                      (u64)-1);
+       if (ret)
+               return ret;
 
        /*
         * Yes ladies and gentelment, this is indeed ugly.  The fact is we have
index 1a36a0c3ee27724d78646b60d3f44f722f78c07b..bbb1a38646864d8c55f6880a207ddc37c2f6ce90 100644 (file)
@@ -734,8 +734,9 @@ void btrfs_start_ordered_extent(struct inode *inode,
 /*
  * Used to wait on ordered extents across a large range of bytes.
  */
-void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 {
+       int ret = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;
@@ -751,8 +752,9 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
-       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
+       ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+       if (ret)
+               return ret;
        /*
         * So with compression we will find and lock a dirty page and clear the
         * first one as dirty, setup an async extent, and immediately return
@@ -768,10 +770,15 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
         * right and you are wrong.
         */
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-                    &BTRFS_I(inode)->runtime_flags))
-               filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
-
-       filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+                    &BTRFS_I(inode)->runtime_flags)) {
+               ret = filemap_fdatawrite_range(inode->i_mapping, start,
+                                              orig_end);
+               if (ret)
+                       return ret;
+       }
+       ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
+       if (ret)
+               return ret;
 
        end = orig_end;
        while (1) {
@@ -788,11 +795,14 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
                }
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
+               if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+                       ret = -EIO;
                btrfs_put_ordered_extent(ordered);
-               if (end == 0 || end == start)
+               if (ret || end == 0 || end == start)
                        break;
                end--;
        }
+       return ret;
 }
 
 /*
index 0c0b35612d7ad1fc5f5c7db5d267e70f319d8529..3982db1c1b363ad86a1d28c88f4d5d280865d2a1 100644 (file)
@@ -180,7 +180,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset);
 void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry, int wait);
-void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
+int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
 struct btrfs_ordered_extent *
 btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
index a945374993f7d54a4768a92e25fb2ff2054b22a2..729c91e151a3290b88cc51bde656664d355066f5 100644 (file)
@@ -4257,7 +4257,12 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
                        rc->extents_found);
 
                if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
-                       btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1);
+                       ret = btrfs_wait_ordered_range(rc->data_inode, 0,
+                                                      (u64)-1);
+                       if (ret) {
+                               err = ret;
+                               goto out;
+                       }
                        invalidate_mapping_pages(rc->data_inode->i_mapping,
                                                 0, -1);
                        rc->stage = UPDATE_DATA_PTRS;