btrfs: write_dev_flush does not return ENOMEM anymore
author Anand Jain <anand.jain@oracle.com>
Tue, 13 Jun 2017 09:32:29 +0000 (17:32 +0800)
committer David Sterba <dsterba@suse.com>
Mon, 19 Jun 2017 16:26:04 +0000 (18:26 +0200)
Since commit "btrfs: btrfs_io_bio_alloc never fails, skip error handling",
write_dev_flush no longer returns ENOMEM from the submit side, so the callers
no longer need to check for it (see the sketch after the tags below).

Signed-off-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ updated changelog ]
Signed-off-by: David Sterba <dsterba@suse.com>
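
For context, here is a minimal sketch of the submit side (wait == 0) of
write_dev_flush() after the prerequisite patch. This is a paraphrase, not the
verbatim kernel source: the btrfs_io_bio_alloc() argument list and helpers such
as btrfs_end_empty_barrier()/btrfsic_submit_bio() are approximations for that
kernel version. The point it illustrates is that the flush bio now comes from a
bioset-backed allocation that can block but never returns NULL, which is why no
ENOMEM can reach the callers.

    static int write_dev_flush(struct btrfs_device *device, int wait)
    {
            struct bio *bio;

            if (wait) {
                    /* Wait side elided; the first hunk below covers it. */
                    return 0;
            }

            /*
             * Bioset-backed allocation: it may block under memory
             * pressure, but it does not return NULL, hence there is
             * no ENOMEM to propagate to the callers.
             */
            bio = btrfs_io_bio_alloc(0);
            bio->bi_end_io = btrfs_end_empty_barrier;
            bio->bi_bdev = device->bdev;
            bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
            init_completion(&device->flush_wait);
            bio->bi_private = &device->flush_wait;
            device->flush_bio = bio;

            bio_get(bio);
            btrfsic_submit_bio(bio);

            return 0;
    }
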
fs/btrfs/disk-io.c

index 8b57c280e5cd4af8db9e845224007d3b49f66fe0..1e90469cc0d281072a6b495bd32c5d1dbc349fd0 100644
@@ -3505,13 +3505,6 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
        if (wait) {
                bio = device->flush_bio;
-               if (!bio)
-                       /*
-                        * This means the alloc has failed with ENOMEM, however
-                        * here we return 0, as its not a device error.
-                        */
-                       return 0;
-
                wait_for_completion(&device->flush_wait);
 
                if (bio->bi_error) {
@@ -3548,25 +3541,16 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
 static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
 {
-       int submit_flush_error = 0;
        int dev_flush_error = 0;
        struct btrfs_device *dev;
-       int tolerance;
 
        list_for_each_entry_rcu(dev, &fsdevs->devices, dev_list) {
-               if (!dev->bdev) {
-                       submit_flush_error++;
-                       dev_flush_error++;
-                       continue;
-               }
-               if (dev->last_flush_error == -ENOMEM)
-                       submit_flush_error++;
-               if (dev->last_flush_error && dev->last_flush_error != -ENOMEM)
+               if (!dev->bdev || dev->last_flush_error)
                        dev_flush_error++;
        }
 
-       tolerance = fsdevs->fs_info->num_tolerated_disk_barrier_failures;
-       if (submit_flush_error > tolerance || dev_flush_error > tolerance)
+       if (dev_flush_error >
+           fsdevs->fs_info->num_tolerated_disk_barrier_failures)
                return -EIO;
 
        return 0;
@@ -3596,10 +3580,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;
 
-               ret = write_dev_flush(dev, 0);
-               if (ret)
-                       errors_send++;
-               dev->last_flush_error = ret;
+               write_dev_flush(dev, 0);
+               dev->last_flush_error = 0;
        }
 
        /* wait for all the barriers */
@@ -3620,16 +3602,6 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
                }
        }
 
-       /*
-        * Try hard in case of flush. Lets say, in RAID1 we have
-        * the following situation
-        *  dev1: EIO dev2: ENOMEM
-        * this is not a fatal error as we hope to recover from
-        * ENOMEM in the next attempt to flush.
-        * But the following is considered as fatal
-        *  dev1: ENOMEM dev2: ENOMEM
-        *  dev1: bdev == NULL dev2: ENOMEM
-        */
        if (errors_send || errors_wait) {
                /*
                 * At some point we need the status of all disks