Btrfs: replace tree->mapping with tree->private_data
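The headline change in the diff below is to replace the extent_io tree's mapping pointer with an opaque private_data pointer: the submit/async callbacks now take a void *private_data instead of a struct inode *, the async_submit_bio work item caches a struct btrfs_fs_info * directly instead of digging it out of the inode, and extent_io_tree_init() is passed the private data (the btree inode, or NULL) rather than an address_space. As a rough illustration of that pattern only, here is a minimal user-space C sketch with made-up stand-in types (not the real btrfs structures): the work item carries the opaque pointer plus a cached fs_info, and a hook that still needs the inode simply casts the pointer back.

/*
 * Minimal user-space sketch of the opaque-pointer pattern applied by the
 * patch. All names are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>

struct fs_info  { const char *label; };
struct inode    { struct fs_info *fs; };

/* hook signature after the change: first argument is opaque */
typedef int (submit_hook_t)(void *private_data, int mirror_num);

struct async_submit {
        void *private_data;        /* was: struct inode *inode */
        struct fs_info *fs_info;   /* cached, no inode->...->fs_info chain */
        submit_hook_t *submit_start;
        int mirror_num;
};

/* a hook that still needs the inode casts the opaque pointer back */
static int submit_start(void *private_data, int mirror_num)
{
        struct inode *inode = private_data;

        printf("submit on %s, mirror %d\n", inode->fs->label, mirror_num);
        return 0;
}

static void run_one_async(struct async_submit *async)
{
        /* fs_info is read straight from the work item */
        printf("limit check against %s\n", async->fs_info->label);
        async->submit_start(async->private_data, async->mirror_num);
}

int main(void)
{
        struct fs_info fs = { .label = "demo-fs" };
        struct inode ino = { .fs = &fs };
        struct async_submit async = {
                .private_data = &ino,
                .fs_info = &fs,
                .submit_start = submit_start,
                .mirror_num = 1,
        };

        run_one_async(&async);
        return 0;
}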
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8685d67185d01bf90bcd2cf6d7cdd168e044c777..d8ad30fdeee64c7de3670d973a946a626efe37ea 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -118,7 +118,8 @@ void btrfs_end_io_wq_exit(void)
  * just before they are sent down the IO stack.
  */
 struct async_submit_bio {
-       struct inode *inode;
+       void *private_data;
+       struct btrfs_fs_info *fs_info;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
@@ -871,7 +872,7 @@ static void run_one_async_start(struct btrfs_work *work)
        int ret;
 
        async = container_of(work, struct  async_submit_bio, work);
-       ret = async->submit_bio_start(async->inode, async->bio,
+       ret = async->submit_bio_start(async->private_data, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
@@ -885,7 +886,7 @@ static void run_one_async_done(struct btrfs_work *work)
        int limit;
 
        async = container_of(work, struct  async_submit_bio, work);
-       fs_info = BTRFS_I(async->inode)->root->fs_info;
+       fs_info = async->fs_info;
 
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;
@@ -904,7 +905,7 @@ static void run_one_async_done(struct btrfs_work *work)
                return;
        }
 
-       async->submit_bio_done(async->inode, async->bio, async->mirror_num,
+       async->submit_bio_done(async->private_data, async->bio, async->mirror_num,
                               async->bio_flags, async->bio_offset);
 }
 
@@ -916,10 +917,9 @@ static void run_one_async_free(struct btrfs_work *work)
        kfree(async);
 }
 
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-                       struct bio *bio, int mirror_num,
-                       unsigned long bio_flags,
-                       u64 bio_offset,
+int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+                       int mirror_num, unsigned long bio_flags,
+                       u64 bio_offset, void *private_data,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
 {
@@ -929,7 +929,8 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        if (!async)
                return -ENOMEM;
 
-       async->inode = inode;
+       async->private_data = private_data;
+       async->fs_info = fs_info;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
@@ -975,7 +976,7 @@ static int btree_csum_one_bio(struct bio *bio)
        return ret;
 }
 
-static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
+static int __btree_submit_bio_start(void *private_data, struct bio *bio,
                                    int mirror_num, unsigned long bio_flags,
                                    u64 bio_offset)
 {
@@ -986,10 +987,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
        return btree_csum_one_bio(bio);
 }
 
-static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
+static int __btree_submit_bio_done(void *private_data, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
 {
+       struct inode *inode = private_data;
        int ret;
 
        /*
@@ -1015,10 +1017,11 @@ static int check_async_write(unsigned long bio_flags)
        return 1;
 }
 
-static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
+static int btree_submit_bio_hook(void *private_data, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
 {
+       struct inode *inode = private_data;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(bio_flags);
        int ret;
@@ -1043,8 +1046,8 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
-               ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
-                                         bio_offset,
+               ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
+                                         bio_offset, private_data,
                                          __btree_submit_bio_start,
                                          __btree_submit_bio_done);
        }
@@ -1347,8 +1350,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        if (!dummy)
-               extent_io_tree_init(&root->dirty_log_pages,
-                                    fs_info->btree_inode->i_mapping);
+               extent_io_tree_init(&root->dirty_log_pages, NULL);
 
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
@@ -2309,7 +2311,7 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
        inode->i_mapping->a_ops = &btree_aops;
 
        RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
-       extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+       extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
        BTRFS_I(inode)->io_tree.track_uptodate = 0;
        extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
@@ -2626,7 +2628,6 @@ int open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);
        spin_lock_init(&fs_info->defrag_inodes_lock);
-       spin_lock_init(&fs_info->free_chunk_lock);
        spin_lock_init(&fs_info->tree_mod_seq_lock);
        spin_lock_init(&fs_info->super_lock);
        spin_lock_init(&fs_info->qgroup_op_lock);
@@ -2667,7 +2668,7 @@ int open_ctree(struct super_block *sb,
        fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
        fs_info->metadata_ratio = 0;
        fs_info->defrag_inodes = RB_ROOT;
-       fs_info->free_chunk_space = 0;
+       atomic64_set(&fs_info->free_chunk_space, 0);
        fs_info->tree_mod_log = RB_ROOT;
        fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
        fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
@@ -2704,10 +2705,8 @@ int open_ctree(struct super_block *sb,
        fs_info->block_group_cache_tree = RB_ROOT;
        fs_info->first_logical_byte = (u64)-1;
 
-       extent_io_tree_init(&fs_info->freed_extents[0],
-                            fs_info->btree_inode->i_mapping);
-       extent_io_tree_init(&fs_info->freed_extents[1],
-                            fs_info->btree_inode->i_mapping);
+       extent_io_tree_init(&fs_info->freed_extents[0], NULL);
+       extent_io_tree_init(&fs_info->freed_extents[1], NULL);
        fs_info->pinned_extents = &fs_info->freed_extents[0];
        set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
 
@@ -3467,10 +3466,12 @@ static int write_dev_supers(struct btrfs_device *device,
                 * we fua the first super.  The others we allow
                 * to go down lazy.
                 */
-               if (i == 0)
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
-               else
+               if (i == 0) {
+                       ret = btrfsic_submit_bh(REQ_OP_WRITE,
+                                               REQ_SYNC | REQ_FUA, bh);
+               } else {
                        ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+               }
                if (ret)
                        errors++;
        }
@@ -3507,6 +3508,10 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
        if (wait) {
                bio = device->flush_bio;
                if (!bio)
+                       /*
+                        * The flush bio allocation failed with ENOMEM; we
+                        * still return 0 here because it's not a device error.
+                        */
                        return 0;
 
                wait_for_completion(&device->flush_wait);
@@ -3535,7 +3540,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
        bio->bi_end_io = btrfs_end_empty_barrier;
        bio->bi_bdev = device->bdev;
-       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
        init_completion(&device->flush_wait);
        bio->bi_private = &device->flush_wait;
        device->flush_bio = bio;
@@ -3546,6 +3551,32 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
        return 0;
 }
 
+static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
+{
+       int submit_flush_error = 0;
+       int dev_flush_error = 0;
+       struct btrfs_device *dev;
+       int tolerance;
+
+       list_for_each_entry_rcu(dev, &fsdevs->devices, dev_list) {
+               if (!dev->bdev) {
+                       submit_flush_error++;
+                       dev_flush_error++;
+                       continue;
+               }
+               if (dev->last_flush_error == -ENOMEM)
+                       submit_flush_error++;
+               if (dev->last_flush_error && dev->last_flush_error != -ENOMEM)
+                       dev_flush_error++;
+       }
+
+       tolerance = fsdevs->fs_info->num_tolerated_disk_barrier_failures;
+       if (submit_flush_error > tolerance || dev_flush_error > tolerance)
+               return -EIO;
+
+       return 0;
+}
+
 /*
  * send an empty flush down to each device in parallel,
  * then wait for them
@@ -3573,6 +3604,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
                ret = write_dev_flush(dev, 0);
                if (ret)
                        errors_send++;
+               dev->last_flush_error = ret;
        }
 
        /* wait for all the barriers */
@@ -3587,12 +3619,30 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
                        continue;
 
                ret = write_dev_flush(dev, 1);
-               if (ret)
+               if (ret) {
+                       dev->last_flush_error = ret;
                        errors_wait++;
+               }
+       }
+
+       /*
+        * Try hard in case of flush. Let's say, in RAID1 we have
+        * the following situation:
+        *  dev1: EIO dev2: ENOMEM
+        * This is not a fatal error, as we hope to recover from
+        * ENOMEM on the next attempt to flush.
+        * But the following is considered fatal:
+        *  dev1: ENOMEM dev2: ENOMEM
+        *  dev1: bdev == NULL dev2: ENOMEM
+        */
+       if (errors_send || errors_wait) {
+               /*
+                * We need the status of every disk to arrive at the
+                * overall volume status, so the error checking is
+                * pushed out to a separate loop.
+                */
+               return check_barrier_error(info->fs_devices);
        }
-       if (errors_send > info->num_tolerated_disk_barrier_failures ||
-           errors_wait > info->num_tolerated_disk_barrier_failures)
-               return -EIO;
        return 0;
 }
 
@@ -4636,6 +4686,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
        return 0;
 }
 
+static struct btrfs_fs_info *btree_fs_info(void *private_data)
+{
+       struct inode *inode = private_data;
+       return btrfs_sb(inode->i_sb);
+}
+
 static const struct extent_io_ops btree_extent_io_ops = {
        /* mandatory callbacks */
        .submit_bio_hook = btree_submit_bio_hook,
@@ -4643,6 +4699,8 @@ static const struct extent_io_ops btree_extent_io_ops = {
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
        .readpage_io_failed_hook = btree_io_failed_hook,
+       .set_range_writeback = btrfs_set_range_writeback,
+       .tree_fs_info = btree_fs_info,
 
        /* optional callbacks */
 };
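
For the reworked barrier handling above, a small stand-alone C sketch (user-space, illustrative types only, not the kernel structures) of the accounting that check_barrier_error() performs may help: submit-side failures (ENOMEM or a missing bdev) and real flush failures are counted separately, and -EIO is returned only when either count exceeds the tolerated number of disk barrier failures, so a RAID1 pair with one EIO and one ENOMEM still survives while ENOMEM on both devices does not.

/*
 * Stand-alone sketch of the new barrier accounting, using made-up types.
 * Devices that failed to even submit a flush (ENOMEM, or no bdev) and
 * devices whose flush really failed are counted separately; the volume
 * is only declared failed when either count exceeds the tolerance.
 */
#include <stdio.h>
#include <errno.h>

struct dev { int has_bdev; int last_flush_error; };

static int check_barrier_error(const struct dev *devs, int ndevs, int tolerance)
{
        int submit_flush_error = 0;     /* ENOMEM / missing bdev */
        int dev_flush_error = 0;        /* real device errors    */
        int i;

        for (i = 0; i < ndevs; i++) {
                if (!devs[i].has_bdev) {
                        submit_flush_error++;
                        dev_flush_error++;
                        continue;
                }
                if (devs[i].last_flush_error == -ENOMEM)
                        submit_flush_error++;
                if (devs[i].last_flush_error && devs[i].last_flush_error != -ENOMEM)
                        dev_flush_error++;
        }

        if (submit_flush_error > tolerance || dev_flush_error > tolerance)
                return -EIO;
        return 0;
}

int main(void)
{
        struct dev ok[2]    = { { 1, -EIO },    { 1, -ENOMEM } };
        struct dev fatal[2] = { { 1, -ENOMEM }, { 1, -ENOMEM } };

        printf("EIO + ENOMEM -> %d\n", check_barrier_error(ok, 2, 1));    /* 0    */
        printf("ENOMEM both  -> %d\n", check_barrier_error(fatal, 2, 1)); /* -EIO */
        return 0;
}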