btrfs: rename __btrfs_close_devices to close_fs_devices
[sfrench/cifs-2.6.git] fs/btrfs/volumes.c
index 292266f6ab9c9d8dfa18422998ec1884ae57369d..9400f3935d275bc288c0a1c21589df6ebf6d6e52 100644
@@ -197,6 +197,41 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
  *     device_list_mutex
  *       chunk_mutex
  *     balance_mutex
+ *
+ *
+ * Exclusive operations, BTRFS_FS_EXCL_OP
+ * ======================================
+ *
+ * Maintains the exclusivity of the following operations that apply to the
+ * whole filesystem and cannot run in parallel.
+ *
+ * - Balance (*)
+ * - Device add
+ * - Device remove
+ * - Device replace (*)
+ * - Resize
+ *
+ * The device operations (as above) can be in one of the following states:
+ *
+ * - Running state
+ * - Paused state
+ * - Completed state
+ *
+ * Only device operations marked with (*) can go into the Paused state for the
+ * following reasons:
+ *
+ * - ioctl (only Balance can be Paused through ioctl)
+ * - filesystem remounted as read-only
+ * - filesystem unmounted and mounted as read-only
+ * - system power-cycle and filesystem mounted as read-only
+ * - filesystem or device errors leading to forced read-only
+ *
+ * The BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations and
+ * remains set for the whole duration of the Paused state.
+ * A device operation in the Paused or Running state can be canceled or
+ * resumed either by ioctl (Balance only) or when the filesystem is
+ * remounted read-write. The flag is cleared when the device operation is
+ * canceled or completed.
  */
 
 DEFINE_MUTEX(uuid_mutex);
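In the kernel this flag is a bit in fs_info->flags, claimed with test_and_set_bit() and released with clear_bit(). As a rough illustration of the claim/release protocol described in the comment above, here is a minimal user-space sketch using C11 atomics; fs_flags, try_start_excl_op() and end_excl_op() are made-up names for illustration, not btrfs identifiers.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FS_EXCL_OP_BIT (1u << 0)

static atomic_uint fs_flags;    /* stands in for fs_info->flags */

/* Claim exclusivity; fails if another exclusive operation is running. */
static bool try_start_excl_op(void)
{
        unsigned int old = atomic_fetch_or(&fs_flags, FS_EXCL_OP_BIT);

        return !(old & FS_EXCL_OP_BIT);
}

/* Release exclusivity once the operation is canceled or completed. */
static void end_excl_op(void)
{
        atomic_fetch_and(&fs_flags, ~FS_EXCL_OP_BIT);
}

int main(void)
{
        if (!try_start_excl_op()) {
                fprintf(stderr, "another exclusive operation is running\n");
                return 1;
        }
        /* ... balance / device add / remove / replace / resize ... */
        /* A paused operation would return here without clearing the bit. */
        end_excl_op();
        return 0;
}

A paused Balance or Device replace simply skips the release step, which matches the rule above that BTRFS_FS_EXCL_OP stays set across the Paused state.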
@@ -227,7 +262,7 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
        INIT_LIST_HEAD(&fs_devs->devices);
        INIT_LIST_HEAD(&fs_devs->resized_devices);
        INIT_LIST_HEAD(&fs_devs->alloc_list);
-       INIT_LIST_HEAD(&fs_devs->list);
+       INIT_LIST_HEAD(&fs_devs->fs_list);
        if (fsid)
                memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
 
@@ -273,8 +308,8 @@ void __exit btrfs_cleanup_fs_uuids(void)
 
        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
-                                       struct btrfs_fs_devices, list);
-               list_del(&fs_devices->list);
+                                       struct btrfs_fs_devices, fs_list);
+               list_del(&fs_devices->fs_list);
                free_fs_devices(fs_devices);
        }
 }
@@ -343,7 +378,7 @@ static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 {
        struct btrfs_fs_devices *fs_devices;
 
-       list_for_each_entry(fs_devices, &fs_uuids, list) {
+       list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
@@ -607,7 +642,7 @@ static void btrfs_free_stale_devices(const char *path,
        struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
        struct btrfs_device *dev, *tmp_dev;
 
-       list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, list) {
+       list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) {
 
                if (fs_devs->opened)
                        continue;
@@ -632,7 +667,7 @@ static void btrfs_free_stale_devices(const char *path,
                        /* delete the stale device */
                        if (fs_devs->num_devices == 1) {
                                btrfs_sysfs_remove_fsid(fs_devs);
-                               list_del(&fs_devs->list);
+                               list_del(&fs_devs->fs_list);
                                free_fs_devices(fs_devs);
                                break;
                        } else {
@@ -732,7 +767,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                if (IS_ERR(fs_devices))
                        return ERR_CAST(fs_devices);
 
-               list_add(&fs_devices->list, &fs_uuids);
+               list_add(&fs_devices->fs_list, &fs_uuids);
 
                device = NULL;
        } else {
@@ -1005,7 +1040,7 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
        new_device->fs_devices = device->fs_devices;
 }
 
-static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
 {
        struct btrfs_device *device, *tmp;
        struct list_head pending_put;
@@ -1050,7 +1085,7 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
        int ret;
 
        mutex_lock(&uuid_mutex);
-       ret = __btrfs_close_devices(fs_devices);
+       ret = close_fs_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
@@ -1060,7 +1095,7 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
-               __btrfs_close_devices(fs_devices);
+               close_fs_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        return ret;
@@ -1069,14 +1104,13 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
 {
-       struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct btrfs_device *latest_dev = NULL;
        int ret = 0;
 
        flags |= FMODE_EXCL;
 
-       list_for_each_entry(device, head, dev_list) {
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
                /* Just open everything we can; ignore failures here */
                if (btrfs_open_one_device(fs_devices, device, flags, holder))
                        continue;
@@ -2030,7 +2064,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
                        fs_devices = fs_devices->seed;
                }
                cur_devices->seed = NULL;
-               __btrfs_close_devices(cur_devices);
+               close_fs_devices(cur_devices);
                free_fs_devices(cur_devices);
        }
 
@@ -2112,7 +2146,7 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
                        tmp_fs_devices = tmp_fs_devices->seed;
                }
                fs_devices->seed = NULL;
-               __btrfs_close_devices(fs_devices);
+               close_fs_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
 }
@@ -2259,7 +2293,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
                return PTR_ERR(old_devices);
        }
 
-       list_add(&old_devices->list, &fs_uuids);
+       list_add(&old_devices->fs_list, &fs_uuids);
 
        memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
        seed_devices->opened = 1;
@@ -4052,6 +4086,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
                return 0;
        }
 
+       /*
+        * A ro->rw remount sequence should continue with the paused balance
+        * regardless of who paused it (currently either the system or the
+        * user), so set the resume flag.
+        */
+       spin_lock(&fs_info->balance_lock);
+       fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
+       spin_unlock(&fs_info->balance_lock);
+
        tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
        return PTR_ERR_OR_ZERO(tsk);
 }
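The hunk above sets BTRFS_BALANCE_RESUME under balance_lock before kthread_run() starts balance_kthread, so the worker observes the resume decision. Below is a minimal user-space analogue of that ordering using pthreads; the struct and names are invented for illustration and are not btrfs code.

#include <pthread.h>
#include <stdio.h>

#define BALANCE_RESUME (1u << 0)

struct balance_ctl {
        pthread_mutex_t lock;
        unsigned int flags;
};

static struct balance_ctl bctl = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
};

static void *balance_worker(void *arg)
{
        struct balance_ctl *ctl = arg;
        unsigned int flags;

        pthread_mutex_lock(&ctl->lock);
        flags = ctl->flags;
        pthread_mutex_unlock(&ctl->lock);

        printf("%s\n", (flags & BALANCE_RESUME) ? "resuming balance"
                                                : "starting balance");
        return NULL;
}

int main(void)
{
        pthread_t tid;

        /* Publish the resume decision before the worker can look at it. */
        pthread_mutex_lock(&bctl.lock);
        bctl.flags |= BALANCE_RESUME;
        pthread_mutex_unlock(&bctl.lock);

        pthread_create(&tid, NULL, balance_worker, &bctl);
        pthread_join(tid, NULL);
        return 0;
}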
@@ -6684,7 +6727,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
        }
 
        if (!fs_devices->seeding) {
-               __btrfs_close_devices(fs_devices);
+               close_fs_devices(fs_devices);
                free_fs_devices(fs_devices);
                fs_devices = ERR_PTR(-EINVAL);
                goto out;