btrfs: replace btrfs_set_lock_blocking_rw with appropriate helpers
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8da2f380d3c0e45e83bff51c23b5c8adc88cb049..74a696d9cd683fd47ca77fb5176a000e19e223e4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -17,6 +17,7 @@
 #include <linux/semaphore.h>
 #include <linux/error-injection.h>
 #include <linux/crc32c.h>
+#include <linux/sched/mm.h>
 #include <asm/unaligned.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -341,7 +342,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 
        if (need_lock) {
                btrfs_tree_read_lock(eb);
-               btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+               btrfs_set_lock_blocking_read(eb);
        }
 
        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
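
For reference, the read-specific helper this patch switches to does only the
read half of what btrfs_set_lock_blocking_rw() did. A simplified sketch,
assuming the 5.0-era extent buffer locking fields (not verbatim from
locking.c):

/*
 * Convert a spinning read lock on an extent buffer into a blocking one.
 * Sketch of btrfs_set_lock_blocking_read(); mirrors the BTRFS_READ_LOCK
 * branch of the old rw helper.
 */
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/* A nested read lock held by this task stays spinning. */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	atomic_dec(&eb->spinning_readers);
	read_unlock(&eb->lock);
}

Dropping the rw argument means callers can no longer pass a mismatched lock
type, and the write-path branch is not compiled into read-side call sites.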
@@ -1175,6 +1176,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
+       INIT_LIST_HEAD(&root->reloc_dirty_list);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->inode_lock);
@@ -1218,6 +1220,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        root->anon_dev = 0;
 
        spin_lock_init(&root->root_item_lock);
+       btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
 }
 
 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
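
btrfs_qgroup_init_swapped_blocks() comes from the qgroup subtree-swap
detection series. A sketch of the expected initializer, assuming the
structure holds one rbtree per tree level plus a spinlock and a swapped
flag (field names unverified against this tree):

void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks)
{
	int i;

	spin_lock_init(&swapped_blocks->lock);
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		swapped_blocks->blocks[i] = RB_ROOT;	/* per-level rbtree */
	swapped_blocks->swapped = false;
}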
@@ -1258,10 +1261,17 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
+       unsigned int nofs_flag;
        int ret = 0;
        uuid_le uuid = NULL_UUID_LE;
 
+       /*
+        * We're holding a transaction handle, so use a NOFS memory allocation
+        * context to avoid deadlock if reclaim happens.
+        */
+       nofs_flag = memalloc_nofs_save();
        root = btrfs_alloc_root(fs_info, GFP_KERNEL);
+       memalloc_nofs_restore(nofs_flag);
        if (!root)
                return ERR_PTR(-ENOMEM);
 
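memalloc_nofs_save()/memalloc_nofs_restore() (provided by the
<linux/sched/mm.h> include added above) scope the NOFS constraint to the
task rather than to each allocation site. The generic pattern, shown with a
hypothetical helper:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical illustration of the scoped-NOFS pattern. */
static void *alloc_while_holding_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *ptr;

	/*
	 * Everything allocated between save and restore behaves as if
	 * it were GFP_NOFS, so direct reclaim cannot re-enter the
	 * filesystem and deadlock against the transaction we hold.
	 */
	nofs_flag = memalloc_nofs_save();
	ptr = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	return ptr;
}

This is why the GFP_KERNEL in btrfs_alloc_root() above is safe: the saved
context masks __GFP_FS for the duration.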
@@ -1682,6 +1692,8 @@ static int cleaner_kthread(void *arg)
        while (1) {
                again = 0;
 
+               set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+
                /* Make the cleaner go to sleep early. */
                if (btrfs_need_cleaner_sleep(fs_info))
                        goto sleep;
@@ -1728,6 +1740,7 @@ static int cleaner_kthread(void *arg)
                 */
                btrfs_delete_unused_bgs(fs_info);
 sleep:
+               clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
                if (kthread_should_park())
                        kthread_parkme();
                if (kthread_should_stop())
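
The new flag brackets one iteration of the cleaner's work loop with
set_bit()/clear_bit() so other code can observe whether the cleaner is
actively running rather than parked or sleeping. A reader would use the
standard test_bit() pattern; the helper below is hypothetical, the real
consumer lives elsewhere in this series:

/* Hypothetical checker for illustration. */
static bool cleaner_is_running(const struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
}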
@@ -4201,6 +4214,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
                spin_lock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&fs_info->ordered_root_lock);
+
+       /*
+        * We need this here because if we've been flipped read-only we won't
+        * get sync() from the umount, so we need to make sure any ordered
+        * extents whose dirty pages haven't started writeout yet actually
+        * get run and error out properly.
+        */
+       btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 }
 
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -4227,16 +4248,9 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
-               if (!mutex_trylock(&head->mutex)) {
-                       refcount_inc(&head->refs);
-                       spin_unlock(&delayed_refs->lock);
-
-                       mutex_lock(&head->mutex);
-                       mutex_unlock(&head->mutex);
-                       btrfs_put_delayed_ref_head(head);
-                       spin_lock(&delayed_refs->lock);
+               if (btrfs_delayed_ref_lock(delayed_refs, head))
                        continue;
-               }
+
                spin_lock(&head->lock);
                while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
                        ref = rb_entry(n, struct btrfs_delayed_ref_node,
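
btrfs_delayed_ref_lock() is the shared helper that replaces the open-coded
trylock dance deleted above. A sketch of its expected body, matching the
deleted logic plus a recheck that the head was not processed while we slept
(the exact form in delayed-ref.c may differ):

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);

	/* Fast path: take the head's mutex without dropping the spinlock. */
	if (mutex_trylock(&head->mutex))
		return 0;

	/* Contended: pin the head, drop the spinlock, sleep on the mutex. */
	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		/* Someone processed the head while we slept; caller retries. */
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

A nonzero return is why the caller above simply does 'continue' to restart
from the rbtree root.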
@@ -4252,12 +4266,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                if (head->must_insert_reserved)
                        pin_bytes = true;
                btrfs_free_delayed_extent_op(head->extent_op);
-               delayed_refs->num_heads--;
-               if (head->processing == 0)
-                       delayed_refs->num_heads_ready--;
-               atomic_dec(&delayed_refs->num_entries);
-               rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-               RB_CLEAR_NODE(&head->href_node);
+               btrfs_delete_ref_head(delayed_refs, head);
                spin_unlock(&head->lock);
                spin_unlock(&delayed_refs->lock);
                mutex_unlock(&head->mutex);
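
btrfs_delete_ref_head() is a direct factoring of the six lines removed
above, so its body can be reconstructed almost mechanically (the lockdep
asserts are an assumption):

/* Unhook a ref head from the href rbtree and fix up the counters. */
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}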
@@ -4265,6 +4274,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                if (pin_bytes)
                        btrfs_pin_extent(fs_info, head->bytenr,
                                         head->num_bytes, 1);
+               btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
                btrfs_put_delayed_ref_head(head);
                cond_resched();
                spin_lock(&delayed_refs->lock);
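
btrfs_cleanup_ref_head_accounting() makes the abort path drop the same
per-head accounting that normal delayed ref processing drops when it
retires a head. A rough sketch, assuming the body mirrors the accounting
block in cleanup_ref_head() from the same series (fields and helpers
unverified):

void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
				       struct btrfs_delayed_ref_root *delayed_refs,
				       struct btrfs_delayed_ref_head *head)
{
	/* A net-negative ref mod means the bytes were counted as pinned. */
	if (head->total_ref_mod < 0) {
		struct btrfs_space_info *space_info;
		u64 flags;

		if (head->is_data)
			flags = BTRFS_BLOCK_GROUP_DATA;
		else if (head->is_system)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
		space_info = __find_space_info(fs_info, flags);
		percpu_counter_add_batch(&space_info->bytes_pinned,
					 -head->num_bytes,
					 BTRFS_TOTAL_BYTES_PINNED_BATCH);

		/* Data heads also reserved csum items; undo that too. */
		if (head->is_data) {
			spin_lock(&delayed_refs->lock);
			delayed_refs->pending_csums -= head->num_bytes;
			spin_unlock(&delayed_refs->lock);
		}
	}
}

Without this, aborting a transaction with queued delayed refs would leak
space_info->bytes_pinned and pending_csums, which is exactly what the hunk
fixes.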