btrfs: remove btrfs_dev_replace::read_locks
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 05dc3c17cb62aa38dc7a18adc886475ac22fd80b..79903243877bcc2d7ebd76900ef6b4bda9d701ba 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -125,8 +125,8 @@ struct async_submit_bio {
  * Different roots are used for different purposes and may nest inside each
  * other and they require separate keysets.  As lockdep keys should be
  * static, assign keysets according to the purpose of the root as indicated
- * by btrfs_root->objectid.  This ensures that all special purpose roots
- * have separate keysets.
+ * by btrfs_root->root_key.objectid.  This ensures that all special purpose
+ * roots have separate keysets.
  *
  * Lock-nesting across peer nodes is always done with the immediate parent
  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
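
The keyset scheme the updated comment describes can be illustrated with a
simplified sketch, loosely modeled on btrfs_set_buffer_lockdep_class(); the
table contents below are abbreviated, not the kernel's full table:

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* One static lock_class_key per tree level, per special-purpose root. */
static struct btrfs_lockdep_keyset {
	u64 id;				/* root_key.objectid, 0 = default */
	const char *name_stem;
	char names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	/* ... one entry per special-purpose root ... */
	{ .id = 0,				.name_stem = "tree"	},
};

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	/* id 0 terminates the table and doubles as the default keyset */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock, &ks->keys[level],
				   ks->names[level]);
}
#endif
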
@@ -1148,7 +1148,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        root->state = 0;
        root->orphan_cleanup_state = 0;
 
-       root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->nr_delalloc_inodes = 0;
@@ -2156,7 +2155,6 @@ static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
 {
        mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
        rwlock_init(&fs_info->dev_replace.lock);
-       atomic_set(&fs_info->dev_replace.read_locks, 0);
        atomic_set(&fs_info->dev_replace.blocking_readers, 0);
        init_waitqueue_head(&fs_info->replace_wait);
        init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
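
With the read_locks sanity counter gone, the remaining state is the plain
rwlock, the blocking_readers count, and the two wait queues initialized
above. A hedged sketch of the pattern these fields support follows; the
demo_* helper names are illustrative, not the kernel's:

/* Plain reader: just the rwlock. */
static void demo_read_lock(struct btrfs_dev_replace *dr)
{
	read_lock(&dr->lock);
}

/* A reader that must sleep flags itself and drops the rwlock. */
static void demo_set_blocking(struct btrfs_dev_replace *dr)
{
	atomic_inc(&dr->blocking_readers);	/* still counted as a reader */
	read_unlock(&dr->lock);			/* safe to sleep from here on */
}

static void demo_clear_blocking(struct btrfs_dev_replace *dr)
{
	read_lock(&dr->lock);
	/* last blocking reader wakes any writer parked on read_lock_wq */
	if (atomic_dec_and_test(&dr->blocking_readers))
		wake_up(&dr->read_lock_wq);
}

/* Writer: the rwlock alone is not enough, blocking readers hold no lock. */
static void demo_write_lock(struct btrfs_dev_replace *dr)
{
again:
	wait_event(dr->read_lock_wq,
		   atomic_read(&dr->blocking_readers) == 0);
	write_lock(&dr->lock);
	if (atomic_read(&dr->blocking_readers)) {
		write_unlock(&dr->lock);
		goto again;
	}
}
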
@@ -4204,7 +4202,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                return ret;
        }
 
-       while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+       while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
                struct btrfs_delayed_ref_head *head;
                struct rb_node *n;
                bool pin_bytes = false;
@@ -4222,11 +4220,11 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                        continue;
                }
                spin_lock(&head->lock);
-               while ((n = rb_first(&head->ref_tree)) != NULL) {
+               while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
                        ref = rb_entry(n, struct btrfs_delayed_ref_node,
                                       ref_node);
                        ref->in_tree = 0;
-                       rb_erase(&ref->ref_node, &head->ref_tree);
+                       rb_erase_cached(&ref->ref_node, &head->ref_tree);
                        RB_CLEAR_NODE(&ref->ref_node);
                        if (!list_empty(&ref->add_list))
                                list_del(&ref->add_list);
@@ -4240,7 +4238,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                if (head->processing == 0)
                        delayed_refs->num_heads_ready--;
                atomic_dec(&delayed_refs->num_entries);
-               rb_erase(&head->href_node, &delayed_refs->href_root);
+               rb_erase_cached(&head->href_node, &delayed_refs->href_root);
                RB_CLEAR_NODE(&head->href_node);
                spin_unlock(&head->lock);
                spin_unlock(&delayed_refs->lock);
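
The rb_first()/rb_erase() to _cached conversion above works because
delayed_refs->href_root and head->ref_tree are now struct rb_root_cached,
which keeps a pointer to the leftmost node: rb_first_cached() becomes O(1)
instead of an O(log n) descent, at the cost of routing every insert and
erase through the _cached helpers so the cached pointer stays coherent.
A minimal sketch of the pattern, assuming <linux/rbtree.h>; the demo types
and names are illustrative, not btrfs code:

struct demo_ref {
	u64 bytenr;
	struct rb_node node;
};

static struct rb_root_cached demo_root = RB_ROOT_CACHED;

static void demo_insert(struct demo_ref *ref)
{
	struct rb_node **p = &demo_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct demo_ref *cur = rb_entry(*p, struct demo_ref, node);

		parent = *p;
		if (ref->bytenr < cur->bytenr) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}
	rb_link_node(&ref->node, parent, p);
	/* tell the rbtree whether the new node is the new leftmost */
	rb_insert_color_cached(&ref->node, &demo_root, leftmost);
}

static void demo_drain(void)
{
	struct rb_node *n;

	/* same walk-and-erase shape as btrfs_destroy_delayed_refs() */
	while ((n = rb_first_cached(&demo_root)) != NULL) {
		rb_erase_cached(n, &demo_root);
		RB_CLEAR_NODE(n);
	}
}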