btrfs: move btrfs_raid_group values to btrfs_raid_attr table
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 51b5e2da708c4e77566cb686aea81b4a1cfa3497..fdd6ac9ee2c63b5d606af87416359636d6c7b4ee 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -744,12 +744,12 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
 }
 
 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
-                            u64 owner, u64 root_objectid)
+                            bool metadata, u64 root_objectid)
 {
        struct btrfs_space_info *space_info;
        u64 flags;
 
-       if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+       if (metadata) {
                if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
                        flags = BTRFS_BLOCK_GROUP_SYSTEM;
                else
@@ -2200,8 +2200,11 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                                 &old_ref_mod, &new_ref_mod);
        }
 
-       if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
-               add_pinned_bytes(fs_info, -num_bytes, owner, root_objectid);
+       if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0) {
+               bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
+
+               add_pinned_bytes(fs_info, -num_bytes, metadata, root_objectid);
+       }
 
        return ret;
 }
@@ -2594,8 +2597,8 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
        delayed_refs->num_heads--;
        rb_erase(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
-       spin_unlock(&delayed_refs->lock);
        spin_unlock(&head->lock);
+       spin_unlock(&delayed_refs->lock);
        atomic_dec(&delayed_refs->num_entries);
 
        trace_run_delayed_ref_head(fs_info, head, 0);
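
The swapped unlocks restore release in reverse order of acquisition: cleanup_ref_head() takes delayed_refs->lock first and then re-takes head->lock, so head->lock is the inner lock and should be dropped first. The general pattern, with illustrative names:

	spin_lock(&outer->lock);	/* acquired first */
	spin_lock(&inner->lock);	/* acquired second, nests inside outer */
	/* ... updates protected by both locks ... */
	spin_unlock(&inner->lock);	/* released in reverse order */
	spin_unlock(&outer->lock);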
@@ -2700,8 +2703,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                 * insert_inline_extent_backref()).
                 */
                spin_lock(&locked_ref->lock);
-               btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
-                                        locked_ref);
+               btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
 
                /*
                 * locked_ref is the head node, so we have to go one
@@ -2710,7 +2712,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                ref = select_delayed_ref(locked_ref);
 
                if (ref && ref->seq &&
-                   btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
+                   btrfs_check_delayed_seq(fs_info, ref->seq)) {
                        spin_unlock(&locked_ref->lock);
                        unselect_delayed_ref_head(delayed_refs, locked_ref);
                        locked_ref = NULL;
@@ -4122,7 +4124,7 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  * returns target flags in extended format or 0 if restripe for this
  * chunk_type is not in progress
  *
- * should be called with either volume_mutex or balance_lock held
+ * should be called with balance_lock held
  */
 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
 {
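
With volume_mutex dropped from the contract, callers serialize on balance_lock alone; the call-site pattern (roughly as used by btrfs_reduce_alloc_profile() in the next hunk) is:

	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	spin_unlock(&fs_info->balance_lock);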
@@ -4178,7 +4180,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
        /* First, mask out the RAID levels which aren't possible */
        for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
                if (num_devices >= btrfs_raid_array[raid_type].devs_min)
-                       allowed |= btrfs_raid_group[raid_type];
+                       allowed |= btrfs_raid_array[raid_type].bg_flag;
        }
        allowed &= flags;
 
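
btrfs_raid_array is the per-profile attribute table in fs/btrfs/volumes.c, indexed by enum btrfs_raid_types; the commit folds the standalone btrfs_raid_group mapping into it as a new bg_flag member, so all per-RAID-level constants live in one table. A sketch of the entry shape and the RAID1 row, with field names as of the 4.18-era definition (treat the exact member list as illustrative):

	struct btrfs_raid_attr {
		int sub_stripes;	/* sub_stripes info for map */
		int dev_stripes;	/* stripes per dev */
		int devs_max;		/* max devs to use */
		int devs_min;		/* min devs needed */
		int tolerated_failures; /* max tolerated failed devs */
		int devs_increment;	/* ndevs must be a multiple of this */
		int ncopies;		/* how many copies of the data */
		u64 bg_flag;		/* block group flag of the type */
	};

	[BTRFS_RAID_RAID1] = {
		.sub_stripes		= 1,
		.dev_stripes		= 1,
		.devs_max		= 2,
		.devs_min		= 2,
		.tolerated_failures	= 1,
		.devs_increment		= 2,
		.ncopies		= 2,
		.bg_flag		= BTRFS_BLOCK_GROUP_RAID1,
	},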
@@ -4678,12 +4680,14 @@ again:
        trans->allocating_chunk = false;
 
        spin_lock(&space_info->lock);
-       if (ret < 0 && ret != -ENOSPC)
-               goto out;
-       if (ret)
-               space_info->full = 1;
-       else
+       if (ret < 0) {
+               if (ret == -ENOSPC)
+                       space_info->full = 1;
+               else
+                       goto out;
+       } else {
                ret = 1;
+       }
 
        space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
 out:
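
The rewritten branch keeps the same behavior while making the three outcomes of the chunk allocation explicit; in summary:

	/*
	 * ret < 0, ret != -ENOSPC : hard error, skip the space_info
	 *			     updates and bail out (goto out)
	 * ret == -ENOSPC	    : no room for another chunk, remember it
	 *			     in space_info->full to avoid retrying
	 * ret == 0		    : a chunk was allocated, report it as 1
	 */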
@@ -7266,7 +7270,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
        }
 out:
        if (pin)
-               add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
+               add_pinned_bytes(fs_info, buf->len, true,
                                 root->root_key.objectid);
 
        if (last_ref) {
@@ -7320,8 +7324,11 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
                                                 &old_ref_mod, &new_ref_mod);
        }
 
-       if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
-               add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
+       if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0) {
+               bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
+
+               add_pinned_bytes(fs_info, num_bytes, metadata, root_objectid);
+       }
 
        return ret;
 }
@@ -7373,24 +7380,6 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
        return ret;
 }
 
-static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
-       [BTRFS_RAID_RAID10]     = "raid10",
-       [BTRFS_RAID_RAID1]      = "raid1",
-       [BTRFS_RAID_DUP]        = "dup",
-       [BTRFS_RAID_RAID0]      = "raid0",
-       [BTRFS_RAID_SINGLE]     = "single",
-       [BTRFS_RAID_RAID5]      = "raid5",
-       [BTRFS_RAID_RAID6]      = "raid6",
-};
-
-static const char *get_raid_name(enum btrfs_raid_types type)
-{
-       if (type >= BTRFS_NR_RAID_TYPES)
-               return NULL;
-
-       return btrfs_raid_type_names[type];
-}
-
 enum btrfs_loop_type {
        LOOP_CACHING_NOWAIT = 0,
        LOOP_CACHING_WAIT = 1,
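
The name table and get_raid_name() removed above are not lost: a companion patch in the same series moves the strings into btrfs_raid_attr as well, and the helper is re-created next to the table in volumes.c. A sketch of the replacement, assuming a raid_name member added by that patch:

	const char *get_raid_name(enum btrfs_raid_types type)
	{
		if (type >= BTRFS_NR_RAID_TYPES)
			return NULL;

		/* raid_name: assumed member added by the companion patch */
		return btrfs_raid_array[type].raid_name;
	}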