btrfs: factor out devs_max setting in __btrfs_alloc_chunk
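
The diff below folds the per-type "if (!devs_max)" fallbacks in
__btrfs_alloc_chunk() into a single assignment right after the
btrfs_raid_array lookup. The surrounding hunks replace several
open-coded profile checks (chunk_drange_filter(), the allowed-profile
masks in btrfs_balance(), btrfs_chunk_max_errors(),
btrfs_bg_type_to_factor()) with lookups of the ncopies, nparity,
devs_min and tolerated_failures fields of btrfs_raid_array, and switch
users of fs_info->mapping_tree from the btrfs_mapping_tree wrapper to a
plain extent_map_tree, dropping btrfs_mapping_init().

As an illustration of the arithmetic behind the new calc_data_stripes()
helper, here is a minimal user-space sketch. It assumes the usual
btrfs_raid_array values (RAID5: nparity=1, RAID6: nparity=2,
RAID1/RAID10/DUP: ncopies=2); the raid_attr struct, data_stripes() and
the test harness are illustrative only and not part of the patch:

    #include <stdio.h>

    struct raid_attr {
            int ncopies;    /* copies stored of each data stripe */
            int nparity;    /* parity stripes per stripe set, 0 if none */
    };

    /* mirrors the logic of calc_data_stripes() in the hunk below */
    static int data_stripes(struct raid_attr a, int num_stripes)
    {
            if (a.nparity)                          /* RAID5/RAID6 */
                    return num_stripes - a.nparity;
            return num_stripes / a.ncopies;         /* SINGLE/DUP/RAID0/1/10 */
    }

    int main(void)
    {
            struct raid_attr raid6  = { .ncopies = 1, .nparity = 2 };
            struct raid_attr raid10 = { .ncopies = 2, .nparity = 0 };
            struct raid_attr single = { .ncopies = 1, .nparity = 0 };

            printf("RAID6,  6 stripes -> %d data stripes\n", data_stripes(raid6, 6));  /* 4 */
            printf("RAID10, 4 stripes -> %d data stripes\n", data_stripes(raid10, 4)); /* 2 */
            printf("SINGLE, 1 stripe  -> %d data stripes\n", data_stripes(single, 1)); /* 1 */
            return 0;
    }

With the helper in place, chunk_drange_filter() no longer has to
special-case DUP/RAID1/RAID10/RAID5/RAID6 when computing the per-device
stripe factor; it simply consults the table.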
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1c2a6e4b39da7205a17f8c31ae0971a8abeed821..2e0860d14ad2f19a00fc9eb870fd5b6dbe659d55 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1818,7 +1818,7 @@ static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
        struct rb_node *n;
        u64 ret = 0;
 
-       em_tree = &fs_info->mapping_tree.map_tree;
+       em_tree = &fs_info->mapping_tree;
        read_lock(&em_tree->lock);
        n = rb_last(&em_tree->map.rb_root);
        if (n) {
@@ -2941,7 +2941,7 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
        struct extent_map_tree *em_tree;
        struct extent_map *em;
 
-       em_tree = &fs_info->mapping_tree.map_tree;
+       em_tree = &fs_info->mapping_tree;
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, length);
        read_unlock(&em_tree->lock);
@@ -3474,6 +3474,18 @@ static int chunk_devid_filter(struct extent_buffer *leaf,
        return 1;
 }
 
+static u64 calc_data_stripes(u64 type, int num_stripes)
+{
+       const int index = btrfs_bg_flags_to_raid_index(type);
+       const int ncopies = btrfs_raid_array[index].ncopies;
+       const int nparity = btrfs_raid_array[index].nparity;
+
+       if (nparity)
+               return num_stripes - nparity;
+       else
+               return num_stripes / ncopies;
+}
+
 /* [pstart, pend) */
 static int chunk_drange_filter(struct extent_buffer *leaf,
                               struct btrfs_chunk *chunk,
@@ -3483,22 +3495,15 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
        int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        u64 stripe_offset;
        u64 stripe_length;
+       u64 type;
        int factor;
        int i;
 
        if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
                return 0;
 
-       if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
-            BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
-               factor = num_stripes / 2;
-       } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
-               factor = num_stripes - 1;
-       } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
-               factor = num_stripes - 2;
-       } else {
-               factor = num_stripes;
-       }
+       type = btrfs_chunk_type(leaf, chunk);
+       factor = calc_data_stripes(type, num_stripes);
 
        for (i = 0; i < num_stripes; i++) {
                stripe = btrfs_stripe_nr(chunk, i);
@@ -4047,6 +4052,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
        u64 num_devices;
        unsigned seq;
        bool reducing_integrity;
+       int i;
 
        if (btrfs_fs_closing(fs_info) ||
            atomic_read(&fs_info->balance_pause_req) ||
@@ -4076,15 +4082,11 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
        }
 
        num_devices = btrfs_num_devices(fs_info);
+       allowed = 0;
+       for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
+               if (num_devices >= btrfs_raid_array[i].devs_min)
+                       allowed |= btrfs_raid_array[i].bg_flag;
 
-       allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
-       if (num_devices > 1)
-               allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
-       if (num_devices > 2)
-               allowed |= BTRFS_BLOCK_GROUP_RAID5;
-       if (num_devices > 3)
-               allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
-                           BTRFS_BLOCK_GROUP_RAID6);
        if (validate_convert_profile(&bctl->data, allowed)) {
                int index = btrfs_bg_flags_to_raid_index(bctl->data.target);
 
@@ -4113,11 +4115,16 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
                goto out;
        }
 
-       /* allow to reduce meta or sys integrity only if force set */
-       allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
-                       BTRFS_BLOCK_GROUP_RAID10 |
-                       BTRFS_BLOCK_GROUP_RAID5 |
-                       BTRFS_BLOCK_GROUP_RAID6;
+       /*
+        * Allow to reduce metadata or system integrity only if force set for
+        * profiles with redundancy (copies, parity)
+        */
+       allowed = 0;
+       for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
+               if (btrfs_raid_array[i].ncopies >= 2 ||
+                   btrfs_raid_array[i].tolerated_failures >= 1)
+                       allowed |= btrfs_raid_array[i].bg_flag;
+       }
        do {
                seq = read_seqbegin(&fs_info->profiles_lock);
 
@@ -4949,6 +4956,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        sub_stripes = btrfs_raid_array[index].sub_stripes;
        dev_stripes = btrfs_raid_array[index].dev_stripes;
        devs_max = btrfs_raid_array[index].devs_max;
+       if (!devs_max)
+               devs_max = BTRFS_MAX_DEVS(info);
        devs_min = btrfs_raid_array[index].devs_min;
        devs_increment = btrfs_raid_array[index].devs_increment;
        ncopies = btrfs_raid_array[index].ncopies;
@@ -4957,8 +4966,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        if (type & BTRFS_BLOCK_GROUP_DATA) {
                max_stripe_size = SZ_1G;
                max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
-               if (!devs_max)
-                       devs_max = BTRFS_MAX_DEVS(info);
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
                /* for larger filesystems, use larger metadata chunks */
                if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
@@ -4966,13 +4973,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                else
                        max_stripe_size = SZ_256M;
                max_chunk_size = max_stripe_size;
-               if (!devs_max)
-                       devs_max = BTRFS_MAX_DEVS(info);
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                max_stripe_size = SZ_32M;
                max_chunk_size = 2 * max_stripe_size;
-               if (!devs_max)
-                       devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
        } else {
                btrfs_err(info, "invalid chunk type 0x%llx requested",
                       type);
@@ -5143,7 +5146,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        em->block_len = em->len;
        em->orig_block_len = stripe_size;
 
-       em_tree = &info->mapping_tree.map_tree;
+       em_tree = &info->mapping_tree;
        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        if (ret) {
@@ -5324,20 +5327,9 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
 
 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
 {
-       int max_errors;
-
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
-                        BTRFS_BLOCK_GROUP_RAID10 |
-                        BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_DUP)) {
-               max_errors = 1;
-       } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
-               max_errors = 2;
-       } else {
-               max_errors = 0;
-       }
+       const int index = btrfs_bg_flags_to_raid_index(map->type);
 
-       return max_errors;
+       return btrfs_raid_array[index].tolerated_failures;
 }
 
 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
@@ -5378,21 +5370,16 @@ end:
        return readonly;
 }
 
-void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
-{
-       extent_map_tree_init(&tree->map_tree);
-}
-
-void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
+void btrfs_mapping_tree_free(struct extent_map_tree *tree)
 {
        struct extent_map *em;
 
        while (1) {
-               write_lock(&tree->map_tree.lock);
-               em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
+               write_lock(&tree->lock);
+               em = lookup_extent_mapping(tree, 0, (u64)-1);
                if (em)
-                       remove_extent_mapping(&tree->map_tree, em);
-               write_unlock(&tree->map_tree.lock);
+                       remove_extent_mapping(tree, em);
+               write_unlock(&tree->lock);
                if (!em)
                        break;
                /* once for us */
@@ -6687,7 +6674,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk)
 {
        struct btrfs_fs_info *fs_info = leaf->fs_info;
-       struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+       struct extent_map_tree *map_tree = &fs_info->mapping_tree;
        struct map_lookup *map;
        struct extent_map *em;
        u64 logical;
@@ -6712,9 +6699,9 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
                        return ret;
        }
 
-       read_lock(&map_tree->map_tree.lock);
-       em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
-       read_unlock(&map_tree->map_tree.lock);
+       read_lock(&map_tree->lock);
+       em = lookup_extent_mapping(map_tree, logical, 1);
+       read_unlock(&map_tree->lock);
 
        /* already mapped? */
        if (em && em->start <= logical && em->start + em->len > logical) {
@@ -6783,9 +6770,9 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
 
        }
 
-       write_lock(&map_tree->map_tree.lock);
-       ret = add_extent_mapping(&map_tree->map_tree, em, 0);
-       write_unlock(&map_tree->map_tree.lock);
+       write_lock(&map_tree->lock);
+       ret = add_extent_mapping(map_tree, em, 0);
+       write_unlock(&map_tree->lock);
        if (ret < 0) {
                btrfs_err(fs_info,
                          "failed to add chunk map, start=%llu len=%llu: %d",
@@ -7103,14 +7090,14 @@ out_short_read:
 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
                                        struct btrfs_device *failing_dev)
 {
-       struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+       struct extent_map_tree *map_tree = &fs_info->mapping_tree;
        struct extent_map *em;
        u64 next_start = 0;
        bool ret = true;
 
-       read_lock(&map_tree->map_tree.lock);
-       em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
-       read_unlock(&map_tree->map_tree.lock);
+       read_lock(&map_tree->lock);
+       em = lookup_extent_mapping(map_tree, 0, (u64)-1);
+       read_unlock(&map_tree->lock);
        /* No chunk at all? Return false anyway */
        if (!em) {
                ret = false;
@@ -7148,10 +7135,10 @@ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
                next_start = extent_map_end(em);
                free_extent_map(em);
 
-               read_lock(&map_tree->map_tree.lock);
-               em = lookup_extent_mapping(&map_tree->map_tree, next_start,
+               read_lock(&map_tree->lock);
+               em = lookup_extent_mapping(map_tree, next_start,
                                           (u64)(-1) - next_start);
-               read_unlock(&map_tree->map_tree.lock);
+               read_unlock(&map_tree->lock);
        }
 out:
        return ret;
@@ -7600,10 +7587,9 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
  */
 int btrfs_bg_type_to_factor(u64 flags)
 {
-       if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
-                    BTRFS_BLOCK_GROUP_RAID10))
-               return 2;
-       return 1;
+       const int index = btrfs_bg_flags_to_raid_index(flags);
+
+       return btrfs_raid_array[index].ncopies;
 }
 
 
@@ -7612,7 +7598,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
                                 u64 chunk_offset, u64 devid,
                                 u64 physical_offset, u64 physical_len)
 {
-       struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+       struct extent_map_tree *em_tree = &fs_info->mapping_tree;
        struct extent_map *em;
        struct map_lookup *map;
        struct btrfs_device *dev;
@@ -7701,7 +7687,7 @@ out:
 
 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
 {
-       struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+       struct extent_map_tree *em_tree = &fs_info->mapping_tree;
        struct extent_map *em;
        struct rb_node *node;
        int ret = 0;