btrfs: don't use global reserve for chunk allocation
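
The global block reserve was being counted as used space in
should_alloc_chunk(), which could trigger a metadata chunk allocation
even though the existing chunks still had plenty of free space. Stop
folding the reserve into that check and instead give the flushing state
machine an explicit ALLOC_CHUNK_FORCE step, run after the normal
reclaim steps, so a chunk is only force-allocated once flushing has
otherwise failed to satisfy the reservation. The forced allocation is
also skipped on the first pass through the state machine, giving
reclaim of pinned space a chance before a new, possibly underutilized,
chunk is created.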
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9e74ce2d4cf23cf991058252e1ca3d9e0cbb2871..d637f4c4bcd076138414e9ec779785d808eb8053 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4399,21 +4399,12 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
                              struct btrfs_space_info *sinfo, int force)
 {
-       struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        u64 bytes_used = btrfs_space_info_used(sinfo, false);
        u64 thresh;
 
        if (force == CHUNK_ALLOC_FORCE)
                return 1;
 
-       /*
-        * We need to take into account the global rsv because for all intents
-        * and purposes it's used space.  Don't worry about locking the
-        * global_rsv, it doesn't change except when the transaction commits.
-        */
-       if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
-               bytes_used += calc_global_rsv_need_space(global_rsv);
-
        /*
         * in limited mode, we want to have some free space up to
         * about 1% of the FS size.
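
With the global reserve adjustment gone, should_alloc_chunk() decides
purely from the space_info counters. For context, a condensed sketch of
the rest of the function (the div_factor()/div_factor_fine() helpers
and the SZ_64M floor are reproduced from memory of kernels of this
vintage; treat the names and constants as illustrative, not as this
exact tree):

        if (force == CHUNK_ALLOC_LIMITED) {
                /* Limited mode: keep ~1% of the FS size, at least 64M, free. */
                thresh = btrfs_super_total_bytes(fs_info->super_copy);
                thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

                if (sinfo->total_bytes - bytes_used < thresh)
                        return 1;
        }

        /* Otherwise hold off until ~80% of the existing chunks is used. */
        if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
                return 0;
        return 1;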
@@ -4960,6 +4951,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
                btrfs_end_transaction(trans);
                break;
        case ALLOC_CHUNK:
+       case ALLOC_CHUNK_FORCE:
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
@@ -4967,7 +4959,8 @@ static void flush_space(struct btrfs_fs_info *fs_info,
                }
                ret = do_chunk_alloc(trans,
                                     btrfs_metadata_alloc_profile(fs_info),
-                                    CHUNK_ALLOC_NO_FORCE);
+                                    (state == ALLOC_CHUNK) ?
+                                     CHUNK_ALLOC_NO_FORCE : CHUNK_ALLOC_FORCE);
                btrfs_end_transaction(trans);
                if (ret > 0 || ret == -ENOSPC)
                        ret = 0;
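
Note that ALLOC_CHUNK_FORCE has to sit immediately before COMMIT_TRANS
for the flush_state++ skip in the last hunk below to land on the
transaction commit. A sketch of the resulting state order (the members
before ALLOC_CHUNK reflect kernels of this era and may not match this
tree exactly):

        enum btrfs_flush_state {
                FLUSH_DELAYED_ITEMS_NR  = 1,
                FLUSH_DELAYED_ITEMS     = 2,
                FLUSH_DELALLOC          = 3,
                FLUSH_DELALLOC_WAIT     = 4,
                FLUSH_DELAYED_REFS_NR   = 5,
                FLUSH_DELAYED_REFS      = 6,
                ALLOC_CHUNK             = 7,
                ALLOC_CHUNK_FORCE       = 8,    /* new: tried late, may be skipped */
                COMMIT_TRANS            = 9,
        };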
@@ -5111,6 +5104,19 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                                commit_cycles--;
                }
 
+               /*
+                * We don't want to force a chunk allocation until we've tried
+                * pretty hard to reclaim space.  Think of the case where we
+                * freed up a bunch of space and so have a lot of pinned space
+                * to reclaim.  We would rather use that than possibly create an
+                * underutilized metadata chunk.  So if this is our first run
+                * through the flushing state machine, skip ALLOC_CHUNK_FORCE and
+                * commit the transaction.  If nothing has changed the next go
+                * around then we can force a chunk allocation.
+                */
+               if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
+                       flush_state++;
+
                if (flush_state > COMMIT_TRANS) {
                        commit_cycles++;
                        if (commit_cycles > 2) {
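
Taken together, the reclaim worker walks the states in order and only
forces a chunk allocation from its second full cycle onward. A
standalone userspace simulation of just the skip logic (not kernel
code), to make the two-pass behaviour concrete:

        #include <stdio.h>

        /* Only the tail of the state machine matters for the skip. */
        enum flush_state { ALLOC_CHUNK = 7, ALLOC_CHUNK_FORCE, COMMIT_TRANS };

        int main(void)
        {
                int commit_cycles = 0;

                for (int pass = 0; pass < 2; pass++) {
                        for (int state = ALLOC_CHUNK; state <= COMMIT_TRANS; state++) {
                                if (state == ALLOC_CHUNK_FORCE && !commit_cycles) {
                                        printf("pass %d: skip ALLOC_CHUNK_FORCE\n", pass);
                                        state++;        /* straight to COMMIT_TRANS */
                                }
                                printf("pass %d: run state %d\n", pass, state);
                        }
                        commit_cycles++;        /* one full walk completed */
                }
                return 0;
        }

On the first pass this runs states 7 and 9 only; on the second, 7, 8
and 9.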