btrfs: Remove unused flush var in shrink_delalloc
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c1618ab9fecfb06a50861f26e4d445fe9d63190e..63c679e9b5411c878d28c282e1d9cf7acf65a1a2 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -27,7 +27,7 @@
 #include <linux/ratelimit.h>
 #include <linux/percpu_counter.h>
 #include <linux/lockdep.h>
-#include "hash.h"
+#include <linux/crc32c.h>
 #include "tree-log.h"
 #include "disk-io.h"
 #include "print-tree.h"
@@ -535,13 +535,11 @@ static noinline void caching_thread(struct btrfs_work *work)
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
-       struct btrfs_root *extent_root;
        int ret;
 
        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
-       extent_root = fs_info->extent_root;
 
        mutex_lock(&caching_ctl->mutex);
        down_read(&fs_info->commit_root_sem);
@@ -1203,11 +1201,11 @@ static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
        __le64 lenum;
 
        lenum = cpu_to_le64(root_objectid);
-       high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
+       high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
-       low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+       low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
-       low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+       low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
 
        return ((u64)high_crc << 31) ^ (u64)low_crc;
 }
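
The hunk above only swaps the open-coded btrfs_crc32c() wrapper for the
generic crc32c() library call; the hash construction itself is unchanged.
For reference, a minimal userspace sketch of the same scheme (the bitwise
crc32c() here is a stand-in for the kernel's library routine, and htole64()
stands in for cpu_to_le64(); names and includes are illustrative):

    #include <stdint.h>
    #include <stddef.h>
    #include <endian.h>

    /* Minimal bitwise CRC32C (Castagnoli, reflected poly 0x82F63B78),
     * following the seed/update convention of the kernel's crc32c(). */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
            const uint8_t *p = buf;

            while (len--) {
                    crc ^= *p++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
            }
            return crc;
    }

    /* Mirrors hash_extent_data_ref() above: one CRC stream over the root
     * objectid, a second over owner and offset, folded into 64 bits. */
    static uint64_t hash_extent_data_ref(uint64_t root_objectid,
                                         uint64_t owner, uint64_t offset)
    {
            uint32_t high_crc = ~(uint32_t)0;
            uint32_t low_crc = ~(uint32_t)0;
            uint64_t lenum;

            lenum = htole64(root_objectid);
            high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
            lenum = htole64(owner);
            low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
            lenum = htole64(offset);
            low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

            /* Note the shift is 31, not 32: the two CRCs overlap in one bit */
            return ((uint64_t)high_crc << 31) ^ (uint64_t)low_crc;
    }
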
@@ -3086,7 +3084,7 @@ again:
 
        if (run_all) {
                if (!list_empty(&trans->new_bgs))
-                       btrfs_create_pending_block_groups(trans, fs_info);
+                       btrfs_create_pending_block_groups(trans);
 
                spin_lock(&delayed_refs->lock);
                node = rb_first(&delayed_refs->href_root);
@@ -3660,9 +3658,9 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
  * the commit latency by getting rid of the easy block groups while
  * we're still allowing others to join the commit.
  */
-int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
-                                  struct btrfs_fs_info *fs_info)
+int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
 {
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_group_cache *cache;
        struct btrfs_transaction *cur_trans = trans->transaction;
        int ret = 0;
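
This hunk, and the matching ones for btrfs_create_pending_block_groups() and
btrfs_run_delayed_items_nr() elsewhere in the diff, all apply the same API
cleanup: the transaction handle already records which filesystem it belongs
to, so the separate fs_info argument was redundant and let callers pass a
mismatched pair. The shape of the refactor, as an illustrative sketch (not
the real kernel structs):

    struct btrfs_fs_info;                   /* opaque here */

    struct btrfs_trans_handle {
            struct btrfs_fs_info *fs_info;  /* set when the handle is started */
            /* ... many more members in the real fs/btrfs/transaction.h ... */
    };

    /* Before: int foo(struct btrfs_trans_handle *trans,
     *                 struct btrfs_fs_info *fs_info);
     * After: the callee derives fs_info from the handle itself. */
    static int foo(struct btrfs_trans_handle *trans)
    {
            struct btrfs_fs_info *fs_info = trans->fs_info;

            (void)fs_info;  /* the real function goes on to use fs_info */
            return 0;
    }
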
@@ -3686,7 +3684,7 @@ again:
         * make sure all the block groups on our dirty list actually
         * exist
         */
-       btrfs_create_pending_block_groups(trans, fs_info);
+       btrfs_create_pending_block_groups(trans);
 
        if (!path) {
                path = btrfs_alloc_path();
@@ -3741,8 +3739,9 @@ again:
                                should_put = 0;
 
                                /*
-                                * the cache_write_mutex is protecting
-                                * the io_list
+                                * The cache_write_mutex is protecting the
+                                * io_list; also refer to the definition of
+                                * btrfs_transaction::io_bgs for more details.
                                 */
                                list_add_tail(&cache->io_list, io);
                        } else {
@@ -3934,6 +3933,10 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
        }
        spin_unlock(&cur_trans->dirty_bgs_lock);
 
+       /*
+        * Refer to the definition of the io_bgs member for details on why it
+        * is safe to use it without any locking.
+        */
        while (!list_empty(io)) {
                cache = list_first_entry(io, struct btrfs_block_group_cache,
                                         io_list);
@@ -4333,8 +4336,7 @@ again:
 
                /* commit the current transaction and try again */
 commit_trans:
-               if (need_commit &&
-                   !atomic_read(&fs_info->open_ioctl_trans)) {
+               if (need_commit) {
                        need_commit--;
 
                        if (need_commit > 0) {
@@ -4706,7 +4708,7 @@ out:
         */
        if (trans->can_flush_pending_bgs &&
            trans->chunk_bytes_reserved >= (u64)SZ_2M) {
-               btrfs_create_pending_block_groups(trans, fs_info);
+               btrfs_create_pending_block_groups(trans);
                btrfs_trans_release_chunk_metadata(trans);
        }
        return ret;
@@ -4827,7 +4829,6 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
        long time_left;
        unsigned long nr_pages;
        int loops;
-       enum btrfs_reserve_flush_enum flush;
 
        /* Calc the number of the pages we need flush for space reservation */
        items = calc_reclaim_items_nr(fs_info, to_reclaim);
@@ -4868,10 +4869,6 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
                           atomic_read(&fs_info->async_delalloc_pages) <=
                           (int)max_reclaim);
 skip_async:
-               if (!trans)
-                       flush = BTRFS_RESERVE_FLUSH_ALL;
-               else
-                       flush = BTRFS_RESERVE_NO_FLUSH;
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets) &&
                    list_empty(&space_info->priority_tickets)) {
@@ -4994,7 +4991,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
                        ret = PTR_ERR(trans);
                        break;
                }
-               ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
+               ret = btrfs_run_delayed_items_nr(trans, nr);
                btrfs_end_transaction(trans);
                break;
        case FLUSH_DELALLOC:
@@ -5389,10 +5386,15 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
                    !block_rsv_use_bytes(global_rsv, orig_bytes))
                        ret = 0;
        }
-       if (ret == -ENOSPC)
+       if (ret == -ENOSPC) {
                trace_btrfs_space_reservation(fs_info, "space_info:enospc",
                                              block_rsv->space_info->flags,
                                              orig_bytes, 1);
+
+               if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
+                       dump_space_info(fs_info, block_rsv->space_info,
+                                       orig_bytes, 0);
+       }
        return ret;
 }
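
With this hunk, filesystems mounted with the enospc_debug option get a
dump_space_info() report of the affected space_info at the moment the
reservation finally fails, rather than only the tracepoint, which makes
premature -ENOSPC reports considerably easier to triage.
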
 
@@ -5893,24 +5895,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
        WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
 }
 
-void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
-                                 struct btrfs_fs_info *fs_info)
-{
-       if (!trans->block_rsv) {
-               ASSERT(!trans->bytes_reserved);
-               return;
-       }
-
-       if (!trans->bytes_reserved)
-               return;
-
-       ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
-       trace_btrfs_space_reservation(fs_info, "transaction",
-                                     trans->transid, trans->bytes_reserved, 0);
-       btrfs_block_rsv_release(fs_info, trans->block_rsv,
-                               trans->bytes_reserved);
-       trans->bytes_reserved = 0;
-}
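
Note that btrfs_trans_release_metadata() is deleted here with no replacement
hunk in this file; in the corresponding cleanup series the helper appears to
have been moved into transaction.c, next to its callers, rather than dropped
outright.
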
 
 /*
  * To be called after all the new block groups attached to the transaction
@@ -5952,7 +5936,7 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
         */
        u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 
-       trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), 
+       trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
                        num_bytes, 1);
        return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 }
@@ -6069,13 +6053,13 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
        if (btrfs_is_free_space_inode(inode)) {
                flush = BTRFS_RESERVE_NO_FLUSH;
                delalloc_lock = false;
-       } else if (current->journal_info) {
-               flush = BTRFS_RESERVE_FLUSH_LIMIT;
-       }
+       } else {
+               if (current->journal_info)
+                       flush = BTRFS_RESERVE_FLUSH_LIMIT;
 
-       if (flush != BTRFS_RESERVE_NO_FLUSH &&
-           btrfs_transaction_in_commit(fs_info))
-               schedule_timeout(1);
+               if (btrfs_transaction_in_commit(fs_info))
+                       schedule_timeout(1);
+       }
 
        if (delalloc_lock)
                mutex_lock(&inode->delalloc_mutex);
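
The reshuffled branch above is behaviorally equivalent but makes the intent
clearer: only the free-space-inode path, which already uses
BTRFS_RESERVE_NO_FLUSH, skips the one-tick throttling sleep during a
transaction commit; every other reservation may be throttled, whether or not
the task already holds a transaction handle via current->journal_info.
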
@@ -7353,29 +7337,6 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
        return ret;
 }
 
-int __get_raid_index(u64 flags)
-{
-       if (flags & BTRFS_BLOCK_GROUP_RAID10)
-               return BTRFS_RAID_RAID10;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID1)
-               return BTRFS_RAID_RAID1;
-       else if (flags & BTRFS_BLOCK_GROUP_DUP)
-               return BTRFS_RAID_DUP;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID0)
-               return BTRFS_RAID_RAID0;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID5)
-               return BTRFS_RAID_RAID5;
-       else if (flags & BTRFS_BLOCK_GROUP_RAID6)
-               return BTRFS_RAID_RAID6;
-
-       return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
-}
-
-int get_block_group_index(struct btrfs_block_group_cache *cache)
-{
-       return __get_raid_index(cache->flags);
-}
-
 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10]     = "raid10",
        [BTRFS_RAID_RAID1]      = "raid1",
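
The __get_raid_index()/get_block_group_index() pair removed above is
replaced throughout the rest of the diff by btrfs_bg_flags_to_raid_index(),
which takes the raw block-group flags directly instead of a block group.
A sketch of that helper, mirroring the removed chain (the real one lives as
an inline in the btrfs headers rather than in this file):

    /* Illustrative; mirrors the removed __get_raid_index() */
    static inline int btrfs_bg_flags_to_raid_index(u64 flags)
    {
            if (flags & BTRFS_BLOCK_GROUP_RAID10)
                    return BTRFS_RAID_RAID10;
            else if (flags & BTRFS_BLOCK_GROUP_RAID1)
                    return BTRFS_RAID_RAID1;
            else if (flags & BTRFS_BLOCK_GROUP_DUP)
                    return BTRFS_RAID_DUP;
            else if (flags & BTRFS_BLOCK_GROUP_RAID0)
                    return BTRFS_RAID_RAID0;
            else if (flags & BTRFS_BLOCK_GROUP_RAID5)
                    return BTRFS_RAID_RAID5;
            else if (flags & BTRFS_BLOCK_GROUP_RAID6)
                    return BTRFS_RAID_RAID6;

            return BTRFS_RAID_SINGLE;       /* BTRFS_BLOCK_GROUP_SINGLE */
    }
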
@@ -7490,7 +7451,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
        u64 empty_cluster = 0;
        struct btrfs_space_info *space_info;
        int loop = 0;
-       int index = __get_raid_index(flags);
+       int index = btrfs_bg_flags_to_raid_index(flags);
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
        bool use_cluster = true;
@@ -7576,7 +7537,8 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
                                btrfs_put_block_group(block_group);
                                up_read(&space_info->groups_sem);
                        } else {
-                               index = get_block_group_index(block_group);
+                               index = btrfs_bg_flags_to_raid_index(
+                                               block_group->flags);
                                btrfs_lock_block_group(block_group, delalloc);
                                goto have_block_group;
                        }
@@ -7586,7 +7548,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
        }
 search:
        have_caching_bg = false;
-       if (index == 0 || index == __get_raid_index(flags))
+       if (index == 0 || index == btrfs_bg_flags_to_raid_index(flags))
                full_search = true;
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups[index],
@@ -7844,7 +7806,8 @@ checks:
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
-               BUG_ON(index != get_block_group_index(block_group));
+               BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
+                      index);
                btrfs_release_block_group(block_group, delalloc);
                cond_resched();
        }
@@ -7998,6 +7961,51 @@ again:
        up_read(&info->groups_sem);
 }
 
+/*
+ * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
+ *                       hole that is at least as big as @num_bytes.
+ *
+ * @root           -   The root that will contain this extent
+ *
+ * @ram_bytes      -   The amount of space in ram that @num_bytes take. This
+ *                     is used for accounting purposes. This value differs
+ *                     from @num_bytes only in the case of compressed extents.
+ *
+ * @num_bytes      -   Number of bytes to allocate on-disk.
+ *
+ * @min_alloc_size -   Indicates the minimum amount of space that the
+ *                     allocator should try to satisfy. In some cases
+ *                     @num_bytes may be larger than what is strictly
+ *                     required; if the filesystem is fragmented, that larger
+ *                     allocation can fail, and @min_alloc_size gives the
+ *                     allocator a chance to satisfy the smaller request
+ *                     instead.
+ *
+ * @empty_size     -   A hint that you plan on doing more COW. This is the
+ *                     size in bytes the allocator should try to find free
+ *                     next to the block it returns.  This is just a hint and
+ *                     may be ignored by the allocator.
+ *
+ * @hint_byte      -   Hint to the allocator to start searching above the byte
+ *                     address passed. It might be ignored.
+ *
+ * @ins            -   This key is modified to record the found hole. It will
+ *                     have the following values:
+ *                     ins->objectid == start position
+ *                     ins->type == BTRFS_EXTENT_ITEM_KEY
+ *                     ins->offset == the size of the hole.
+ *
+ * @is_data        -   Boolean flag indicating whether an extent is
+ *                     allocated for data (true) or metadata (false)
+ *
+ * @delalloc       -   Boolean flag indicating whether this allocation is for
+ *                     delalloc or not. If true, the data_rwsem of the block
+ *                     groups is going to be acquired.
+ *
+ * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
+ * case -ENOSPC is returned then @ins->offset will contain the size of the
+ * largest available hole the allocator managed to find.
+ */
 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
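
Based on the parameter documentation above, a hedged sketch of a typical
call; the values are illustrative, and the tail of the parameter list
(@ins, @is_data, @delalloc) is not visible in this hunk, so consult the
real callers in this file for authoritative usage:

    struct btrfs_key ins = {};
    int ret;

    ret = btrfs_reserve_extent(root,
                               fs_info->sectorsize,  /* ram_bytes */
                               fs_info->sectorsize,  /* num_bytes */
                               fs_info->sectorsize,  /* min_alloc_size */
                               0,                    /* empty_size: no COW hint */
                               0,                    /* hint_byte: no preference */
                               &ins,
                               1,                    /* is_data */
                               1);                   /* delalloc */
    if (ret == -ENOSPC)
            /* on failure, ins.offset holds the largest hole that was found */
            btrfs_info(fs_info, "largest free hole: %llu", ins.offset);
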
@@ -9650,7 +9658,7 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
         */
        target = get_restripe_target(fs_info, block_group->flags);
        if (target) {
-               index = __get_raid_index(extended_to_chunk(target));
+               index = btrfs_bg_flags_to_raid_index(extended_to_chunk(target));
        } else {
                /*
                 * this is just a balance, so if we were marked as full
@@ -9664,7 +9672,7 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
                        goto out;
                }
 
-               index = get_block_group_index(block_group);
+               index = btrfs_bg_flags_to_raid_index(block_group->flags);
        }
 
        if (index == BTRFS_RAID_RAID10) {
@@ -9916,7 +9924,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 static void link_block_group(struct btrfs_block_group_cache *cache)
 {
        struct btrfs_space_info *space_info = cache->space_info;
-       int index = get_block_group_index(cache);
+       int index = btrfs_bg_flags_to_raid_index(cache->flags);
        bool first = false;
 
        down_write(&space_info->groups_sem);
@@ -10169,9 +10177,9 @@ error:
        return ret;
 }
 
-void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
-                                      struct btrfs_fs_info *fs_info)
+void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
 {
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_group_cache *block_group, *tmp;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_block_group_item item;
@@ -10336,7 +10344,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                                  block_group->key.offset);
 
        memcpy(&key, &block_group->key, sizeof(key));
-       index = get_block_group_index(block_group);
+       index = btrfs_bg_flags_to_raid_index(block_group->flags);
        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10))