btrfs: introduce delayed_refs_rsv
[sfrench/cifs-2.6.git] / fs / btrfs / delayed-ref.c
index 5149165b49a45d96e2e62091b26b5d54dea1f815..cad36c99a483ca8f6c508c32c3ad2ce94289f114 100644 (file)
@@ -164,14 +164,27 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
        return NULL;
 }
 
+static struct btrfs_delayed_ref_head *find_first_ref_head(
+               struct btrfs_delayed_ref_root *dr)
+{
+       struct rb_node *n;
+       struct btrfs_delayed_ref_head *entry;
+
+       n = rb_first_cached(&dr->href_root);
+       if (!n)
+               return NULL;
+
+       entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
+
+       return entry;
+}
+
 /*
- * find an head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot.
- * If return_bigger is given, the next bigger entry is returned if no exact
- * match is found. But if no bigger one is found then the first node of the
- * ref head tree will be returned.
+ * Find a head entry based on bytenr. This returns the delayed ref head if it
+ * was able to find one, or NULL if nothing was in that spot.  If return_bigger
+ * is given, the next bigger entry is returned if no exact match is found.
  */
-static struct btrfs_delayed_ref_headfind_ref_head(
+static struct btrfs_delayed_ref_head *find_ref_head(
                struct btrfs_delayed_ref_root *dr, u64 bytenr,
                bool return_bigger)
 {
@@ -195,10 +208,9 @@ static struct btrfs_delayed_ref_head* find_ref_head(
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
-                               n = rb_first_cached(&dr->href_root);
+                               return NULL;
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
-                       return entry;
                }
                return entry;
        }
@@ -239,8 +251,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
-       if (trans->delayed_ref_updates)
-               trans->delayed_ref_updates--;
 }
 
 static bool merge_ref(struct btrfs_trans_handle *trans,
@@ -355,33 +365,25 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
 {
        struct btrfs_delayed_ref_head *head;
-       u64 start;
-       bool loop = false;
 
 again:
-       start = delayed_refs->run_delayed_start;
-       head = find_ref_head(delayed_refs, start, true);
-       if (!head && !loop) {
+       head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
+                            true);
+       if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
-               start = 0;
-               loop = true;
-               head = find_ref_head(delayed_refs, start, true);
-               if (!head)
-                       return NULL;
-       } else if (!head && loop) {
-               return NULL;
+               head = find_first_ref_head(delayed_refs);
        }
+       if (!head)
+               return NULL;
 
        while (head->processing) {
                struct rb_node *node;
 
                node = rb_next(&head->href_node);
                if (!node) {
-                       if (loop)
+                       if (delayed_refs->run_delayed_start == 0)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
-                       start = 0;
-                       loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
@@ -396,6 +398,20 @@ again:
        return head;
 }
 
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+                          struct btrfs_delayed_ref_head *head)
+{
+       lockdep_assert_held(&delayed_refs->lock);
+       lockdep_assert_held(&head->lock);
+
+       rb_erase_cached(&head->href_node, &delayed_refs->href_root);
+       RB_CLEAR_NODE(&head->href_node);
+       atomic_dec(&delayed_refs->num_entries);
+       delayed_refs->num_heads--;
+       if (head->processing == 0)
+               delayed_refs->num_heads_ready--;
+}
+
 /*
  * Helper to insert the ref_node to the tail or merge with tail.
  *
@@ -449,7 +465,6 @@ inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
-       trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
 }
@@ -458,12 +473,14 @@ inserted:
  * helper function to update the accounting in the head ref
  * existing and update must have the same bytenr
  */
-static noinline void
-update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update,
                         int *old_ref_mod_ret)
 {
+       struct btrfs_delayed_ref_root *delayed_refs =
+               &trans->transaction->delayed_refs;
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        int old_ref_mod;
 
        BUG_ON(existing->is_data != update->is_data);
@@ -521,10 +538,18 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing->is_data) {
-               if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
+               u64 csum_leaves =
+                       btrfs_csum_bytes_to_leaves(fs_info,
+                                                  existing->num_bytes);
+
+               if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
                        delayed_refs->pending_csums -= existing->num_bytes;
-               if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
+                       btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
+               }
+               if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
                        delayed_refs->pending_csums += existing->num_bytes;
+                       trans->delayed_ref_updates += csum_leaves;
+               }
        }
        spin_unlock(&existing->lock);
 }
@@ -630,7 +655,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
                        && head_ref->qgroup_reserved
                        && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
-               update_existing_head_ref(delayed_refs, existing, head_ref,
+               update_existing_head_ref(trans, existing, head_ref,
                                         old_ref_mod);
                /*
                 * we've updated the existing ref, free the newly
@@ -641,8 +666,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
        } else {
                if (old_ref_mod)
                        *old_ref_mod = 0;
-               if (head_ref->is_data && head_ref->ref_mod < 0)
+               if (head_ref->is_data && head_ref->ref_mod < 0) {
                        delayed_refs->pending_csums += head_ref->num_bytes;
+                       trans->delayed_ref_updates +=
+                               btrfs_csum_bytes_to_leaves(trans->fs_info,
+                                                          head_ref->num_bytes);
+               }
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
@@ -778,6 +807,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);
 
+       /*
+        * Need to update the delayed_refs_rsv with any changes we may have
+        * made.
+        */
+       btrfs_update_delayed_refs_rsv(trans);
+
        trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
@@ -859,6 +894,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
        ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);
 
+       /*
+        * Need to update the delayed_refs_rsv with any changes we may have
+        * made.
+        */
+       btrfs_update_delayed_refs_rsv(trans);
+
        trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
@@ -895,6 +936,12 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                             NULL, NULL, NULL);
 
        spin_unlock(&delayed_refs->lock);
+
+       /*
+        * Need to update the delayed_refs_rsv with any changes we may have
+        * made.
+        */
+       btrfs_update_delayed_refs_rsv(trans);
        return 0;
 }