// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "qgroup.h"
#include "locking.h"
#include "inode-item.h"
#include "space-info.h"
#include "accessors.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
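
/*
 * Usage sketch (illustrative, added for clarity): the cache created above
 * is paired with the destroy call at module init/exit time, roughly:
 *
 *	ret = btrfs_delayed_inode_init();
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_delayed_inode_exit();
 */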

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
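
/*
 * Usage sketch (illustrative, added for clarity): callers pair the lookup
 * above with btrfs_release_delayed_node() once they are done with the node:
 *
 *	struct btrfs_delayed_node *node = btrfs_get_delayed_node(inode);
 *
 *	if (node) {
 *		(use or update the delayed node)
 *		btrfs_release_delayed_node(node);
 *	}
 *
 * The refcount_inc_not_zero() dance above is what makes the lookup safe
 * against a concurrent final release that is about to delete the node from
 * the radix tree.
 */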

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
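
/*
 * Usage sketch (illustrative, added for clarity): unlike
 * btrfs_get_delayed_node(), this can return ERR_PTR(-ENOMEM), so callers
 * must check with IS_ERR(), as btrfs_insert_delayed_dir_index() does below:
 *
 *	delayed_node = btrfs_get_or_create_delayed_node(dir);
 *	if (IS_ERR(delayed_node))
 *		return PTR_ERR(delayed_node);
 */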

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
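
/*
 * Usage sketch (illustrative, added for clarity): the first/next helpers
 * above form the node iteration idiom used by __btrfs_run_delayed_items()
 * below:
 *
 *	curr = btrfs_first_delayed_node(delayed_root);
 *	while (curr) {
 *		(process curr)
 *		prev = curr;
 *		curr = btrfs_next_delayed_node(curr);
 *		btrfs_release_delayed_node(prev);
 *	}
 *
 * Both helpers take a reference on the node they return, so the caller must
 * drop the reference on the previous node after advancing.
 */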

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
					   struct btrfs_delayed_node *node,
					   enum btrfs_delayed_item_type type)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->type = type;
		item->bytes_reserved = 0;
		item->delayed_node = node;
		RB_CLEAR_NODE(&item->rb_node);
		INIT_LIST_HEAD(&item->log_list);
		item->logged = false;
		refcount_set(&item->refs, 1);
	}
	return item;
}
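
/*
 * Usage sketch (illustrative, added for clarity): the item is allocated
 * with data_len bytes of trailing storage (item->data), which the caller
 * fills with the item payload. For a dir index item,
 * btrfs_insert_delayed_dir_index() below does roughly:
 *
 *	item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
 *					node, BTRFS_DELAYED_INSERTION_ITEM);
 *	dir_item = (struct btrfs_dir_item *)item->data;
 *	(fill *dir_item and copy the name right after it)
 */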

/*
 * Look up a delayed item by its dir index.
 *
 * @root:  the rbtree root (ins_root or del_root) of a delayed node
 * @index: the dir index value to look up (offset of a dir index key)
 *
 * Return the matching delayed item, or NULL if no item with that index
 * exists in the tree.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				u64 index)
{
	struct rb_node *node = root->rb_node;
	struct btrfs_delayed_item *delayed_item = NULL;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		if (delayed_item->index < index)
			node = node->rb_right;
		else if (delayed_item->index > index)
			node = node->rb_left;
		else
			return delayed_item;
	}

	return NULL;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	bool leftmost = true;

	if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else
		root = &delayed_node->del_root;

	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		if (item->index < ins->index) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (item->index > ins->index) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);

	if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->index >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->index + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
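
/*
 * Note (illustrative, added for clarity): rb_insert_color_cached() above
 * maintains a cached pointer to the leftmost node. The "leftmost" flag
 * stays true only if the new item never descends to the right, i.e. it
 * becomes the smallest index in the tree. That is what keeps the
 * rb_first_cached() lookups in the __btrfs_first_delayed_*_item() helpers
 * O(1).
 */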

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}
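
/*
 * Worked example (illustrative, added for clarity): with
 * BTRFS_DELAYED_BACKGROUND = 128 and BTRFS_DELAYED_BATCH = 16, waiters are
 * woken either when the pending item count drops below 128 or on every
 * 16th completed item (seq % 16 == 0), so a task throttled in
 * btrfs_balance_delayed_items() rechecks its exit condition once per batch
 * rather than once per item.
 */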

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not inserted, ignore it. */
	if (RB_EMPTY_NODE(&delayed_item->rb_node))
		return;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);

	if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	RB_CLEAR_NODE(&delayed_item->rb_node);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate the space rsv from the transaction rsv, since we
	 * have already reserved space when starting a transaction.  So there
	 * is no need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->delayed_node->inode_id,
					      num_bytes, 1);
		/*
		 * For insertions we track reserved metadata space by
		 * accounting for the number of leaves that will be used,
		 * based on the delayed node's curr_index_batch_size and
		 * index_item_leaves fields.
		 */
		if (item->type == BTRFS_DELAYED_DELETION_ITEM)
			item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->delayed_node->inode_id,
				      item->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
					      unsigned int num_leaves)
{
	struct btrfs_fs_info *fs_info = node->root->fs_info;
	const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);

	/* There are no space reservations during log replay, bail out. */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return;

	trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
				      bytes, 0);
	btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
}
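
/*
 * Worked example (illustrative, added for clarity; assumes the usual
 * btrfs_calc_insert_metadata_size() formula of
 * nodesize * BTRFS_MAX_LEVEL * 2 per item): with 16K nodes, releasing one
 * leaf's worth of reservation returns 16K * 8 * 2 = 256K bytes to the
 * delayed block reserve.
 */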

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/* NO_FLUSH can only fail with -ENOSPC. */
		ASSERT(ret == 0 || ret == -ENOSPC);
		if (ret)
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
	} else {
		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	}

	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      node->inode_id, num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node,
						bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * Insert a single delayed item or a batch of delayed items, as many as possible
 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
 * in the rbtree, and if there's a gap between two consecutive dir index items,
 * then it means at some point we had delayed dir indexes to add but they got
 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
 * into the subvolume tree. Dir index keys also have their offsets coming from a
 * monotonically increasing counter, so we can't get new keys with an offset that
 * fits within a gap between delayed dir index items.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *first_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_node *node = first_item->delayed_node;
	LIST_HEAD(item_list);
	struct btrfs_delayed_item *curr;
	struct btrfs_delayed_item *next;
	const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
	struct btrfs_item_batch batch;
	struct btrfs_key first_key;
	const u32 first_data_size = first_item->data_len;
	int total_size;
	char *ins_data = NULL;
	int ret;
	bool continuous_keys_only = false;

	lockdep_assert_held(&node->mutex);

	/*
	 * During normal operation the delayed index offset is continuously
	 * increasing, so we can batch insert all items as there will not be any
	 * overlapping keys in the tree.
	 *
	 * The exception to this is log replay, where we may have interleaved
	 * offsets in the tree, so our batch needs to be continuous keys only in
	 * order to ensure we do not end up with out of order items in our leaf.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		continuous_keys_only = true;

	/*
	 * For delayed items to insert, we track reserved metadata bytes based
	 * on the number of leaves that we will use.
	 * See btrfs_insert_delayed_dir_index() and
	 * btrfs_delayed_item_reserve_metadata().
	 */
	ASSERT(first_item->bytes_reserved == 0);

	list_add_tail(&first_item->tree_list, &item_list);
	batch.total_data_size = first_data_size;
	batch.nr = 1;
	total_size = first_data_size + sizeof(struct btrfs_item);
	curr = first_item;

	while (true) {
		int next_size;

		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		/*
		 * We cannot allow gaps in the key space if we're doing log
		 * replay.
		 */
		if (continuous_keys_only && (next->index != curr->index + 1))
			break;

		ASSERT(next->bytes_reserved == 0);

		next_size = next->data_len + sizeof(struct btrfs_item);
		if (total_size + next_size > max_size)
			break;

		list_add_tail(&next->tree_list, &item_list);
		batch.nr++;
		total_size += next_size;
		batch.total_data_size += next->data_len;
		curr = next;
	}

	if (batch.nr == 1) {
		first_key.objectid = node->inode_id;
		first_key.type = BTRFS_DIR_INDEX_KEY;
		first_key.offset = first_item->index;
		batch.keys = &first_key;
		batch.data_sizes = &first_data_size;
	} else {
		struct btrfs_key *ins_keys;
		u32 *ins_sizes;
		int i = 0;

		ins_data = kmalloc(batch.nr * sizeof(u32) +
				   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
		if (!ins_data) {
			ret = -ENOMEM;
			goto out;
		}
		ins_sizes = (u32 *)ins_data;
		ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
		batch.keys = ins_keys;
		batch.data_sizes = ins_sizes;
		list_for_each_entry(curr, &item_list, tree_list) {
			ins_keys[i].objectid = node->inode_id;
			ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
			ins_keys[i].offset = curr->index;
			ins_sizes[i] = curr->data_len;
			i++;
		}
	}

	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret)
		goto out;

	list_for_each_entry(curr, &item_list, tree_list) {
		char *data_ptr;

		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
		write_extent_buffer(path->nodes[0], &curr->data,
				    (unsigned long)data_ptr, curr->data_len);
		path->slots[0]++;
	}

	/*
	 * Now release our path before releasing the delayed items and their
	 * metadata reservations, so that we don't block other tasks for more
	 * time than needed.
	 */
	btrfs_release_path(path);

	ASSERT(node->index_item_leaves > 0);

	/*
	 * For normal operations we will batch an entire leaf's worth of delayed
	 * items, so if there are more items to process we can decrement
	 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
	 *
	 * However for log replay we may not have inserted an entire leaf's
	 * worth of items, we may have not had continuous items, so decrementing
	 * here would mess up the index_item_leaves accounting.  For this case
	 * only clean up the accounting when there are no items left.
	 */
	if (next && !continuous_keys_only) {
		/*
		 * We inserted one batch of items into a leaf and there are
		 * more items to flush in a future batch, so now release one
		 * unit of metadata space from the delayed block reserve,
		 * corresponding to the leaf we just flushed to.
		 */
		btrfs_delayed_item_release_leaves(node, 1);
		node->index_item_leaves--;
	} else if (!next) {
		/*
		 * There are no more items to insert. We can have a number of
		 * reserved leaves > 1 here - this happens when many dir index
		 * items are added and then removed before they are flushed
		 * (file names with a very short life that never span a
		 * transaction). So release all remaining leaves.
		 */
		btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
		node->index_item_leaves = 0;
	}

	list_for_each_entry_safe(curr, next, &item_list, tree_list) {
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}
out:
	kfree(ins_data);
	return ret;
}
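
/*
 * Worked example (illustrative, added for clarity): each batched entry
 * above costs data_len + sizeof(struct btrfs_item) bytes of leaf space.
 * Assuming a 16K nodesize and dir index items of roughly 70 bytes of data
 * plus a 25 byte item header, roughly 170 entries fit before total_size
 * would exceed max_size and the batching loop stops.
 */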

static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	int ret = 0;

	while (ret == 0) {
		struct btrfs_delayed_item *curr;

		mutex_lock(&node->mutex);
		curr = __btrfs_first_delayed_insertion_item(node);
		if (!curr) {
			mutex_unlock(&node->mutex);
			break;
		}
		ret = btrfs_insert_delayed_item(trans, root, path, curr);
		mutex_unlock(&node->mutex);
	}

	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	const u64 ino = item->delayed_node->inode_id;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf = path->nodes[0];
	LIST_HEAD(batch_list);
	int nitems, slot, last_slot;
	int ret;
	u64 total_reserved_size = item->bytes_reserved;

	ASSERT(leaf != NULL);

	slot = path->slots[0];
	last_slot = btrfs_header_nritems(leaf) - 1;
	/*
	 * Our caller always gives us a path pointing to an existing item, so
	 * this can not happen.
	 */
	ASSERT(slot <= last_slot);
	if (WARN_ON(slot > last_slot))
		return -ENOENT;

	nitems = 1;
	curr = item;
	list_add_tail(&curr->tree_list, &batch_list);

	/*
	 * Keep checking if the next delayed item matches the next item in the
	 * leaf - if so, we can add it to the batch of items to delete from the
	 * leaf.
	 */
	while (slot < last_slot) {
		struct btrfs_key key;

		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		slot++;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino ||
		    key.type != BTRFS_DIR_INDEX_KEY ||
		    key.offset != next->index)
			break;
		nitems++;
		curr = next;
		list_add_tail(&curr->tree_list, &batch_list);
		total_reserved_size += curr->bytes_reserved;
	}

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		return ret;

	/* During BTRFS_FS_LOG_RECOVERING, items won't have reserved space. */
	if (total_reserved_size > 0) {
		/*
		 * Check btrfs_delayed_item_reserve_metadata() to see why we
		 * don't need to release/reserve qgroup space.
		 */
		trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
					      total_reserved_size, 0);
		btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
					total_reserved_size, NULL);
	}

	list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

	return 0;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	int ret = 0;

	key.objectid = node->inode_id;
	key.type = BTRFS_DIR_INDEX_KEY;

	while (ret == 0) {
		struct btrfs_delayed_item *item;

		mutex_lock(&node->mutex);
		item = __btrfs_first_delayed_deletion_item(node);
		if (!item) {
			mutex_unlock(&node->mutex);
			break;
		}

		key.offset = item->index;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			/*
			 * There's no matching item in the leaf. This means we
			 * have already deleted this item in a past run of the
			 * delayed items. We ignore errors when running delayed
			 * items from an async context, through a work queue job
			 * running btrfs_async_run_delayed_root(), and don't
			 * release delayed items that failed to complete. This
			 * is because we will retry later, and at transaction
			 * commit time we always run delayed items and will
			 * then deal with errors if they fail to run again.
			 *
			 * So just release delayed items for which we can't find
			 * an item in the tree, and move to the next item.
			 */
			btrfs_release_path(path);
			btrfs_release_delayed_item(item);
			ret = 0;
		} else if (ret == 0) {
			ret = btrfs_batch_delete_items(trans, root, path, item);
			btrfs_release_path(path);
		}

		/*
		 * We unlock and relock on each iteration, this is to prevent
		 * blocking other tasks for too long while we are being run from
		 * the async context (work queue job). Those tasks are typically
		 * running system calls like creat/mkdir/rename/unlink/etc which
		 * need to add delayed items to this delayed node.
		 */
		mutex_unlock(&node->mutex);
	}

	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
		struct btrfs_delayed_root *delayed_root;

		ASSERT(delayed_node->root);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto out;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link, so
	 * there is only one iref. The case of several irefs in the same item
	 * does not exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}
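
/*
 * Usage sketch (illustrative, added for clarity): transaction commit runs
 * everything via btrfs_run_delayed_items(trans), while other callers can
 * cap the amount of work done in one go:
 *
 *	ret = btrfs_run_delayed_items_nr(trans, BTRFS_DELAYED_BATCH);
 *	if (ret)
 *		(the transaction has already been aborted)
 */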

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
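
/*
 * Worked example (illustrative, added for clarity): if a waiter sampled
 * seq = 100 before sleeping, it may stop waiting once items_seq reaches
 * 116 (seq + BTRFS_DELAYED_BATCH) or wraps below 100, or as soon as the
 * pending item count falls under BTRFS_DELAYED_BACKGROUND (128).
 */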

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
1414
1415 /* Will return 0 or -ENOMEM */
1416 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1417                                    const char *name, int name_len,
1418                                    struct btrfs_inode *dir,
1419                                    struct btrfs_disk_key *disk_key, u8 flags,
1420                                    u64 index)
1421 {
1422         struct btrfs_fs_info *fs_info = trans->fs_info;
1423         const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1424         struct btrfs_delayed_node *delayed_node;
1425         struct btrfs_delayed_item *delayed_item;
1426         struct btrfs_dir_item *dir_item;
1427         bool reserve_leaf_space;
1428         u32 data_len;
1429         int ret;
1430
1431         delayed_node = btrfs_get_or_create_delayed_node(dir);
1432         if (IS_ERR(delayed_node))
1433                 return PTR_ERR(delayed_node);
1434
1435         delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1436                                                 delayed_node,
1437                                                 BTRFS_DELAYED_INSERTION_ITEM);
1438         if (!delayed_item) {
1439                 ret = -ENOMEM;
1440                 goto release_node;
1441         }
1442
1443         delayed_item->index = index;
1444
1445         dir_item = (struct btrfs_dir_item *)delayed_item->data;
1446         dir_item->location = *disk_key;
1447         btrfs_set_stack_dir_transid(dir_item, trans->transid);
1448         btrfs_set_stack_dir_data_len(dir_item, 0);
1449         btrfs_set_stack_dir_name_len(dir_item, name_len);
1450         btrfs_set_stack_dir_flags(dir_item, flags);
1451         memcpy((char *)(dir_item + 1), name, name_len);
1452
1453         data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1454
1455         mutex_lock(&delayed_node->mutex);
1456
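        /*
         * Reserved metadata for insertion items is tracked per leaf: if the
         * new item does not fit into the current leaf-sized batch (or no
         * batch is open yet), start a new batch and reserve space for one
         * more leaf below; otherwise just account it to the current batch.
         */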
1457         if (delayed_node->index_item_leaves == 0 ||
1458             delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1459                 delayed_node->curr_index_batch_size = data_len;
1460                 reserve_leaf_space = true;
1461         } else {
1462                 delayed_node->curr_index_batch_size += data_len;
1463                 reserve_leaf_space = false;
1464         }
1465
1466         if (reserve_leaf_space) {
1467                 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1468                 /*
1469                  * Space was reserved for a dir index item insertion when we
1470                  * started the transaction, so getting a failure here should be
1471                  * impossible.
1472                  */
1473                 if (WARN_ON(ret)) {
1474                         mutex_unlock(&delayed_node->mutex);
1475                         btrfs_release_delayed_item(delayed_item);
1476                         goto release_node;
1477                 }
1478
1479                 delayed_node->index_item_leaves++;
1480         } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
1481                 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1482
1483                 /*
1484                  * Adding the new dir index item does not require touching another
1485                  * leaf, so we can release 1 unit of metadata that was previously
1486                  * reserved when starting the transaction. This applies only to
1487                  * the case where we had a transaction start and excludes the
1488                  * transaction join case (when replaying log trees).
1489                  */
1490                 trace_btrfs_space_reservation(fs_info, "transaction",
1491                                               trans->transid, bytes, 0);
1492                 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1493                 ASSERT(trans->bytes_reserved >= bytes);
1494                 trans->bytes_reserved -= bytes;
1495         }
1496
1497         ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1498         if (unlikely(ret)) {
1499                 btrfs_err(trans->fs_info,
1500                           "failed to add delayed dir index item (name: %.*s) to the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1501                           name_len, name, delayed_node->root->root_key.objectid,
1502                           delayed_node->inode_id, ret);
1503                 BUG();
1504         }
1505         mutex_unlock(&delayed_node->mutex);
1506
1507 release_node:
1508         btrfs_release_delayed_node(delayed_node);
1509         return ret;
1510 }
1511
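/*
 * Try to cancel a dir index insertion that is still queued on the delayed
 * node. Returns 0 if a matching insertion item was found and dropped, so the
 * deletion needs no item of its own, or 1 if the caller must queue a real
 * deletion item instead.
 */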
1512 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1513                                                struct btrfs_delayed_node *node,
1514                                                u64 index)
1515 {
1516         struct btrfs_delayed_item *item;
1517
1518         mutex_lock(&node->mutex);
1519         item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1520         if (!item) {
1521                 mutex_unlock(&node->mutex);
1522                 return 1;
1523         }
1524
1525         /*
1526          * For delayed items to insert, we track reserved metadata bytes based
1527          * on the number of leaves that we will use.
1528          * See btrfs_insert_delayed_dir_index() and
1529          * btrfs_delayed_item_reserve_metadata().
1530          */
1531         ASSERT(item->bytes_reserved == 0);
1532         ASSERT(node->index_item_leaves > 0);
1533
1534         /*
1535          * If there's only one leaf reserved, we can subtract this item from
1536          * the current batch; otherwise we cannot, because we don't know which
1537          * leaf it belongs to. With the current limit on delayed items, we rarely
1538          * accumulate enough dir index items to fill more than one leaf (even
1539          * when using a leaf size of 4K).
1540          */
1541         if (node->index_item_leaves == 1) {
1542                 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1543
1544                 ASSERT(node->curr_index_batch_size >= data_len);
1545                 node->curr_index_batch_size -= data_len;
1546         }
1547
1548         btrfs_release_delayed_item(item);
1549
1550         /* If we now have no more dir index items, we can release all leaves. */
1551         if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1552                 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1553                 node->index_item_leaves = 0;
1554         }
1555
1556         mutex_unlock(&node->mutex);
1557         return 0;
1558 }
1559
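/*
 * Queue the deletion of a dir index item. Fast path: if the index is still
 * pending as a delayed insertion, the two simply cancel out. Otherwise a
 * BTRFS_DELAYED_DELETION_ITEM is added to the node's deletion tree.
 */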
1560 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1561                                    struct btrfs_inode *dir, u64 index)
1562 {
1563         struct btrfs_delayed_node *node;
1564         struct btrfs_delayed_item *item;
1565         int ret;
1566
1567         node = btrfs_get_or_create_delayed_node(dir);
1568         if (IS_ERR(node))
1569                 return PTR_ERR(node);
1570
1571         ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1572         if (!ret)
1573                 goto end;
1574
1575         item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1576         if (!item) {
1577                 ret = -ENOMEM;
1578                 goto end;
1579         }
1580
1581         item->index = index;
1582
1583         ret = btrfs_delayed_item_reserve_metadata(trans, item);
1584         /*
1585          * We reserved enough space when we started the transaction, so a
1586          * metadata reservation failure here should be impossible.
1587          */
1588         if (ret < 0) {
1589                 btrfs_err(trans->fs_info,
1590 "metadata reservation failed for delayed dir item deletion, it should have been reserved");
1591                 btrfs_release_delayed_item(item);
1592                 goto end;
1593         }
1594
1595         mutex_lock(&node->mutex);
1596         ret = __btrfs_add_delayed_item(node, item);
1597         if (unlikely(ret)) {
1598                 btrfs_err(trans->fs_info,
1599                           "failed to add delayed dir index item (index: %llu) to the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1600                           index, node->root->root_key.objectid,
1601                           node->inode_id, ret);
1602                 btrfs_delayed_item_release_metadata(dir->root, item);
1603                 btrfs_release_delayed_item(item);
1604         }
1605         mutex_unlock(&node->mutex);
1606 end:
1607         btrfs_release_delayed_node(node);
1608         return ret;
1609 }
1610
1611 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1612 {
1613         struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1614
1615         if (!delayed_node)
1616                 return -ENOENT;
1617
1618         /*
1619          * Since we hold the i_mutex of this directory, no new directory index
1620          * can be added to the delayed node and index_cnt cannot be updated
1621          * now, so we needn't lock the delayed node.
1622          */
1623         if (!delayed_node->index_cnt) {
1624                 btrfs_release_delayed_node(delayed_node);
1625                 return -EINVAL;
1626         }
1627
1628         inode->index_cnt = delayed_node->index_cnt;
1629         btrfs_release_delayed_node(delayed_node);
1630         return 0;
1631 }
1632
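/*
 * Snapshot the delayed insertion and deletion items of a directory for
 * readdir. Each item is pinned with an extra reference and linked onto the
 * caller's lists; btrfs_readdir_put_delayed_items() drops them again. The
 * caller is assumed to hold the inode's shared lock, which is upgraded to an
 * exclusive one here. Returns false if the inode has no delayed node.
 */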
1633 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1634                                      struct list_head *ins_list,
1635                                      struct list_head *del_list)
1636 {
1637         struct btrfs_delayed_node *delayed_node;
1638         struct btrfs_delayed_item *item;
1639
1640         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1641         if (!delayed_node)
1642                 return false;
1643
1644         /*
1645          * We can only do one readdir with delayed items at a time because of
1646          * item->readdir_list.
1647          */
1648         btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1649         btrfs_inode_lock(inode, 0);
1650
1651         mutex_lock(&delayed_node->mutex);
1652         item = __btrfs_first_delayed_insertion_item(delayed_node);
1653         while (item) {
1654                 refcount_inc(&item->refs);
1655                 list_add_tail(&item->readdir_list, ins_list);
1656                 item = __btrfs_next_delayed_item(item);
1657         }
1658
1659         item = __btrfs_first_delayed_deletion_item(delayed_node);
1660         while (item) {
1661                 refcount_inc(&item->refs);
1662                 list_add_tail(&item->readdir_list, del_list);
1663                 item = __btrfs_next_delayed_item(item);
1664         }
1665         mutex_unlock(&delayed_node->mutex);
1666         /*
1667          * This delayed node is still cached in the btrfs inode, so refs
1668          * must be > 1 now, and we needn't check whether it is about to be
1669          * freed.
1670          *
1671          * Besides that, this function is only used for readdir; we do not
1672          * insert or delete delayed items during this period, so we needn't
1673          * requeue or dequeue this delayed node either.
1674          */
1675         refcount_dec(&delayed_node->refs);
1676
1677         return true;
1678 }
1679
1680 void btrfs_readdir_put_delayed_items(struct inode *inode,
1681                                      struct list_head *ins_list,
1682                                      struct list_head *del_list)
1683 {
1684         struct btrfs_delayed_item *curr, *next;
1685
1686         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1687                 list_del(&curr->readdir_list);
1688                 if (refcount_dec_and_test(&curr->refs))
1689                         kfree(curr);
1690         }
1691
1692         list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1693                 list_del(&curr->readdir_list);
1694                 if (refcount_dec_and_test(&curr->refs))
1695                         kfree(curr);
1696         }
1697
1698         /*
1699          * The VFS is going to do up_read(), so we need to downgrade back to a
1700          * read lock.
1701          */
1702         downgrade_write(&inode->i_rwsem);
1703 }
1704
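/*
 * Check whether @index is covered by a pending delayed deletion. The
 * del_list built by btrfs_readdir_get_delayed_items() is in ascending index
 * order (it walks the rbtree in order), which is what permits the early
 * break once curr->index > index.
 */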
1705 int btrfs_should_delete_dir_index(struct list_head *del_list,
1706                                   u64 index)
1707 {
1708         struct btrfs_delayed_item *curr;
1709         int ret = 0;
1710
1711         list_for_each_entry(curr, del_list, readdir_list) {
1712                 if (curr->index > index)
1713                         break;
1714                 if (curr->index == index) {
1715                         ret = 1;
1716                         break;
1717                 }
1718         }
1719         return ret;
1720 }
1721
1722 /*
1723  * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree.
1724  * Returns 1 if the dir context filled up before all items were emitted.
1725  */
1726 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1727                                     struct list_head *ins_list)
1728 {
1729         struct btrfs_dir_item *di;
1730         struct btrfs_delayed_item *curr, *next;
1731         struct btrfs_key location;
1732         char *name;
1733         int name_len;
1734         int over = 0;
1735         unsigned char d_type;
1736
1737         if (list_empty(ins_list))
1738                 return 0;
1739
1740         /*
1741          * The data of a delayed item can't change, so we needn't lock
1742          * it. And since we hold the i_mutex of the directory, nobody
1743          * can delete any directory index now.
1744          */
1745         list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1746                 list_del(&curr->readdir_list);
1747
1748                 if (curr->index < ctx->pos) {
1749                         if (refcount_dec_and_test(&curr->refs))
1750                                 kfree(curr);
1751                         continue;
1752                 }
1753
1754                 ctx->pos = curr->index;
1755
1756                 di = (struct btrfs_dir_item *)curr->data;
1757                 name = (char *)(di + 1);
1758                 name_len = btrfs_stack_dir_name_len(di);
1759
1760                 d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1761                 btrfs_disk_key_to_cpu(&location, &di->location);
1762
1763                 over = !dir_emit(ctx, name, name_len,
1764                                location.objectid, d_type);
1765
1766                 if (refcount_dec_and_test(&curr->refs))
1767                         kfree(curr);
1768
1769                 if (over)
1770                         return 1;
1771                 ctx->pos++;
1772         }
1773         return 0;
1774 }
1775
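/*
 * Copy the in-memory VFS/btrfs inode state into a stack inode item, e.g. the
 * one cached in the delayed node. btrfs_fill_inode() below is the reverse
 * direction.
 */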
1776 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1777                                   struct btrfs_inode_item *inode_item,
1778                                   struct inode *inode)
1779 {
1780         u64 flags;
1781
1782         btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1783         btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1784         btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1785         btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1786         btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1787         btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1788         btrfs_set_stack_inode_generation(inode_item,
1789                                          BTRFS_I(inode)->generation);
1790         btrfs_set_stack_inode_sequence(inode_item,
1791                                        inode_peek_iversion(inode));
1792         btrfs_set_stack_inode_transid(inode_item, trans->transid);
1793         btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1794         flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1795                                           BTRFS_I(inode)->ro_flags);
1796         btrfs_set_stack_inode_flags(inode_item, flags);
1797         btrfs_set_stack_inode_block_group(inode_item, 0);
1798
1799         btrfs_set_stack_timespec_sec(&inode_item->atime,
1800                                      inode->i_atime.tv_sec);
1801         btrfs_set_stack_timespec_nsec(&inode_item->atime,
1802                                       inode->i_atime.tv_nsec);
1803
1804         btrfs_set_stack_timespec_sec(&inode_item->mtime,
1805                                      inode->i_mtime.tv_sec);
1806         btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1807                                       inode->i_mtime.tv_nsec);
1808
1809         btrfs_set_stack_timespec_sec(&inode_item->ctime,
1810                                      inode->i_ctime.tv_sec);
1811         btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1812                                       inode->i_ctime.tv_nsec);
1813
1814         btrfs_set_stack_timespec_sec(&inode_item->otime,
1815                                      BTRFS_I(inode)->i_otime.tv_sec);
1816         btrfs_set_stack_timespec_nsec(&inode_item->otime,
1817                                      BTRFS_I(inode)->i_otime.tv_nsec);
1818 }
1819
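/*
 * Populate a VFS inode from the inode_item cached in its delayed node, if
 * that cached copy is dirty (i.e. newer than what the btree holds). Returns
 * -ENOENT when there is no delayed node or no dirty inode item, in which
 * case the caller presumably falls back to reading the item from the btree.
 */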
1820 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1821 {
1822         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1823         struct btrfs_delayed_node *delayed_node;
1824         struct btrfs_inode_item *inode_item;
1825
1826         delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1827         if (!delayed_node)
1828                 return -ENOENT;
1829
1830         mutex_lock(&delayed_node->mutex);
1831         if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1832                 mutex_unlock(&delayed_node->mutex);
1833                 btrfs_release_delayed_node(delayed_node);
1834                 return -ENOENT;
1835         }
1836
1837         inode_item = &delayed_node->inode_item;
1838
1839         i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1840         i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1841         btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1842         btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1843                         round_up(i_size_read(inode), fs_info->sectorsize));
1844         inode->i_mode = btrfs_stack_inode_mode(inode_item);
1845         set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1846         inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1847         BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1848         BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1849
1850         inode_set_iversion_queried(inode,
1851                                    btrfs_stack_inode_sequence(inode_item));
1852         inode->i_rdev = 0;
1853         *rdev = btrfs_stack_inode_rdev(inode_item);
1854         btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1855                                 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1856
1857         inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1858         inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1859
1860         inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1861         inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1862
1863         inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1864         inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1865
1866         BTRFS_I(inode)->i_otime.tv_sec =
1867                 btrfs_stack_timespec_sec(&inode_item->otime);
1868         BTRFS_I(inode)->i_otime.tv_nsec =
1869                 btrfs_stack_timespec_nsec(&inode_item->otime);
1870
1871         inode->i_generation = BTRFS_I(inode)->generation;
1872         BTRFS_I(inode)->index_cnt = (u64)-1;
1873
1874         mutex_unlock(&delayed_node->mutex);
1875         btrfs_release_delayed_node(delayed_node);
1876         return 0;
1877 }
1878
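/*
 * Record an inode update in the delayed node instead of writing it into the
 * btree right away. The first update reserves metadata and marks the node
 * INODE_DIRTY; subsequent updates just refresh the cached inode_item.
 */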
1879 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1880                                struct btrfs_root *root,
1881                                struct btrfs_inode *inode)
1882 {
1883         struct btrfs_delayed_node *delayed_node;
1884         int ret = 0;
1885
1886         delayed_node = btrfs_get_or_create_delayed_node(inode);
1887         if (IS_ERR(delayed_node))
1888                 return PTR_ERR(delayed_node);
1889
1890         mutex_lock(&delayed_node->mutex);
1891         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1892                 fill_stack_inode_item(trans, &delayed_node->inode_item,
1893                                       &inode->vfs_inode);
1894                 goto release_node;
1895         }
1896
1897         ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1898         if (ret)
1899                 goto release_node;
1900
1901         fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1902         set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1903         delayed_node->count++;
1904         atomic_inc(&root->fs_info->delayed_root->items);
1905 release_node:
1906         mutex_unlock(&delayed_node->mutex);
1907         btrfs_release_delayed_node(delayed_node);
1908         return ret;
1909 }
1910
1911 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1912 {
1913         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1914         struct btrfs_delayed_node *delayed_node;
1915
1916         /*
1917          * We don't do delayed inode updates during log recovery because it
1918          * leads to enospc problems.  This means we also can't do delayed
1919          * inode refs.
1920          */
1921         if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1922                 return -EAGAIN;
1923
1924         delayed_node = btrfs_get_or_create_delayed_node(inode);
1925         if (IS_ERR(delayed_node))
1926                 return PTR_ERR(delayed_node);
1927
1928         /*
1929          * We don't reserve space for the inode ref deletion because:
1930          * - We ONLY do async inode ref deletion for an inode that has only
1931          *   one link (i_nlink == 1), which means there is only one inode ref.
1932          *   And in most cases, the inode ref and the inode item are in the
1933          *   same leaf, so we will deal with them at the same time.
1934          *   Since we are sure we will reserve space for the inode item,
1935          *   it is unnecessary to reserve space for the inode ref deletion.
1936          * - If the inode ref and the inode item are not in the same leaf,
1937          *   we also needn't worry about an enospc problem, because we reserve
1938          *   much more space for the inode update than it needs.
1939          * - At worst, we can steal some space from the global reservation.
1940          *   That is very rare.
1941          */
1942         mutex_lock(&delayed_node->mutex);
1943         if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1944                 goto release_node;
1945
1946         set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1947         delayed_node->count++;
1948         atomic_inc(&fs_info->delayed_root->items);
1949 release_node:
1950         mutex_unlock(&delayed_node->mutex);
1951         btrfs_release_delayed_node(delayed_node);
1952         return 0;
1953 }
1954
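/*
 * Throw away everything pending on a delayed node: all queued insertion
 * items together with their reserved leaves, all deletion items and their
 * metadata reservations, any pending iref deletion and the dirty inode item.
 * Used by the teardown paths below, where pending updates must not be
 * flushed.
 */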
1955 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1956 {
1957         struct btrfs_root *root = delayed_node->root;
1958         struct btrfs_fs_info *fs_info = root->fs_info;
1959         struct btrfs_delayed_item *curr_item, *prev_item;
1960
1961         mutex_lock(&delayed_node->mutex);
1962         curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1963         while (curr_item) {
1964                 prev_item = curr_item;
1965                 curr_item = __btrfs_next_delayed_item(prev_item);
1966                 btrfs_release_delayed_item(prev_item);
1967         }
1968
1969         if (delayed_node->index_item_leaves > 0) {
1970                 btrfs_delayed_item_release_leaves(delayed_node,
1971                                           delayed_node->index_item_leaves);
1972                 delayed_node->index_item_leaves = 0;
1973         }
1974
1975         curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1976         while (curr_item) {
1977                 btrfs_delayed_item_release_metadata(root, curr_item);
1978                 prev_item = curr_item;
1979                 curr_item = __btrfs_next_delayed_item(prev_item);
1980                 btrfs_release_delayed_item(prev_item);
1981         }
1982
1983         btrfs_release_delayed_iref(delayed_node);
1984
1985         if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1986                 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1987                 btrfs_release_delayed_inode(delayed_node);
1988         }
1989         mutex_unlock(&delayed_node->mutex);
1990 }
1991
1992 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1993 {
1994         struct btrfs_delayed_node *delayed_node;
1995
1996         delayed_node = btrfs_get_delayed_node(inode);
1997         if (!delayed_node)
1998                 return;
1999
2000         __btrfs_kill_delayed_node(delayed_node);
2001         btrfs_release_delayed_node(delayed_node);
2002 }
2003
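/*
 * Kill the delayed nodes of every inode in @root. The radix tree is scanned
 * in gang-lookup batches of up to 8 nodes; references are taken under
 * inode_lock (skipping nodes whose refcount already hit zero, i.e. ones
 * concurrently being torn down) and the nodes are killed outside the lock.
 */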
2004 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2005 {
2006         u64 inode_id = 0;
2007         struct btrfs_delayed_node *delayed_nodes[8];
2008         int i, n;
2009
2010         while (1) {
2011                 spin_lock(&root->inode_lock);
2012                 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
2013                                            (void **)delayed_nodes, inode_id,
2014                                            ARRAY_SIZE(delayed_nodes));
2015                 if (!n) {
2016                         spin_unlock(&root->inode_lock);
2017                         break;
2018                 }
2019
2020                 inode_id = delayed_nodes[n - 1]->inode_id + 1;
2021                 for (i = 0; i < n; i++) {
2022                         /*
2023                          * Don't increase refs in case the node is dead and
2024                          * about to be removed from the tree in the loop below
2025                          */
2026                         if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
2027                                 delayed_nodes[i] = NULL;
2028                 }
2029                 spin_unlock(&root->inode_lock);
2030
2031                 for (i = 0; i < n; i++) {
2032                         if (!delayed_nodes[i])
2033                                 continue;
2034                         __btrfs_kill_delayed_node(delayed_nodes[i]);
2035                         btrfs_release_delayed_node(delayed_nodes[i]);
2036                 }
2037         }
2038 }
2039
2040 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2041 {
2042         struct btrfs_delayed_node *curr_node, *prev_node;
2043
2044         curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2045         while (curr_node) {
2046                 __btrfs_kill_delayed_node(curr_node);
2047
2048                 prev_node = curr_node;
2049                 curr_node = btrfs_next_delayed_node(curr_node);
2050                 btrfs_release_delayed_node(prev_node);
2051         }
2052 }
2053
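/*
 * Collect the delayed items of an inode for logging, pinning each one with
 * an extra reference. Unlike the readdir variant above, items are linked
 * through item->log_list, and items already on some log list are skipped.
 */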
2054 void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2055                                  struct list_head *ins_list,
2056                                  struct list_head *del_list)
2057 {
2058         struct btrfs_delayed_node *node;
2059         struct btrfs_delayed_item *item;
2060
2061         node = btrfs_get_delayed_node(inode);
2062         if (!node)
2063                 return;
2064
2065         mutex_lock(&node->mutex);
2066         item = __btrfs_first_delayed_insertion_item(node);
2067         while (item) {
2068                 /*
2069                  * It's possible that the item is already in a log list. This
2070                  * can happen in case two tasks are trying to log the same
2071                  * directory. For example, with tasks A and B:
2072                  *
2073                  * Task A collects the delayed items into a log list while
2074                  * under the inode's log_mutex (at btrfs_log_inode()), but it
2075                  * only releases the items after logging the inodes they point
2076                  * to (if they are new inodes), which happens after unlocking
2077                  * the log mutex;
2078                  *
2079                  * Task B enters btrfs_log_inode() and acquires the log_mutex
2080                  * of the same directory inode before task A has released the
2081                  * delayed items. This can happen because, when logging an
2082                  * inode, we may need to trigger logging of its parent directory,
2083                  * so logging two files that share the same parent directory can
2084                  * lead to this.
2085                  *
2086                  * If this happens, just ignore delayed items already in a log
2087                  * list. All the tasks logging the directory are under a log
2088                  * transaction and whichever finishes first can not sync the log
2089                  * before the other completes and leaves the log transaction.
2090                  */
2091                 if (!item->logged && list_empty(&item->log_list)) {
2092                         refcount_inc(&item->refs);
2093                         list_add_tail(&item->log_list, ins_list);
2094                 }
2095                 item = __btrfs_next_delayed_item(item);
2096         }
2097
2098         item = __btrfs_first_delayed_deletion_item(node);
2099         while (item) {
2100                 /* It may be non-empty, for the same reason mentioned above. */
2101                 if (!item->logged && list_empty(&item->log_list)) {
2102                         refcount_inc(&item->refs);
2103                         list_add_tail(&item->log_list, del_list);
2104                 }
2105                 item = __btrfs_next_delayed_item(item);
2106         }
2107         mutex_unlock(&node->mutex);
2108
2109         /*
2110          * We are called during inode logging, which means the inode is in use
2111          * and cannot be evicted before we finish logging it. So we never
2112          * have the last reference on the delayed inode.
2113          * Also, we don't use btrfs_release_delayed_node() because that would
2114          * requeue the delayed inode (change its order in the list of prepared
2115          * nodes), and we don't want such a change because we don't create or
2116          * delete delayed items.
2117          */
2118         ASSERT(refcount_read(&node->refs) > 1);
2119         refcount_dec(&node->refs);
2120 }
2121
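/*
 * Counterpart of btrfs_log_get_delayed_items(): mark every collected item as
 * logged, unlink it from its log list and drop the extra reference, freeing
 * items for which that was the last one.
 */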
2122 void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2123                                  struct list_head *ins_list,
2124                                  struct list_head *del_list)
2125 {
2126         struct btrfs_delayed_node *node;
2127         struct btrfs_delayed_item *item;
2128         struct btrfs_delayed_item *next;
2129
2130         node = btrfs_get_delayed_node(inode);
2131         if (!node)
2132                 return;
2133
2134         mutex_lock(&node->mutex);
2135
2136         list_for_each_entry_safe(item, next, ins_list, log_list) {
2137                 item->logged = true;
2138                 list_del_init(&item->log_list);
2139                 if (refcount_dec_and_test(&item->refs))
2140                         kfree(item);
2141         }
2142
2143         list_for_each_entry_safe(item, next, del_list, log_list) {
2144                 item->logged = true;
2145                 list_del_init(&item->log_list);
2146                 if (refcount_dec_and_test(&item->refs))
2147                         kfree(item);
2148         }
2149
2150         mutex_unlock(&node->mutex);
2151
2152         /*
2153          * We are called during inode logging, which means the inode is in use
2154          * and cannot be evicted before we finish logging it. So we never
2155          * have the last reference on the delayed inode.
2156          * Also, we don't use btrfs_release_delayed_node() because that would
2157          * requeue the delayed inode (change its order in the list of prepared
2158          * nodes), and we don't want such a change because we don't create or
2159          * delete delayed items.
2160          */
2161         ASSERT(refcount_read(&node->refs) > 1);
2162         refcount_dec(&node->refs);
2163 }