fs/btrfs/transaction.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "inode-map.h"
32 #include "volumes.h"
33 #include "dev-replace.h"
34 #include "qgroup.h"
35
36 #define BTRFS_ROOT_TRANS_TAG 0
37
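/*
 * For each transaction state, the set of handle types that can no longer
 * join the transaction once that state has been reached.
 * join_transaction() returns -EBUSY when the requested type is in the
 * blocked mask, and start_transaction() then waits and retries.
 */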
38 static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
39         [TRANS_STATE_RUNNING]           = 0U,
40         [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
41                                            __TRANS_START),
42         [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
43                                            __TRANS_START |
44                                            __TRANS_ATTACH),
45         [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
46                                            __TRANS_START |
47                                            __TRANS_ATTACH |
48                                            __TRANS_JOIN),
49         [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
50                                            __TRANS_START |
51                                            __TRANS_ATTACH |
52                                            __TRANS_JOIN |
53                                            __TRANS_JOIN_NOLOCK),
54         [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
55                                            __TRANS_START |
56                                            __TRANS_ATTACH |
57                                            __TRANS_JOIN |
58                                            __TRANS_JOIN_NOLOCK),
59 };
60
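/*
 * Drop a reference on a transaction.  A transaction is created with a use
 * count of 2 (see join_transaction()): one for the handle that opened it
 * and one that is held until the commit finishes.  Dropping the last
 * reference releases any extent maps left on the pending chunk list and
 * frees the transaction.
 */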
61 void btrfs_put_transaction(struct btrfs_transaction *transaction)
62 {
63         WARN_ON(atomic_read(&transaction->use_count) == 0);
64         if (atomic_dec_and_test(&transaction->use_count)) {
65                 BUG_ON(!list_empty(&transaction->list));
66                 WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
67                 while (!list_empty(&transaction->pending_chunks)) {
68                         struct extent_map *em;
69
70                         em = list_first_entry(&transaction->pending_chunks,
71                                               struct extent_map, list);
72                         list_del_init(&em->list);
73                         free_extent_map(em);
74                 }
75                 kmem_cache_free(btrfs_transaction_cachep, transaction);
76         }
77 }
78
79 static noinline void switch_commit_roots(struct btrfs_transaction *trans,
80                                          struct btrfs_fs_info *fs_info)
81 {
82         struct btrfs_root *root, *tmp;
83
84         down_write(&fs_info->commit_root_sem);
85         list_for_each_entry_safe(root, tmp, &trans->switch_commits,
86                                  dirty_list) {
87                 list_del_init(&root->dirty_list);
88                 free_extent_buffer(root->commit_root);
89                 root->commit_root = btrfs_root_node(root);
90                 if (is_fstree(root->objectid))
91                         btrfs_unpin_free_ino(root);
92         }
93         up_write(&fs_info->commit_root_sem);
94 }
95
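/*
 * The counters below track "external writer" handles, i.e. the handle
 * types in TRANS_EXTWRITERS (see transaction.h).  The commit path uses
 * this count to wait until all external writers have finished before it
 * seals the transaction, so the inc/dec helpers only touch it for those
 * handle types.
 */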
96 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
97                                          unsigned int type)
98 {
99         if (type & TRANS_EXTWRITERS)
100                 atomic_inc(&trans->num_extwriters);
101 }
102
103 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
104                                          unsigned int type)
105 {
106         if (type & TRANS_EXTWRITERS)
107                 atomic_dec(&trans->num_extwriters);
108 }
109
110 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
111                                           unsigned int type)
112 {
113         atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
114 }
115
116 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
117 {
118         return atomic_read(&trans->num_extwriters);
119 }
120
121 /*
122  * either allocate a new transaction or hop into the existing one
123  */
124 static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
125 {
126         struct btrfs_transaction *cur_trans;
127         struct btrfs_fs_info *fs_info = root->fs_info;
128
129         spin_lock(&fs_info->trans_lock);
130 loop:
131         /* The file system has been taken offline. No new transactions. */
132         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
133                 spin_unlock(&fs_info->trans_lock);
134                 return -EROFS;
135         }
136
137         cur_trans = fs_info->running_transaction;
138         if (cur_trans) {
139                 if (cur_trans->aborted) {
140                         spin_unlock(&fs_info->trans_lock);
141                         return cur_trans->aborted;
142                 }
143                 if (btrfs_blocked_trans_types[cur_trans->state] & type) {
144                         spin_unlock(&fs_info->trans_lock);
145                         return -EBUSY;
146                 }
147                 atomic_inc(&cur_trans->use_count);
148                 atomic_inc(&cur_trans->num_writers);
149                 extwriter_counter_inc(cur_trans, type);
150                 spin_unlock(&fs_info->trans_lock);
151                 return 0;
152         }
153         spin_unlock(&fs_info->trans_lock);
154
155         /*
156          * If we are ATTACH, we just want to catch the current transaction,
157          * and commit it. If there is no transaction, just return ENOENT.
158          */
159         if (type == TRANS_ATTACH)
160                 return -ENOENT;
161
162         /*
163          * JOIN_NOLOCK only happens during the transaction commit, so
164          * it is impossible that ->running_transaction is NULL
165          */
166         BUG_ON(type == TRANS_JOIN_NOLOCK);
167
168         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
169         if (!cur_trans)
170                 return -ENOMEM;
171
172         spin_lock(&fs_info->trans_lock);
173         if (fs_info->running_transaction) {
174                 /*
175                  * someone started a transaction after we unlocked.  Make sure
176                  * to redo the checks above
177                  */
178                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
179                 goto loop;
180         } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
181                 spin_unlock(&fs_info->trans_lock);
182                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
183                 return -EROFS;
184         }
185
186         atomic_set(&cur_trans->num_writers, 1);
187         extwriter_counter_init(cur_trans, type);
188         init_waitqueue_head(&cur_trans->writer_wait);
189         init_waitqueue_head(&cur_trans->commit_wait);
190         cur_trans->state = TRANS_STATE_RUNNING;
191         /*
192          * One for this trans handle, one so it will live on until we
193          * commit the transaction.
194          */
195         atomic_set(&cur_trans->use_count, 2);
196         cur_trans->start_time = get_seconds();
197
198         cur_trans->delayed_refs.href_root = RB_ROOT;
199         atomic_set(&cur_trans->delayed_refs.num_entries, 0);
200         cur_trans->delayed_refs.num_heads_ready = 0;
201         cur_trans->delayed_refs.num_heads = 0;
202         cur_trans->delayed_refs.flushing = 0;
203         cur_trans->delayed_refs.run_delayed_start = 0;
204
205         /*
206          * although the tree mod log is per file system and not per transaction,
207          * the log must never go across transaction boundaries.
208          */
209         smp_mb();
210         if (!list_empty(&fs_info->tree_mod_seq_list))
211                 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
212                         "creating a fresh transaction\n");
213         if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
214                 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
215                         "creating a fresh transaction\n");
216         atomic64_set(&fs_info->tree_mod_seq, 0);
217
218         spin_lock_init(&cur_trans->delayed_refs.lock);
219
220         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
221         INIT_LIST_HEAD(&cur_trans->pending_chunks);
222         INIT_LIST_HEAD(&cur_trans->switch_commits);
223         list_add_tail(&cur_trans->list, &fs_info->trans_list);
224         extent_io_tree_init(&cur_trans->dirty_pages,
225                              fs_info->btree_inode->i_mapping);
226         fs_info->generation++;
227         cur_trans->transid = fs_info->generation;
228         fs_info->running_transaction = cur_trans;
229         cur_trans->aborted = 0;
230         spin_unlock(&fs_info->trans_lock);
231
232         return 0;
233 }
234
235 /*
236  * this does all the record keeping required to make sure that a reference
237  * counted root is properly recorded in a given transaction.  This is required
238  * to make sure the old root from before we joined the transaction is deleted
239  * when the transaction commits
240  */
241 static int record_root_in_trans(struct btrfs_trans_handle *trans,
242                                struct btrfs_root *root)
243 {
244         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
245             root->last_trans < trans->transid) {
246                 WARN_ON(root == root->fs_info->extent_root);
247                 WARN_ON(root->commit_root != root->node);
248
249                 /*
250                  * see below for IN_TRANS_SETUP usage rules
251                  * we have the reloc mutex held now, so there
252                  * is only one writer in this function
253                  */
254                 set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
255
256                 /* make sure readers find IN_TRANS_SETUP before
257                  * they find our root->last_trans update
258                  */
259                 smp_wmb();
260
261                 spin_lock(&root->fs_info->fs_roots_radix_lock);
262                 if (root->last_trans == trans->transid) {
263                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
264                         return 0;
265                 }
266                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
267                            (unsigned long)root->root_key.objectid,
268                            BTRFS_ROOT_TRANS_TAG);
269                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
270                 root->last_trans = trans->transid;
271
272                 /* this is pretty tricky.  We don't want to
273                  * take the relocation lock in btrfs_record_root_in_trans
274                  * unless we're really doing the first setup for this root in
275                  * this transaction.
276                  *
277                  * Normally we'd use root->last_trans as a flag to decide
278                  * if we want to take the expensive mutex.
279                  *
280                  * But, we have to set root->last_trans before we
281                  * init the relocation root, otherwise, we trip over warnings
282                  * in ctree.c.  The solution used here is to flag ourselves
283                  * with root IN_TRANS_SETUP.  When this is 1, we're still
284                  * fixing up the reloc trees and everyone must wait.
285                  *
286                  * When this is zero, they can trust root->last_trans and fly
287                  * through btrfs_record_root_in_trans without having to take the
288                  * lock.  smp_wmb() makes sure that all the writes above are
289                  * done before we pop in the zero below
290                  */
291                 btrfs_init_reloc_root(trans, root);
292                 smp_mb__before_atomic();
293                 clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
294         }
295         return 0;
296 }
297
298
299 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
300                                struct btrfs_root *root)
301 {
302         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
303                 return 0;
304
305         /*
306          * see record_root_in_trans for comments about IN_TRANS_SETUP usage
307          * and barriers
308          */
309         smp_rmb();
310         if (root->last_trans == trans->transid &&
311             !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
312                 return 0;
313
314         mutex_lock(&root->fs_info->reloc_mutex);
315         record_root_in_trans(trans, root);
316         mutex_unlock(&root->fs_info->reloc_mutex);
317
318         return 0;
319 }
320
321 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
322 {
323         return (trans->state >= TRANS_STATE_BLOCKED &&
324                 trans->state < TRANS_STATE_UNBLOCKED &&
325                 !trans->aborted);
326 }
327
328 /* wait for commit against the current transaction to become unblocked.
329  * When this is done, it is safe to start a new transaction, but the current
330  * transaction might not be fully on disk.
331  */
332 static void wait_current_trans(struct btrfs_root *root)
333 {
334         struct btrfs_transaction *cur_trans;
335
336         spin_lock(&root->fs_info->trans_lock);
337         cur_trans = root->fs_info->running_transaction;
338         if (cur_trans && is_transaction_blocked(cur_trans)) {
339                 atomic_inc(&cur_trans->use_count);
340                 spin_unlock(&root->fs_info->trans_lock);
341
342                 wait_event(root->fs_info->transaction_wait,
343                            cur_trans->state >= TRANS_STATE_UNBLOCKED ||
344                            cur_trans->aborted);
345                 btrfs_put_transaction(cur_trans);
346         } else {
347                 spin_unlock(&root->fs_info->trans_lock);
348         }
349 }
350
351 static int may_wait_transaction(struct btrfs_root *root, int type)
352 {
353         if (root->fs_info->log_root_recovering)
354                 return 0;
355
356         if (type == TRANS_USERSPACE)
357                 return 1;
358
359         if (type == TRANS_START &&
360             !atomic_read(&root->fs_info->open_ioctl_trans))
361                 return 1;
362
363         return 0;
364 }
365
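/*
 * A transaction start only needs to reserve space for a relocation root
 * when relocation is running and this is a reference counted (subvolume)
 * root that does not already have a reloc root and is not the reloc tree
 * itself.
 */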
366 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
367 {
368         if (!root->fs_info->reloc_ctl ||
369             !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
370             root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
371             root->reloc_root)
372                 return false;
373
374         return true;
375 }
376
377 static struct btrfs_trans_handle *
378 start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
379                   enum btrfs_reserve_flush_enum flush)
380 {
381         struct btrfs_trans_handle *h;
382         struct btrfs_transaction *cur_trans;
383         u64 num_bytes = 0;
384         u64 qgroup_reserved = 0;
385         bool reloc_reserved = false;
386         int ret;
387
388         /* Send isn't supposed to start transactions. */
389         ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
390
391         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
392                 return ERR_PTR(-EROFS);
393
394         if (current->journal_info) {
395                 WARN_ON(type & TRANS_EXTWRITERS);
396                 h = current->journal_info;
397                 h->use_count++;
398                 WARN_ON(h->use_count > 2);
399                 h->orig_rsv = h->block_rsv;
400                 h->block_rsv = NULL;
401                 goto got_it;
402         }
403
404         /*
405          * Do the reservation before we join the transaction so we can do all
406          * the appropriate flushing if need be.
407          */
408         if (num_items > 0 && root != root->fs_info->chunk_root) {
409                 if (root->fs_info->quota_enabled &&
410                     is_fstree(root->root_key.objectid)) {
411                         qgroup_reserved = num_items * root->nodesize;
412                         ret = btrfs_qgroup_reserve(root, qgroup_reserved);
413                         if (ret)
414                                 return ERR_PTR(ret);
415                 }
416
417                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
418                 /*
419                  * Do the reservation for the relocation root creation
420                  */
421                 if (need_reserve_reloc_root(root)) {
422                         num_bytes += root->nodesize;
423                         reloc_reserved = true;
424                 }
425
426                 ret = btrfs_block_rsv_add(root,
427                                           &root->fs_info->trans_block_rsv,
428                                           num_bytes, flush);
429                 if (ret)
430                         goto reserve_fail;
431         }
432 again:
433         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
434         if (!h) {
435                 ret = -ENOMEM;
436                 goto alloc_fail;
437         }
438
439         /*
440          * If we are JOIN_NOLOCK we're already committing a transaction and
441          * waiting on this guy, so we don't need to do the sb_start_intwrite
442          * because we're already holding a ref.  We need this because we could
443          * have raced in and done an fsync() on a file which can kick a commit
444          * and then we deadlock with somebody doing a freeze.
445          *
446          * If we are ATTACH, it means we just want to catch the current
447          * transaction and commit it, so we needn't do sb_start_intwrite(). 
448          */
449         if (type & __TRANS_FREEZABLE)
450                 sb_start_intwrite(root->fs_info->sb);
451
452         if (may_wait_transaction(root, type))
453                 wait_current_trans(root);
454
455         do {
456                 ret = join_transaction(root, type);
457                 if (ret == -EBUSY) {
458                         wait_current_trans(root);
459                         if (unlikely(type == TRANS_ATTACH))
460                                 ret = -ENOENT;
461                 }
462         } while (ret == -EBUSY);
463
464         if (ret < 0) {
465                 /* We must get the transaction if we are JOIN_NOLOCK. */
466                 BUG_ON(type == TRANS_JOIN_NOLOCK);
467                 goto join_fail;
468         }
469
470         cur_trans = root->fs_info->running_transaction;
471
472         h->transid = cur_trans->transid;
473         h->transaction = cur_trans;
474         h->blocks_used = 0;
475         h->bytes_reserved = 0;
476         h->root = root;
477         h->delayed_ref_updates = 0;
478         h->use_count = 1;
479         h->adding_csums = 0;
480         h->block_rsv = NULL;
481         h->orig_rsv = NULL;
482         h->aborted = 0;
483         h->qgroup_reserved = 0;
484         h->delayed_ref_elem.seq = 0;
485         h->type = type;
486         h->allocating_chunk = false;
487         h->reloc_reserved = false;
488         h->sync = false;
489         INIT_LIST_HEAD(&h->qgroup_ref_list);
490         INIT_LIST_HEAD(&h->new_bgs);
491
492         smp_mb();
493         if (cur_trans->state >= TRANS_STATE_BLOCKED &&
494             may_wait_transaction(root, type)) {
495                 current->journal_info = h;
496                 btrfs_commit_transaction(h, root);
497                 goto again;
498         }
499
500         if (num_bytes) {
501                 trace_btrfs_space_reservation(root->fs_info, "transaction",
502                                               h->transid, num_bytes, 1);
503                 h->block_rsv = &root->fs_info->trans_block_rsv;
504                 h->bytes_reserved = num_bytes;
505                 h->reloc_reserved = reloc_reserved;
506         }
507         h->qgroup_reserved = qgroup_reserved;
508
509 got_it:
510         btrfs_record_root_in_trans(h, root);
511
512         if (!current->journal_info && type != TRANS_USERSPACE)
513                 current->journal_info = h;
514         return h;
515
516 join_fail:
517         if (type & __TRANS_FREEZABLE)
518                 sb_end_intwrite(root->fs_info->sb);
519         kmem_cache_free(btrfs_trans_handle_cachep, h);
520 alloc_fail:
521         if (num_bytes)
522                 btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
523                                         num_bytes);
524 reserve_fail:
525         if (qgroup_reserved)
526                 btrfs_qgroup_free(root, qgroup_reserved);
527         return ERR_PTR(ret);
528 }
529
530 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
531                                                    int num_items)
532 {
533         return start_transaction(root, num_items, TRANS_START,
534                                  BTRFS_RESERVE_FLUSH_ALL);
535 }
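/*
 * Typical usage of the helpers above and below (illustrative sketch only):
 * reserve room for the items the caller is about to modify, do the
 * modification, then end the transaction.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify up to 1 reserved item ...
 *	ret = btrfs_end_transaction(trans, root);
 */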
536
537 struct btrfs_trans_handle *btrfs_start_transaction_lflush(
538                                         struct btrfs_root *root, int num_items)
539 {
540         return start_transaction(root, num_items, TRANS_START,
541                                  BTRFS_RESERVE_FLUSH_LIMIT);
542 }
543
544 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
545 {
546         return start_transaction(root, 0, TRANS_JOIN, 0);
547 }
548
549 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
550 {
551         return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
552 }
553
554 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
555 {
556         return start_transaction(root, 0, TRANS_USERSPACE, 0);
557 }
558
559 /*
560  * btrfs_attach_transaction() - catch the running transaction
561  *
562  * It is used when we want to commit the current transaction, but
563  * don't want to start a new one.
564  *
565  * Note: If this function returns -ENOENT, it just means there is no
566  * running transaction. But it is possible that the inactive transaction
567  * is still in memory, not fully on disk. If you want to be sure there is
568  * no inactive transaction in the fs when -ENOENT is returned, you should
569  * invoke
570  *     btrfs_attach_transaction_barrier()
571  */
572 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
573 {
574         return start_transaction(root, 0, TRANS_ATTACH, 0);
575 }
576
577 /*
578  * btrfs_attach_transaction_barrier() - catch the running transaction
579  *
580  * It is similar to the above function, the difference is that this one
581  * will wait for all the inactive transactions until they fully
582  * complete.
583  */
584 struct btrfs_trans_handle *
585 btrfs_attach_transaction_barrier(struct btrfs_root *root)
586 {
587         struct btrfs_trans_handle *trans;
588
589         trans = start_transaction(root, 0, TRANS_ATTACH, 0);
590         if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
591                 btrfs_wait_for_commit(root, 0);
592
593         return trans;
594 }
595
596 /* wait for a transaction commit to be fully complete */
597 static noinline void wait_for_commit(struct btrfs_root *root,
598                                     struct btrfs_transaction *commit)
599 {
600         wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
601 }
602
603 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
604 {
605         struct btrfs_transaction *cur_trans = NULL, *t;
606         int ret = 0;
607
608         if (transid) {
609                 if (transid <= root->fs_info->last_trans_committed)
610                         goto out;
611
612                 /* find specified transaction */
613                 spin_lock(&root->fs_info->trans_lock);
614                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
615                         if (t->transid == transid) {
616                                 cur_trans = t;
617                                 atomic_inc(&cur_trans->use_count);
618                                 ret = 0;
619                                 break;
620                         }
621                         if (t->transid > transid) {
622                                 ret = 0;
623                                 break;
624                         }
625                 }
626                 spin_unlock(&root->fs_info->trans_lock);
627
628                 /*
629                  * The specified transaction doesn't exist, or we
630                  * raced with btrfs_commit_transaction
631                  */
632                 if (!cur_trans) {
633                         if (transid > root->fs_info->last_trans_committed)
634                                 ret = -EINVAL;
635                         goto out;
636                 }
637         } else {
638                 /* find newest transaction that is committing | committed */
639                 spin_lock(&root->fs_info->trans_lock);
640                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
641                                             list) {
642                         if (t->state >= TRANS_STATE_COMMIT_START) {
643                                 if (t->state == TRANS_STATE_COMPLETED)
644                                         break;
645                                 cur_trans = t;
646                                 atomic_inc(&cur_trans->use_count);
647                                 break;
648                         }
649                 }
650                 spin_unlock(&root->fs_info->trans_lock);
651                 if (!cur_trans)
652                         goto out;  /* nothing committing|committed */
653         }
654
655         wait_for_commit(root, cur_trans);
656         btrfs_put_transaction(cur_trans);
657 out:
658         return ret;
659 }
660
661 void btrfs_throttle(struct btrfs_root *root)
662 {
663         if (!atomic_read(&root->fs_info->open_ioctl_trans))
664                 wait_current_trans(root);
665 }
666
667 static int should_end_transaction(struct btrfs_trans_handle *trans,
668                                   struct btrfs_root *root)
669 {
670         if (root->fs_info->global_block_rsv.space_info->full &&
671             btrfs_check_space_for_delayed_refs(trans, root))
672                 return 1;
673
674         return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
675 }
676
677 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
678                                  struct btrfs_root *root)
679 {
680         struct btrfs_transaction *cur_trans = trans->transaction;
681         int updates;
682         int err;
683
684         smp_mb();
685         if (cur_trans->state >= TRANS_STATE_BLOCKED ||
686             cur_trans->delayed_refs.flushing)
687                 return 1;
688
689         updates = trans->delayed_ref_updates;
690         trans->delayed_ref_updates = 0;
691         if (updates) {
692                 err = btrfs_run_delayed_refs(trans, root, updates);
693                 if (err) /* Error code will also eval true */
694                         return err;
695         }
696
697         return should_end_transaction(trans, root);
698 }
699
700 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
701                           struct btrfs_root *root, int throttle)
702 {
703         struct btrfs_transaction *cur_trans = trans->transaction;
704         struct btrfs_fs_info *info = root->fs_info;
705         unsigned long cur = trans->delayed_ref_updates;
706         int lock = (trans->type != TRANS_JOIN_NOLOCK);
707         int err = 0;
708         int must_run_delayed_refs = 0;
709
710         if (trans->use_count > 1) {
711                 trans->use_count--;
712                 trans->block_rsv = trans->orig_rsv;
713                 return 0;
714         }
715
716         btrfs_trans_release_metadata(trans, root);
717         trans->block_rsv = NULL;
718
719         if (!list_empty(&trans->new_bgs))
720                 btrfs_create_pending_block_groups(trans, root);
721
722         trans->delayed_ref_updates = 0;
723         if (!trans->sync) {
724                 must_run_delayed_refs =
725                         btrfs_should_throttle_delayed_refs(trans, root);
726                 cur = max_t(unsigned long, cur, 32);
727
728                 /*
729                  * don't make the caller wait if they are from a NOLOCK
730                  * or ATTACH transaction, it will deadlock with commit
731                  */
732                 if (must_run_delayed_refs == 1 &&
733                     (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
734                         must_run_delayed_refs = 2;
735         }
736
737         if (trans->qgroup_reserved) {
738                 /*
739                  * the same root has to be passed here between start_transaction
740                  * and end_transaction. Subvolume quota depends on this.
741                  */
742                 btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
743                 trans->qgroup_reserved = 0;
744         }
745
746         btrfs_trans_release_metadata(trans, root);
747         trans->block_rsv = NULL;
748
749         if (!list_empty(&trans->new_bgs))
750                 btrfs_create_pending_block_groups(trans, root);
751
752         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
753             should_end_transaction(trans, root) &&
754             ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
755                 spin_lock(&info->trans_lock);
756                 if (cur_trans->state == TRANS_STATE_RUNNING)
757                         cur_trans->state = TRANS_STATE_BLOCKED;
758                 spin_unlock(&info->trans_lock);
759         }
760
761         if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
762                 if (throttle)
763                         return btrfs_commit_transaction(trans, root);
764                 else
765                         wake_up_process(info->transaction_kthread);
766         }
767
768         if (trans->type & __TRANS_FREEZABLE)
769                 sb_end_intwrite(root->fs_info->sb);
770
771         WARN_ON(cur_trans != info->running_transaction);
772         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
773         atomic_dec(&cur_trans->num_writers);
774         extwriter_counter_dec(cur_trans, trans->type);
775
776         smp_mb();
777         if (waitqueue_active(&cur_trans->writer_wait))
778                 wake_up(&cur_trans->writer_wait);
779         btrfs_put_transaction(cur_trans);
780
781         if (current->journal_info == trans)
782                 current->journal_info = NULL;
783
784         if (throttle)
785                 btrfs_run_delayed_iputs(root);
786
787         if (trans->aborted ||
788             test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
789                 wake_up_process(info->transaction_kthread);
790                 err = -EIO;
791         }
792         assert_qgroups_uptodate(trans);
793
794         kmem_cache_free(btrfs_trans_handle_cachep, trans);
795         if (must_run_delayed_refs) {
796                 btrfs_async_run_delayed_refs(root, cur,
797                                              must_run_delayed_refs == 1);
798         }
799         return err;
800 }
801
802 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
803                           struct btrfs_root *root)
804 {
805         return __btrfs_end_transaction(trans, root, 0);
806 }
807
808 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
809                                    struct btrfs_root *root)
810 {
811         return __btrfs_end_transaction(trans, root, 1);
812 }
813
814 /*
815  * when btree blocks are allocated, they have some corresponding bits set for
816  * them in one of two extent_io trees.  This is used to make sure all of
817  * those extents are sent to disk but does not wait on them
818  */
819 int btrfs_write_marked_extents(struct btrfs_root *root,
820                                struct extent_io_tree *dirty_pages, int mark)
821 {
822         int err = 0;
823         int werr = 0;
824         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
825         struct extent_state *cached_state = NULL;
826         u64 start = 0;
827         u64 end;
828
829         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
830                                       mark, &cached_state)) {
831                 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
832                                    mark, &cached_state, GFP_NOFS);
833                 cached_state = NULL;
834                 err = filemap_fdatawrite_range(mapping, start, end);
835                 if (err)
836                         werr = err;
837                 cond_resched();
838                 start = end + 1;
839         }
840         if (err)
841                 werr = err;
842         return werr;
843 }
844
845 /*
846  * when btree blocks are allocated, they have some corresponding bits set for
847  * them in one of two extent_io trees.  This is used to make sure all of
848  * those extents are on disk for transaction or log commit.  We wait
849  * on all the pages and clear them from the dirty pages state tree
850  */
851 int btrfs_wait_marked_extents(struct btrfs_root *root,
852                               struct extent_io_tree *dirty_pages, int mark)
853 {
854         int err = 0;
855         int werr = 0;
856         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
857         struct extent_state *cached_state = NULL;
858         u64 start = 0;
859         u64 end;
860         struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
861         bool errors = false;
862
863         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
864                                       EXTENT_NEED_WAIT, &cached_state)) {
865                 clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
866                                  0, 0, &cached_state, GFP_NOFS);
867                 err = filemap_fdatawait_range(mapping, start, end);
868                 if (err)
869                         werr = err;
870                 cond_resched();
871                 start = end + 1;
872         }
873         if (err)
874                 werr = err;
875
876         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
877                 if ((mark & EXTENT_DIRTY) &&
878                     test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
879                                        &btree_ino->runtime_flags))
880                         errors = true;
881
882                 if ((mark & EXTENT_NEW) &&
883                     test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
884                                        &btree_ino->runtime_flags))
885                         errors = true;
886         } else {
887                 if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
888                                        &btree_ino->runtime_flags))
889                         errors = true;
890         }
891
892         if (errors && !werr)
893                 werr = -EIO;
894
895         return werr;
896 }
897
898 /*
899  * when btree blocks are allocated, they have some corresponding bits set for
900  * them in one of two extent_io trees.  This is used to make sure all of
901  * those extents are on disk for transaction or log commit
902  */
903 static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
904                                 struct extent_io_tree *dirty_pages, int mark)
905 {
906         int ret;
907         int ret2;
908         struct blk_plug plug;
909
910         blk_start_plug(&plug);
911         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
912         blk_finish_plug(&plug);
913         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
914
915         if (ret)
916                 return ret;
917         if (ret2)
918                 return ret2;
919         return 0;
920 }
921
922 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
923                                      struct btrfs_root *root)
924 {
925         if (!trans || !trans->transaction) {
926                 struct inode *btree_inode;
927                 btree_inode = root->fs_info->btree_inode;
928                 return filemap_write_and_wait(btree_inode->i_mapping);
929         }
930         return btrfs_write_and_wait_marked_extents(root,
931                                            &trans->transaction->dirty_pages,
932                                            EXTENT_DIRTY);
933 }
934
935 /*
936  * this is used to update the root pointer in the tree of tree roots.
937  *
938  * But, in the case of the extent allocation tree, updating the root
939  * pointer may allocate blocks which may change the root of the extent
940  * allocation tree.
941  *
942  * So, this loops and repeats and makes sure the cowonly root didn't
943  * change while the root pointer was being updated in the metadata.
944  */
945 static int update_cowonly_root(struct btrfs_trans_handle *trans,
946                                struct btrfs_root *root)
947 {
948         int ret;
949         u64 old_root_bytenr;
950         u64 old_root_used;
951         struct btrfs_root *tree_root = root->fs_info->tree_root;
952
953         old_root_used = btrfs_root_used(&root->root_item);
954         btrfs_write_dirty_block_groups(trans, root);
955
956         while (1) {
957                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
958                 if (old_root_bytenr == root->node->start &&
959                     old_root_used == btrfs_root_used(&root->root_item))
960                         break;
961
962                 btrfs_set_root_node(&root->root_item, root->node);
963                 ret = btrfs_update_root(trans, tree_root,
964                                         &root->root_key,
965                                         &root->root_item);
966                 if (ret)
967                         return ret;
968
969                 old_root_used = btrfs_root_used(&root->root_item);
970                 ret = btrfs_write_dirty_block_groups(trans, root);
971                 if (ret)
972                         return ret;
973         }
974
975         return 0;
976 }
977
978 /*
979  * update all the cowonly tree roots on disk
980  *
981  * The error handling in this function may not be obvious. Any of the
982  * failures will cause the file system to go offline. We still need
983  * to clean up the delayed refs.
984  */
985 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
986                                          struct btrfs_root *root)
987 {
988         struct btrfs_fs_info *fs_info = root->fs_info;
989         struct list_head *next;
990         struct extent_buffer *eb;
991         int ret;
992
993         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
994         if (ret)
995                 return ret;
996
997         eb = btrfs_lock_root_node(fs_info->tree_root);
998         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
999                               0, &eb);
1000         btrfs_tree_unlock(eb);
1001         free_extent_buffer(eb);
1002
1003         if (ret)
1004                 return ret;
1005
1006         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1007         if (ret)
1008                 return ret;
1009
1010         ret = btrfs_run_dev_stats(trans, root->fs_info);
1011         if (ret)
1012                 return ret;
1013         ret = btrfs_run_dev_replace(trans, root->fs_info);
1014         if (ret)
1015                 return ret;
1016         ret = btrfs_run_qgroups(trans, root->fs_info);
1017         if (ret)
1018                 return ret;
1019
1020         /* run_qgroups might have added some more refs */
1021         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1022         if (ret)
1023                 return ret;
1024
1025         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1026                 next = fs_info->dirty_cowonly_roots.next;
1027                 list_del_init(next);
1028                 root = list_entry(next, struct btrfs_root, dirty_list);
1029
1030                 if (root != fs_info->extent_root)
1031                         list_add_tail(&root->dirty_list,
1032                                       &trans->transaction->switch_commits);
1033                 ret = update_cowonly_root(trans, root);
1034                 if (ret)
1035                         return ret;
1036         }
1037
1038         list_add_tail(&fs_info->extent_root->dirty_list,
1039                       &trans->transaction->switch_commits);
1040         btrfs_after_dev_replace_commit(fs_info);
1041
1042         return 0;
1043 }
1044
1045 /*
1046  * dead roots are old snapshots that need to be deleted.  This adds the
1047  * given root to the list of dead roots that need to
1048  * be deleted
1049  */
1050 void btrfs_add_dead_root(struct btrfs_root *root)
1051 {
1052         spin_lock(&root->fs_info->trans_lock);
1053         if (list_empty(&root->root_list))
1054                 list_add_tail(&root->root_list, &root->fs_info->dead_roots);
1055         spin_unlock(&root->fs_info->trans_lock);
1056 }
1057
1058 /*
1059  * update all the fs (subvolume) tree roots on disk
1060  */
1061 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
1062                                     struct btrfs_root *root)
1063 {
1064         struct btrfs_root *gang[8];
1065         struct btrfs_fs_info *fs_info = root->fs_info;
1066         int i;
1067         int ret;
1068         int err = 0;
1069
1070         spin_lock(&fs_info->fs_roots_radix_lock);
1071         while (1) {
1072                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1073                                                  (void **)gang, 0,
1074                                                  ARRAY_SIZE(gang),
1075                                                  BTRFS_ROOT_TRANS_TAG);
1076                 if (ret == 0)
1077                         break;
1078                 for (i = 0; i < ret; i++) {
1079                         root = gang[i];
1080                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
1081                                         (unsigned long)root->root_key.objectid,
1082                                         BTRFS_ROOT_TRANS_TAG);
1083                         spin_unlock(&fs_info->fs_roots_radix_lock);
1084
1085                         btrfs_free_log(trans, root);
1086                         btrfs_update_reloc_root(trans, root);
1087                         btrfs_orphan_commit_root(trans, root);
1088
1089                         btrfs_save_ino_cache(root, trans);
1090
1091                         /* see comments in should_cow_block() */
1092                         clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1093                         smp_mb__after_atomic();
1094
1095                         if (root->commit_root != root->node) {
1096                                 list_add_tail(&root->dirty_list,
1097                                         &trans->transaction->switch_commits);
1098                                 btrfs_set_root_node(&root->root_item,
1099                                                     root->node);
1100                         }
1101
1102                         err = btrfs_update_root(trans, fs_info->tree_root,
1103                                                 &root->root_key,
1104                                                 &root->root_item);
1105                         spin_lock(&fs_info->fs_roots_radix_lock);
1106                         if (err)
1107                                 break;
1108                 }
1109         }
1110         spin_unlock(&fs_info->fs_roots_radix_lock);
1111         return err;
1112 }
1113
1114 /*
1115  * defrag a given btree.
1116  * Every leaf in the btree is read and defragged.
1117  */
1118 int btrfs_defrag_root(struct btrfs_root *root)
1119 {
1120         struct btrfs_fs_info *info = root->fs_info;
1121         struct btrfs_trans_handle *trans;
1122         int ret;
1123
1124         if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1125                 return 0;
1126
1127         while (1) {
1128                 trans = btrfs_start_transaction(root, 0);
1129                 if (IS_ERR(trans))
1130                         return PTR_ERR(trans);
1131
1132                 ret = btrfs_defrag_leaves(trans, root);
1133
1134                 btrfs_end_transaction(trans, root);
1135                 btrfs_btree_balance_dirty(info->tree_root);
1136                 cond_resched();
1137
1138                 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
1139                         break;
1140
1141                 if (btrfs_defrag_cancelled(root->fs_info)) {
1142                         pr_debug("BTRFS: defrag_root cancelled\n");
1143                         ret = -EAGAIN;
1144                         break;
1145                 }
1146         }
1147         clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1148         return ret;
1149 }
1150
1151 /*
1152  * new snapshots need to be created at a very specific time in the
1153  * transaction commit.  This does the actual creation.
1154  *
1155  * Note:
1156  * If an error happens which may affect the commit of the current
1157  * transaction, we should return the error number. If the error only
1158  * affects the creation of the pending snapshots, just return 0.
1159  */
1160 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1161                                    struct btrfs_fs_info *fs_info,
1162                                    struct btrfs_pending_snapshot *pending)
1163 {
1164         struct btrfs_key key;
1165         struct btrfs_root_item *new_root_item;
1166         struct btrfs_root *tree_root = fs_info->tree_root;
1167         struct btrfs_root *root = pending->root;
1168         struct btrfs_root *parent_root;
1169         struct btrfs_block_rsv *rsv;
1170         struct inode *parent_inode;
1171         struct btrfs_path *path;
1172         struct btrfs_dir_item *dir_item;
1173         struct dentry *dentry;
1174         struct extent_buffer *tmp;
1175         struct extent_buffer *old;
1176         struct timespec cur_time = CURRENT_TIME;
1177         int ret = 0;
1178         u64 to_reserve = 0;
1179         u64 index = 0;
1180         u64 objectid;
1181         u64 root_flags;
1182         uuid_le new_uuid;
1183
1184         path = btrfs_alloc_path();
1185         if (!path) {
1186                 pending->error = -ENOMEM;
1187                 return 0;
1188         }
1189
1190         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
1191         if (!new_root_item) {
1192                 pending->error = -ENOMEM;
1193                 goto root_item_alloc_fail;
1194         }
1195
1196         pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1197         if (pending->error)
1198                 goto no_free_objectid;
1199
1200         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
1201
1202         if (to_reserve > 0) {
1203                 pending->error = btrfs_block_rsv_add(root,
1204                                                      &pending->block_rsv,
1205                                                      to_reserve,
1206                                                      BTRFS_RESERVE_NO_FLUSH);
1207                 if (pending->error)
1208                         goto no_free_objectid;
1209         }
1210
1211         key.objectid = objectid;
1212         key.offset = (u64)-1;
1213         key.type = BTRFS_ROOT_ITEM_KEY;
1214
1215         rsv = trans->block_rsv;
1216         trans->block_rsv = &pending->block_rsv;
1217         trans->bytes_reserved = trans->block_rsv->reserved;
1218
1219         dentry = pending->dentry;
1220         parent_inode = pending->dir;
1221         parent_root = BTRFS_I(parent_inode)->root;
1222         record_root_in_trans(trans, parent_root);
1223
1224         /*
1225          * insert the directory item
1226          */
1227         ret = btrfs_set_inode_index(parent_inode, &index);
1228         BUG_ON(ret); /* -ENOMEM */
1229
1230         /* check if there is a file/dir which has the same name. */
1231         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1232                                          btrfs_ino(parent_inode),
1233                                          dentry->d_name.name,
1234                                          dentry->d_name.len, 0);
1235         if (dir_item != NULL && !IS_ERR(dir_item)) {
1236                 pending->error = -EEXIST;
1237                 goto dir_item_existed;
1238         } else if (IS_ERR(dir_item)) {
1239                 ret = PTR_ERR(dir_item);
1240                 btrfs_abort_transaction(trans, root, ret);
1241                 goto fail;
1242         }
1243         btrfs_release_path(path);
1244
1245         /*
1246          * pull in the delayed directory update
1247          * and the delayed inode item
1248          * otherwise we corrupt the FS during
1249          * snapshot
1250          */
1251         ret = btrfs_run_delayed_items(trans, root);
1252         if (ret) {      /* Transaction aborted */
1253                 btrfs_abort_transaction(trans, root, ret);
1254                 goto fail;
1255         }
1256
1257         record_root_in_trans(trans, root);
1258         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1259         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1260         btrfs_check_and_init_root_item(new_root_item);
1261
1262         root_flags = btrfs_root_flags(new_root_item);
1263         if (pending->readonly)
1264                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1265         else
1266                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1267         btrfs_set_root_flags(new_root_item, root_flags);
1268
1269         btrfs_set_root_generation_v2(new_root_item,
1270                         trans->transid);
1271         uuid_le_gen(&new_uuid);
1272         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1273         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1274                         BTRFS_UUID_SIZE);
1275         if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1276                 memset(new_root_item->received_uuid, 0,
1277                        sizeof(new_root_item->received_uuid));
1278                 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1279                 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1280                 btrfs_set_root_stransid(new_root_item, 0);
1281                 btrfs_set_root_rtransid(new_root_item, 0);
1282         }
1283         btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1284         btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1285         btrfs_set_root_otransid(new_root_item, trans->transid);
1286
1287         old = btrfs_lock_root_node(root);
1288         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1289         if (ret) {
1290                 btrfs_tree_unlock(old);
1291                 free_extent_buffer(old);
1292                 btrfs_abort_transaction(trans, root, ret);
1293                 goto fail;
1294         }
1295
1296         btrfs_set_lock_blocking(old);
1297
1298         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1299         /* clean up in any case */
1300         btrfs_tree_unlock(old);
1301         free_extent_buffer(old);
1302         if (ret) {
1303                 btrfs_abort_transaction(trans, root, ret);
1304                 goto fail;
1305         }
1306
1307         /*
1308          * We need to flush delayed refs in order to make sure all of our quota
1309          * operations have been done before we call btrfs_qgroup_inherit.
1310          */
1311         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1312         if (ret) {
1313                 btrfs_abort_transaction(trans, root, ret);
1314                 goto fail;
1315         }
1316
1317         ret = btrfs_qgroup_inherit(trans, fs_info,
1318                                    root->root_key.objectid,
1319                                    objectid, pending->inherit);
1320         if (ret) {
1321                 btrfs_abort_transaction(trans, root, ret);
1322                 goto fail;
1323         }
1324
1325         /* see comments in should_cow_block() */
1326         set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1327         smp_wmb();
1328
1329         btrfs_set_root_node(new_root_item, tmp);
1330         /* record when the snapshot was created in key.offset */
1331         key.offset = trans->transid;
1332         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1333         btrfs_tree_unlock(tmp);
1334         free_extent_buffer(tmp);
1335         if (ret) {
1336                 btrfs_abort_transaction(trans, root, ret);
1337                 goto fail;
1338         }
1339
1340         /*
1341          * insert root back/forward references
1342          */
1343         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1344                                  parent_root->root_key.objectid,
1345                                  btrfs_ino(parent_inode), index,
1346                                  dentry->d_name.name, dentry->d_name.len);
1347         if (ret) {
1348                 btrfs_abort_transaction(trans, root, ret);
1349                 goto fail;
1350         }
1351
1352         key.offset = (u64)-1;
1353         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1354         if (IS_ERR(pending->snap)) {
1355                 ret = PTR_ERR(pending->snap);
1356                 btrfs_abort_transaction(trans, root, ret);
1357                 goto fail;
1358         }
1359
1360         ret = btrfs_reloc_post_snapshot(trans, pending);
1361         if (ret) {
1362                 btrfs_abort_transaction(trans, root, ret);
1363                 goto fail;
1364         }
1365
1366         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1367         if (ret) {
1368                 btrfs_abort_transaction(trans, root, ret);
1369                 goto fail;
1370         }
1371
1372         ret = btrfs_insert_dir_item(trans, parent_root,
1373                                     dentry->d_name.name, dentry->d_name.len,
1374                                     parent_inode, &key,
1375                                     BTRFS_FT_DIR, index);
1376         /* We have checked the name at the beginning, so it is impossible. */
1377         BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1378         if (ret) {
1379                 btrfs_abort_transaction(trans, root, ret);
1380                 goto fail;
1381         }
1382
1383         btrfs_i_size_write(parent_inode, parent_inode->i_size +
1384                                          dentry->d_name.len * 2);
1385         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1386         ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1387         if (ret) {
1388                 btrfs_abort_transaction(trans, root, ret);
1389                 goto fail;
1390         }
1391         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
1392                                   BTRFS_UUID_KEY_SUBVOL, objectid);
1393         if (ret) {
1394                 btrfs_abort_transaction(trans, root, ret);
1395                 goto fail;
1396         }
1397         if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1398                 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
1399                                           new_root_item->received_uuid,
1400                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1401                                           objectid);
1402                 if (ret && ret != -EEXIST) {
1403                         btrfs_abort_transaction(trans, root, ret);
1404                         goto fail;
1405                 }
1406         }
1407 fail:
1408         pending->error = ret;
1409 dir_item_existed:
1410         trans->block_rsv = rsv;
1411         trans->bytes_reserved = 0;
1412 no_free_objectid:
1413         kfree(new_root_item);
1414 root_item_alloc_fail:
1415         btrfs_free_path(path);
1416         return ret;
1417 }
1418
1419 /*
1420  * create all the snapshots we've scheduled for creation
1421  */
1422 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1423                                              struct btrfs_fs_info *fs_info)
1424 {
1425         struct btrfs_pending_snapshot *pending, *next;
1426         struct list_head *head = &trans->transaction->pending_snapshots;
1427         int ret = 0;
1428
1429         list_for_each_entry_safe(pending, next, head, list) {
1430                 list_del(&pending->list);
1431                 ret = create_pending_snapshot(trans, fs_info, pending);
1432                 if (ret)
1433                         break;
1434         }
1435         return ret;
1436 }
1437
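/*
 * Copy the bytenr, generation and level of the freshly committed tree root
 * and chunk root into the in-memory super block, so the superblock written
 * at the end of the commit points at the new roots.
 */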
1438 static void update_super_roots(struct btrfs_root *root)
1439 {
1440         struct btrfs_root_item *root_item;
1441         struct btrfs_super_block *super;
1442
1443         super = root->fs_info->super_copy;
1444
1445         root_item = &root->fs_info->chunk_root->root_item;
1446         super->chunk_root = root_item->bytenr;
1447         super->chunk_root_generation = root_item->generation;
1448         super->chunk_root_level = root_item->level;
1449
1450         root_item = &root->fs_info->tree_root->root_item;
1451         super->root = root_item->bytenr;
1452         super->generation = root_item->generation;
1453         super->root_level = root_item->level;
1454         if (btrfs_test_opt(root, SPACE_CACHE))
1455                 super->cache_generation = root_item->generation;
1456         if (root->fs_info->update_uuid_tree_gen)
1457                 super->uuid_tree_generation = root_item->generation;
1458 }
1459
1460 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1461 {
1462         struct btrfs_transaction *trans;
1463         int ret = 0;
1464
1465         spin_lock(&info->trans_lock);
1466         trans = info->running_transaction;
1467         if (trans)
1468                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1469         spin_unlock(&info->trans_lock);
1470         return ret;
1471 }
1472
1473 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1474 {
1475         struct btrfs_transaction *trans;
1476         int ret = 0;
1477
1478         spin_lock(&info->trans_lock);
1479         trans = info->running_transaction;
1480         if (trans)
1481                 ret = is_transaction_blocked(trans);
1482         spin_unlock(&info->trans_lock);
1483         return ret;
1484 }
1485
1486 /*
1487  * wait for the current transaction commit to start and block subsequent
1488  * transaction joins
1489  */
1490 static void wait_current_trans_commit_start(struct btrfs_root *root,
1491                                             struct btrfs_transaction *trans)
1492 {
1493         wait_event(root->fs_info->transaction_blocked_wait,
1494                    trans->state >= TRANS_STATE_COMMIT_START ||
1495                    trans->aborted);
1496 }
1497
1498 /*
1499  * wait for the current transaction commit to start and then become unblocked.
1500  * caller holds ref.
1501  */
1502 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1503                                          struct btrfs_transaction *trans)
1504 {
1505         wait_event(root->fs_info->transaction_wait,
1506                    trans->state >= TRANS_STATE_UNBLOCKED ||
1507                    trans->aborted);
1508 }
1509
1510 /*
1511  * commit transactions asynchronously. once btrfs_commit_transaction_async
1512  * returns, any subsequent transaction will not be allowed to join.
1513  */
1514 struct btrfs_async_commit {
1515         struct btrfs_trans_handle *newtrans;
1516         struct btrfs_root *root;
1517         struct work_struct work;
1518 };
1519
1520 static void do_async_commit(struct work_struct *work)
1521 {
1522         struct btrfs_async_commit *ac =
1523                 container_of(work, struct btrfs_async_commit, work);
1524
1525         /*
1526          * We've got freeze protection passed with the transaction.
1527          * Tell lockdep about it.
1528          */
1529         if (ac->newtrans->type & __TRANS_FREEZABLE)
1530                 rwsem_acquire_read(
1531                      &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1532                      0, 1, _THIS_IP_);
1533
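        /*
         * Install the handle as this worker's running transaction so the
         * commit path below behaves as if this task had started it.
         */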
1534         current->journal_info = ac->newtrans;
1535
1536         btrfs_commit_transaction(ac->newtrans, ac->root);
1537         kfree(ac);
1538 }
1539
1540 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1541                                    struct btrfs_root *root,
1542                                    int wait_for_unblock)
1543 {
1544         struct btrfs_async_commit *ac;
1545         struct btrfs_transaction *cur_trans;
1546
1547         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1548         if (!ac)
1549                 return -ENOMEM;
1550
1551         INIT_WORK(&ac->work, do_async_commit);
1552         ac->root = root;
1553         ac->newtrans = btrfs_join_transaction(root);
1554         if (IS_ERR(ac->newtrans)) {
1555                 int err = PTR_ERR(ac->newtrans);
1556                 kfree(ac);
1557                 return err;
1558         }
1559
1560         /* take transaction reference */
1561         cur_trans = trans->transaction;
1562         atomic_inc(&cur_trans->use_count);
1563
1564         btrfs_end_transaction(trans, root);
1565
1566         /*
1567          * Tell lockdep we've released the freeze rwsem, since the
1568          * async commit thread will be the one to unlock it.
1569          */
1570         if (ac->newtrans->type & __TRANS_FREEZABLE)
1571                 rwsem_release(
1572                         &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1573                         1, _THIS_IP_);
1574
1575         schedule_work(&ac->work);
1576
1577         /* wait for the commit to start; optionally also wait until it unblocks */
1578         if (wait_for_unblock)
1579                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1580         else
1581                 wait_current_trans_commit_start(root, cur_trans);
1582
1583         if (current->journal_info == trans)
1584                 current->journal_info = NULL;
1585
1586         btrfs_put_transaction(cur_trans);
1587         return 0;
1588 }
1589
1590
1591 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1592                                 struct btrfs_root *root, int err)
1593 {
1594         struct btrfs_transaction *cur_trans = trans->transaction;
1595         DEFINE_WAIT(wait);
1596
1597         WARN_ON(trans->use_count > 1);
1598
1599         btrfs_abort_transaction(trans, root, err);
1600
1601         spin_lock(&root->fs_info->trans_lock);
1602
1603         /*
1604          * If the transaction has been removed from the list, it has already
1605          * been committed successfully, so the cleanup function must not be
1606          * called for it.
1607          */
1608         BUG_ON(list_empty(&cur_trans->list));
1609
1610         list_del_init(&cur_trans->list);
1611         if (cur_trans == root->fs_info->running_transaction) {
1612                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1613                 spin_unlock(&root->fs_info->trans_lock);
1614                 wait_event(cur_trans->writer_wait,
1615                            atomic_read(&cur_trans->num_writers) == 1);
1616
1617                 spin_lock(&root->fs_info->trans_lock);
1618         }
1619         spin_unlock(&root->fs_info->trans_lock);
1620
1621         btrfs_cleanup_one_transaction(trans->transaction, root);
1622
1623         spin_lock(&root->fs_info->trans_lock);
1624         if (cur_trans == root->fs_info->running_transaction)
1625                 root->fs_info->running_transaction = NULL;
1626         spin_unlock(&root->fs_info->trans_lock);
1627
1628         if (trans->type & __TRANS_FREEZABLE)
1629                 sb_end_intwrite(root->fs_info->sb);
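        /* One ref for the trans_list entry removed above, one for our handle. */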
1630         btrfs_put_transaction(cur_trans);
1631         btrfs_put_transaction(cur_trans);
1632
1633         trace_btrfs_transaction_commit(root);
1634
1635         if (current->journal_info == trans)
1636                 current->journal_info = NULL;
1637         btrfs_scrub_cancel(root->fs_info);
1638
1639         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1640 }
1641
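/*
 * With -o flushoncommit every commit also writes out dirty data: start
 * delalloc on all roots here and wait for the resulting ordered extents
 * in btrfs_wait_delalloc_flush() below.
 */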
1642 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1643 {
1644         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1645                 return btrfs_start_delalloc_roots(fs_info, 1, -1);
1646         return 0;
1647 }
1648
1649 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1650 {
1651         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1652                 btrfs_wait_ordered_roots(fs_info, -1);
1653 }
1654
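/*
 * Commit the running transaction.  The commit walks the transaction through
 * its states: COMMIT_START blocks new joins, COMMIT_DOING waits for the
 * remaining writers, UNBLOCKED lets new transactions start while the tree
 * blocks and the super are written, and COMPLETED wakes up waiters once the
 * commit is fully done.
 */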
1655 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1656                              struct btrfs_root *root)
1657 {
1658         struct btrfs_transaction *cur_trans = trans->transaction;
1659         struct btrfs_transaction *prev_trans = NULL;
1660         struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
1661         int ret;
1662
1663         /* Stop the commit early if ->aborted is set */
1664         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1665                 ret = cur_trans->aborted;
1666                 btrfs_end_transaction(trans, root);
1667                 return ret;
1668         }
1669
1670         /* make a pass through all the delayed refs we have so far;
1671          * any running procs may add more while we are here
1672          */
1673         ret = btrfs_run_delayed_refs(trans, root, 0);
1674         if (ret) {
1675                 btrfs_end_transaction(trans, root);
1676                 return ret;
1677         }
1678
1679         btrfs_trans_release_metadata(trans, root);
1680         trans->block_rsv = NULL;
1681         if (trans->qgroup_reserved) {
1682                 btrfs_qgroup_free(root, trans->qgroup_reserved);
1683                 trans->qgroup_reserved = 0;
1684         }
1685
1686         cur_trans = trans->transaction;
1687
1688         /*
1689          * set the flushing flag so procs in this transaction have to
1690          * start sending their work down.
1691          */
1692         cur_trans->delayed_refs.flushing = 1;
1693         smp_wmb();
1694
1695         if (!list_empty(&trans->new_bgs))
1696                 btrfs_create_pending_block_groups(trans, root);
1697
1698         ret = btrfs_run_delayed_refs(trans, root, 0);
1699         if (ret) {
1700                 btrfs_end_transaction(trans, root);
1701                 return ret;
1702         }
1703
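        /*
         * If another task has already started committing this transaction,
         * just take a reference, drop our handle and wait for that commit
         * to finish.
         */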
1704         spin_lock(&root->fs_info->trans_lock);
1705         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1706                 spin_unlock(&root->fs_info->trans_lock);
1707                 atomic_inc(&cur_trans->use_count);
1708                 ret = btrfs_end_transaction(trans, root);
1709
1710                 wait_for_commit(root, cur_trans);
1711
1712                 btrfs_put_transaction(cur_trans);
1713
1714                 return ret;
1715         }
1716
1717         cur_trans->state = TRANS_STATE_COMMIT_START;
1718         wake_up(&root->fs_info->transaction_blocked_wait);
1719
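        /*
         * Commits are ordered: if the previous transaction on the list has
         * not completed yet, wait for its commit before continuing.
         */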
1720         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1721                 prev_trans = list_entry(cur_trans->list.prev,
1722                                         struct btrfs_transaction, list);
1723                 if (prev_trans->state != TRANS_STATE_COMPLETED) {
1724                         atomic_inc(&prev_trans->use_count);
1725                         spin_unlock(&root->fs_info->trans_lock);
1726
1727                         wait_for_commit(root, prev_trans);
1728
1729                         btrfs_put_transaction(prev_trans);
1730                 } else {
1731                         spin_unlock(&root->fs_info->trans_lock);
1732                 }
1733         } else {
1734                 spin_unlock(&root->fs_info->trans_lock);
1735         }
1736
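        /*
         * Drop our external writer count; below we wait until the remaining
         * external writers have finished before the final delayed-item run
         * and the delalloc wait.
         */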
1737         extwriter_counter_dec(cur_trans, trans->type);
1738
1739         ret = btrfs_start_delalloc_flush(root->fs_info);
1740         if (ret)
1741                 goto cleanup_transaction;
1742
1743         ret = btrfs_run_delayed_items(trans, root);
1744         if (ret)
1745                 goto cleanup_transaction;
1746
1747         wait_event(cur_trans->writer_wait,
1748                    extwriter_counter_read(cur_trans) == 0);
1749
1750         /* some pending stuff might have been added after the previous flush. */
1751         ret = btrfs_run_delayed_items(trans, root);
1752         if (ret)
1753                 goto cleanup_transaction;
1754
1755         btrfs_wait_delalloc_flush(root->fs_info);
1756
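        /* Pause scrub for the duration of the commit; resumed via btrfs_scrub_continue(). */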
1757         btrfs_scrub_pause(root);
1758         /*
1759          * Ok now we need to make sure to block out any other joins while we
1760          * commit the transaction.  A join could have started before we set
1761          * COMMIT_DOING, so make sure to wait until num_writers == 1 again.
1762          */
1763         spin_lock(&root->fs_info->trans_lock);
1764         cur_trans->state = TRANS_STATE_COMMIT_DOING;
1765         spin_unlock(&root->fs_info->trans_lock);
1766         wait_event(cur_trans->writer_wait,
1767                    atomic_read(&cur_trans->num_writers) == 1);
1768
1769         /* ->aborted might be set after the previous check, so check it */
1770         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1771                 ret = cur_trans->aborted;
1772                 goto scrub_continue;
1773         }
1774         /*
1775          * the reloc mutex makes sure that we stop
1776          * the balancing code from coming in and moving
1777          * extents around in the middle of the commit
1778          */
1779         mutex_lock(&root->fs_info->reloc_mutex);
1780
1781         /*
1782          * We needn't worry about the delayed items because we will
1783          * deal with them in create_pending_snapshot(), which is the
1784          * core function of the snapshot creation.
1785          */
1786         ret = create_pending_snapshots(trans, root->fs_info);
1787         if (ret) {
1788                 mutex_unlock(&root->fs_info->reloc_mutex);
1789                 goto scrub_continue;
1790         }
1791
1792         /*
1793          * We insert the dir indexes of the snapshots and update the inode
1794          * of the snapshots' parents after the snapshot creation, so there
1795          * are some delayed items which are not dealt with. Now deal with
1796          * them.
1797          *
1798          * We needn't worry that this operation will corrupt the snapshots,
1799          * because all the trees which are snapshotted will be forced to COW
1800          * the nodes and leaves.
1801          */
1802         ret = btrfs_run_delayed_items(trans, root);
1803         if (ret) {
1804                 mutex_unlock(&root->fs_info->reloc_mutex);
1805                 goto scrub_continue;
1806         }
1807
1808         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1809         if (ret) {
1810                 mutex_unlock(&root->fs_info->reloc_mutex);
1811                 goto scrub_continue;
1812         }
1813
1814         /*
1815          * make sure none of the code above managed to slip in a
1816          * delayed item
1817          */
1818         btrfs_assert_delayed_root_empty(root);
1819
1820         WARN_ON(cur_trans != trans->transaction);
1821
1822         /* Committing the tree roots below is responsible for getting the
1823          * various roots consistent with each other.  Every pointer
1824          * in the tree of tree roots has to point to the most up to date
1825          * root for every subvolume and other tree.  So, we have to keep
1826          * the tree logging code from jumping in and changing any
1827          * of the trees.
1828          *
1829          * At this point in the commit, there can't be any tree-log
1830          * writers, but a little lower down we drop the trans mutex
1831          * and let new people in.  By holding the tree_log_mutex
1832          * from now until after the super is written, we avoid races
1833          * with the tree-log code.
1834          */
1835         mutex_lock(&root->fs_info->tree_log_mutex);
1836
1837         ret = commit_fs_roots(trans, root);
1838         if (ret) {
1839                 mutex_unlock(&root->fs_info->tree_log_mutex);
1840                 mutex_unlock(&root->fs_info->reloc_mutex);
1841                 goto scrub_continue;
1842         }
1843
1844         /*
1845          * Since the transaction is done, we should set the inode map cache flag
1846          * before any other transaction comes in.
1847          */
1848         if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
1849                 btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1850         else
1851                 btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1852
1853         /* commit_fs_roots gets rid of all the tree log roots; it is now
1854          * safe to free the root of tree log roots
1855          */
1856         btrfs_free_log_root_tree(trans, root->fs_info);
1857
1858         ret = commit_cowonly_roots(trans, root);
1859         if (ret) {
1860                 mutex_unlock(&root->fs_info->tree_log_mutex);
1861                 mutex_unlock(&root->fs_info->reloc_mutex);
1862                 goto scrub_continue;
1863         }
1864
1865         /*
1866          * The tasks which save the space cache and inode cache may also
1867          * update ->aborted, so check it again.
1868          */
1869         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1870                 ret = cur_trans->aborted;
1871                 mutex_unlock(&root->fs_info->tree_log_mutex);
1872                 mutex_unlock(&root->fs_info->reloc_mutex);
1873                 goto scrub_continue;
1874         }
1875
1876         btrfs_prepare_extent_commit(trans, root);
1877
1878         cur_trans = root->fs_info->running_transaction;
1879
1880         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1881                             root->fs_info->tree_root->node);
1882         list_add_tail(&root->fs_info->tree_root->dirty_list,
1883                       &cur_trans->switch_commits);
1884
1885         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1886                             root->fs_info->chunk_root->node);
1887         list_add_tail(&root->fs_info->chunk_root->dirty_list,
1888                       &cur_trans->switch_commits);
1889
1890         switch_commit_roots(cur_trans, root->fs_info);
1891
1892         assert_qgroups_uptodate(trans);
1893         update_super_roots(root);
1894
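        /*
         * The log root tree was freed above, so clear the log root pointers
         * and take a stable copy of the super block; write_ctree_super()
         * writes super_for_commit while super_copy may change again.
         */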
1895         btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1896         btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1897         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1898                sizeof(*root->fs_info->super_copy));
1899
1900         btrfs_update_commit_device_size(root->fs_info);
1901         btrfs_update_commit_device_bytes_used(root, cur_trans);
1902
1903         clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
1904         clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
1905
1906         spin_lock(&root->fs_info->trans_lock);
1907         cur_trans->state = TRANS_STATE_UNBLOCKED;
1908         root->fs_info->running_transaction = NULL;
1909         spin_unlock(&root->fs_info->trans_lock);
1910         mutex_unlock(&root->fs_info->reloc_mutex);
1911
1912         wake_up(&root->fs_info->transaction_wait);
1913
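        /*
         * Write out every dirty tree block of this transaction and wait for
         * the IO to finish before committing the super block.
         */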
1914         ret = btrfs_write_and_wait_transaction(trans, root);
1915         if (ret) {
1916                 btrfs_error(root->fs_info, ret,
1917                             "Error while writing out transaction");
1918                 mutex_unlock(&root->fs_info->tree_log_mutex);
1919                 goto scrub_continue;
1920         }
1921
1922         ret = write_ctree_super(trans, root, 0);
1923         if (ret) {
1924                 mutex_unlock(&root->fs_info->tree_log_mutex);
1925                 goto scrub_continue;
1926         }
1927
1928         /*
1929          * the super is written, we can safely allow the tree-loggers
1930          * to go about their business
1931          */
1932         mutex_unlock(&root->fs_info->tree_log_mutex);
1933
1934         btrfs_finish_extent_commit(trans, root);
1935
1936         root->fs_info->last_trans_committed = cur_trans->transid;
1937         /*
1938          * We needn't acquire the lock here because there is no other task
1939          * which can change it.
1940          */
1941         cur_trans->state = TRANS_STATE_COMPLETED;
1942         wake_up(&cur_trans->commit_wait);
1943
1944         spin_lock(&root->fs_info->trans_lock);
1945         list_del_init(&cur_trans->list);
1946         spin_unlock(&root->fs_info->trans_lock);
1947
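        /* One ref for the trans_list entry removed above, one for our handle. */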
1948         btrfs_put_transaction(cur_trans);
1949         btrfs_put_transaction(cur_trans);
1950
1951         if (trans->type & __TRANS_FREEZABLE)
1952                 sb_end_intwrite(root->fs_info->sb);
1953
1954         trace_btrfs_transaction_commit(root);
1955
1956         btrfs_scrub_continue(root);
1957
1958         if (current->journal_info == trans)
1959                 current->journal_info = NULL;
1960
1961         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1962
1963         if (current != root->fs_info->transaction_kthread)
1964                 btrfs_run_delayed_iputs(root);
1965
1966         return ret;
1967
1968 scrub_continue:
1969         btrfs_scrub_continue(root);
1970 cleanup_transaction:
1971         btrfs_trans_release_metadata(trans, root);
1972         trans->block_rsv = NULL;
1973         if (trans->qgroup_reserved) {
1974                 btrfs_qgroup_free(root, trans->qgroup_reserved);
1975                 trans->qgroup_reserved = 0;
1976         }
1977         btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
1978         if (current->journal_info == trans)
1979                 current->journal_info = NULL;
1980         cleanup_transaction(trans, root, ret);
1981
1982         return ret;
1983 }
1984
1985 /*
1986  * return < 0 if error
1987  * 0 if there are no more dead_roots at the time of call
1988  * 1 if there are more to be processed, call me again
1989  *
1990  * A return value of 1 means there are certainly more snapshots to delete, but
1991  * if a new one shows up during processing, 0 may still be returned. We don't
1992  * mind, because btrfs_commit_super will poke the cleaner thread and it will
1993  * process it a few seconds later.
1994  */
1995 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
1996 {
1997         int ret;
1998         struct btrfs_fs_info *fs_info = root->fs_info;
1999
2000         spin_lock(&fs_info->trans_lock);
2001         if (list_empty(&fs_info->dead_roots)) {
2002                 spin_unlock(&fs_info->trans_lock);
2003                 return 0;
2004         }
2005         root = list_first_entry(&fs_info->dead_roots,
2006                         struct btrfs_root, root_list);
2007         list_del_init(&root->root_list);
2008         spin_unlock(&fs_info->trans_lock);
2009
2010         pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
2011
2012         btrfs_kill_all_delayed_nodes(root);
2013
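        /*
         * The third argument to btrfs_drop_snapshot() is update_ref: trees
         * written before the mixed backref format are dropped without
         * updating backrefs.
         */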
2014         if (btrfs_header_backref_rev(root->node) <
2015                         BTRFS_MIXED_BACKREF_REV)
2016                 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2017         else
2018                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2019
2020         return (ret < 0) ? 0 : 1;
2021 }