1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "inode-map.h"
32 #include "volumes.h"
33
34 #define BTRFS_ROOT_TRANS_TAG 0
35
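/*
 * Drop one reference on a transaction.  When the last reference goes away
 * the transaction must already be off fs_info->trans_list and its delayed
 * ref tree must be empty; the struct is then cleared and freed back to the
 * btrfs_transaction_cachep slab.
 */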
36 void put_transaction(struct btrfs_transaction *transaction)
37 {
38         WARN_ON(atomic_read(&transaction->use_count) == 0);
39         if (atomic_dec_and_test(&transaction->use_count)) {
40                 BUG_ON(!list_empty(&transaction->list));
41                 WARN_ON(transaction->delayed_refs.root.rb_node);
42                 memset(transaction, 0, sizeof(*transaction));
43                 kmem_cache_free(btrfs_transaction_cachep, transaction);
44         }
45 }
46
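/*
 * Make the current root node the new commit root: drop the reference held
 * on the old commit root buffer and take the root's current top-of-tree
 * node in its place.
 */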
47 static noinline void switch_commit_root(struct btrfs_root *root)
48 {
49         free_extent_buffer(root->commit_root);
50         root->commit_root = btrfs_root_node(root);
51 }
52
53 /*
54  * either allocate a new transaction or hop into the existing one
55  */
56 static noinline int join_transaction(struct btrfs_root *root, int type)
57 {
58         struct btrfs_transaction *cur_trans;
59         struct btrfs_fs_info *fs_info = root->fs_info;
60
61         spin_lock(&fs_info->trans_lock);
62 loop:
63         /* The file system has been taken offline. No new transactions. */
64         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
65                 spin_unlock(&fs_info->trans_lock);
66                 return -EROFS;
67         }
68
69         if (fs_info->trans_no_join) {
70                 /*
71                  * If we are JOIN_NOLOCK we're already committing the current
72                  * transaction and only need a handle to help finish that
73                  * commit (for example to write out the inode cache and the
74                  * space cache).  It is a special case.
75                  */
76                 if (type != TRANS_JOIN_NOLOCK) {
77                         spin_unlock(&fs_info->trans_lock);
78                         return -EBUSY;
79                 }
80         }
81
82         cur_trans = fs_info->running_transaction;
83         if (cur_trans) {
84                 if (cur_trans->aborted) {
85                         spin_unlock(&fs_info->trans_lock);
86                         return cur_trans->aborted;
87                 }
88                 atomic_inc(&cur_trans->use_count);
89                 atomic_inc(&cur_trans->num_writers);
90                 cur_trans->num_joined++;
91                 spin_unlock(&fs_info->trans_lock);
92                 return 0;
93         }
94         spin_unlock(&fs_info->trans_lock);
95
96         /*
97          * If we are ATTACH, we just want to catch the current transaction,
98          * and commit it. If there is no transaction, just return ENOENT.
99          */
100         if (type == TRANS_ATTACH)
101                 return -ENOENT;
102
103         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
104         if (!cur_trans)
105                 return -ENOMEM;
106
107         spin_lock(&fs_info->trans_lock);
108         if (fs_info->running_transaction) {
109                 /*
110                  * someone started a transaction after we unlocked.  Make sure
111                  * to redo the trans_no_join checks above
112                  */
113                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
114                 cur_trans = fs_info->running_transaction;
115                 goto loop;
116         } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
117                 spin_unlock(&fs_info->trans_lock);
118                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
119                 return -EROFS;
120         }
121
122         atomic_set(&cur_trans->num_writers, 1);
123         cur_trans->num_joined = 0;
124         init_waitqueue_head(&cur_trans->writer_wait);
125         init_waitqueue_head(&cur_trans->commit_wait);
126         cur_trans->in_commit = 0;
127         cur_trans->blocked = 0;
128         /*
129          * One for this trans handle, one so it will live on until we
130          * commit the transaction.
131          */
132         atomic_set(&cur_trans->use_count, 2);
133         cur_trans->commit_done = 0;
134         cur_trans->start_time = get_seconds();
135
136         cur_trans->delayed_refs.root = RB_ROOT;
137         cur_trans->delayed_refs.num_entries = 0;
138         cur_trans->delayed_refs.num_heads_ready = 0;
139         cur_trans->delayed_refs.num_heads = 0;
140         cur_trans->delayed_refs.flushing = 0;
141         cur_trans->delayed_refs.run_delayed_start = 0;
142
143         /*
144          * although the tree mod log is per file system and not per transaction,
145          * the log must never go across transaction boundaries.
146          */
147         smp_mb();
148         if (!list_empty(&fs_info->tree_mod_seq_list)) {
149                 printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
150                         "creating a fresh transaction\n");
151                 WARN_ON(1);
152         }
153         if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
154                 printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
155                         "creating a fresh transaction\n");
156                 WARN_ON(1);
157         }
158         atomic_set(&fs_info->tree_mod_seq, 0);
159
160         spin_lock_init(&cur_trans->commit_lock);
161         spin_lock_init(&cur_trans->delayed_refs.lock);
162
163         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
164         list_add_tail(&cur_trans->list, &fs_info->trans_list);
165         extent_io_tree_init(&cur_trans->dirty_pages,
166                              fs_info->btree_inode->i_mapping);
167         fs_info->generation++;
168         cur_trans->transid = fs_info->generation;
169         fs_info->running_transaction = cur_trans;
170         cur_trans->aborted = 0;
171         spin_unlock(&fs_info->trans_lock);
172
173         return 0;
174 }
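/*
 * Note on join_transaction() above: on success it returns 0 with a running
 * transaction installed and referenced for the caller (num_writers bumped
 * and a use_count reference held for the handle).  It returns -EBUSY when
 * joins are temporarily blocked, -ENOENT for TRANS_ATTACH with nothing
 * running, -EROFS once the filesystem has hit an error, and -ENOMEM on
 * allocation failure.
 */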
175
176 /*
177  * this does all the record keeping required to make sure that a reference
178  * counted root is properly recorded in a given transaction.  This is required
179  * to make sure the old root from before we joined the transaction is deleted
180  * when the transaction commits
181  */
182 static int record_root_in_trans(struct btrfs_trans_handle *trans,
183                                struct btrfs_root *root)
184 {
185         if (root->ref_cows && root->last_trans < trans->transid) {
186                 WARN_ON(root == root->fs_info->extent_root);
187                 WARN_ON(root->commit_root != root->node);
188
189                 /*
190                  * see below for in_trans_setup usage rules
191                  * we have the reloc mutex held now, so there
192                  * is only one writer in this function
193                  */
194                 root->in_trans_setup = 1;
195
196                 /* make sure readers find in_trans_setup before
197                  * they find our root->last_trans update
198                  */
199                 smp_wmb();
200
201                 spin_lock(&root->fs_info->fs_roots_radix_lock);
202                 if (root->last_trans == trans->transid) {
203                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
204                         return 0;
205                 }
206                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
207                            (unsigned long)root->root_key.objectid,
208                            BTRFS_ROOT_TRANS_TAG);
209                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
210                 root->last_trans = trans->transid;
211
212                 /* this is pretty tricky.  We don't want to
213                  * take the relocation lock in btrfs_record_root_in_trans
214                  * unless we're really doing the first setup for this root in
215                  * this transaction.
216                  *
217                  * Normally we'd use root->last_trans as a flag to decide
218                  * if we want to take the expensive mutex.
219                  *
220                  * But, we have to set root->last_trans before we
221                  * init the relocation root, otherwise, we trip over warnings
222                  * in ctree.c.  The solution used here is to flag ourselves
223                  * with root->in_trans_setup.  When this is 1, we're still
224                  * fixing up the reloc trees and everyone must wait.
225                  *
226                  * When this is zero, they can trust root->last_trans and fly
227                  * through btrfs_record_root_in_trans without having to take the
228                  * lock.  smp_wmb() makes sure that all the writes above are
229                  * done before we pop in the zero below
230                  */
231                 btrfs_init_reloc_root(trans, root);
232                 smp_wmb();
233                 root->in_trans_setup = 0;
234         }
235         return 0;
236 }
237
238
239 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
240                                struct btrfs_root *root)
241 {
242         if (!root->ref_cows)
243                 return 0;
244
245         /*
246          * see record_root_in_trans for comments about in_trans_setup usage
247          * and barriers
248          */
249         smp_rmb();
250         if (root->last_trans == trans->transid &&
251             !root->in_trans_setup)
252                 return 0;
253
254         mutex_lock(&root->fs_info->reloc_mutex);
255         record_root_in_trans(trans, root);
256         mutex_unlock(&root->fs_info->reloc_mutex);
257
258         return 0;
259 }
260
261 /* wait for commit against the current transaction to become unblocked
262  * when this is done, it is safe to start a new transaction, but the current
263  * transaction might not be fully on disk.
264  */
265 static void wait_current_trans(struct btrfs_root *root)
266 {
267         struct btrfs_transaction *cur_trans;
268
269         spin_lock(&root->fs_info->trans_lock);
270         cur_trans = root->fs_info->running_transaction;
271         if (cur_trans && cur_trans->blocked) {
272                 atomic_inc(&cur_trans->use_count);
273                 spin_unlock(&root->fs_info->trans_lock);
274
275                 wait_event(root->fs_info->transaction_wait,
276                            !cur_trans->blocked);
277                 put_transaction(cur_trans);
278         } else {
279                 spin_unlock(&root->fs_info->trans_lock);
280         }
281 }
282
283 static int may_wait_transaction(struct btrfs_root *root, int type)
284 {
285         if (root->fs_info->log_root_recovering)
286                 return 0;
287
288         if (type == TRANS_USERSPACE)
289                 return 1;
290
291         if (type == TRANS_START &&
292             !atomic_read(&root->fs_info->open_ioctl_trans))
293                 return 1;
294
295         return 0;
296 }
297
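/*
 * Common helper behind all of the btrfs_*_transaction() starters below.
 * @num_items is the number of metadata items the caller expects to touch;
 * space for them is reserved in fs_info->trans_block_rsv up front (plus a
 * qgroup reservation when quotas are enabled).  @type selects the join
 * behaviour (TRANS_START, TRANS_JOIN, TRANS_JOIN_NOLOCK, TRANS_USERSPACE,
 * TRANS_ATTACH) and @noflush picks the no-flush reservation path.  Returns
 * a handle or an ERR_PTR().
 */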
298 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
299                                                     u64 num_items, int type,
300                                                     int noflush)
301 {
302         struct btrfs_trans_handle *h;
303         struct btrfs_transaction *cur_trans;
304         u64 num_bytes = 0;
305         int ret;
306         u64 qgroup_reserved = 0;
307
308         if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
309                 return ERR_PTR(-EROFS);
310
311         if (current->journal_info) {
312                 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
313                 h = current->journal_info;
314                 h->use_count++;
315                 h->orig_rsv = h->block_rsv;
316                 h->block_rsv = NULL;
317                 goto got_it;
318         }
319
320         /*
321          * Do the reservation before we join the transaction so we can do all
322          * the appropriate flushing if need be.
323          */
324         if (num_items > 0 && root != root->fs_info->chunk_root) {
325                 if (root->fs_info->quota_enabled &&
326                     is_fstree(root->root_key.objectid)) {
327                         qgroup_reserved = num_items * root->leafsize;
328                         ret = btrfs_qgroup_reserve(root, qgroup_reserved);
329                         if (ret)
330                                 return ERR_PTR(ret);
331                 }
332
333                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
334                 if (noflush)
335                         ret = btrfs_block_rsv_add_noflush(root,
336                                                 &root->fs_info->trans_block_rsv,
337                                                 num_bytes);
338                 else
339                         ret = btrfs_block_rsv_add(root,
340                                                 &root->fs_info->trans_block_rsv,
341                                                 num_bytes);
342                 if (ret)
343                         return ERR_PTR(ret);
344         }
345 again:
346         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
347         if (!h)
348                 return ERR_PTR(-ENOMEM);
349
350         /*
351          * If we are JOIN_NOLOCK we're already committing a transaction and
352          * waiting on this guy, so we don't need to do the sb_start_intwrite
353          * because we're already holding a ref.  We need this because we could
354          * have raced in and done an fsync() on a file which can kick a commit
355          * and then we deadlock with somebody doing a freeze.
356          *
357          * If we are ATTACH, it means we just want to catch the current
358          * transaction and commit it, so we needn't do sb_start_intwrite().
359          */
360         if (type < TRANS_JOIN_NOLOCK)
361                 sb_start_intwrite(root->fs_info->sb);
362
363         if (may_wait_transaction(root, type))
364                 wait_current_trans(root);
365
366         do {
367                 ret = join_transaction(root, type);
368                 if (ret == -EBUSY)
369                         wait_current_trans(root);
370         } while (ret == -EBUSY);
371
372         if (ret < 0) {
373                 /* We must get the transaction if we are JOIN_NOLOCK. */
374                 BUG_ON(type == TRANS_JOIN_NOLOCK);
375
376                 if (type < TRANS_JOIN_NOLOCK)
377                         sb_end_intwrite(root->fs_info->sb);
378                 kmem_cache_free(btrfs_trans_handle_cachep, h);
379                 return ERR_PTR(ret);
380         }
381
382         cur_trans = root->fs_info->running_transaction;
383
384         h->transid = cur_trans->transid;
385         h->transaction = cur_trans;
386         h->blocks_used = 0;
387         h->bytes_reserved = 0;
388         h->root = root;
389         h->delayed_ref_updates = 0;
390         h->use_count = 1;
391         h->adding_csums = 0;
392         h->block_rsv = NULL;
393         h->orig_rsv = NULL;
394         h->aborted = 0;
395         h->qgroup_reserved = qgroup_reserved;
396         h->delayed_ref_elem.seq = 0;
397         h->type = type;
398         INIT_LIST_HEAD(&h->qgroup_ref_list);
399         INIT_LIST_HEAD(&h->new_bgs);
400
401         smp_mb();
402         if (cur_trans->blocked && may_wait_transaction(root, type)) {
403                 btrfs_commit_transaction(h, root);
404                 goto again;
405         }
406
407         if (num_bytes) {
408                 trace_btrfs_space_reservation(root->fs_info, "transaction",
409                                               h->transid, num_bytes, 1);
410                 h->block_rsv = &root->fs_info->trans_block_rsv;
411                 h->bytes_reserved = num_bytes;
412         }
413
414 got_it:
415         btrfs_record_root_in_trans(h, root);
416
417         if (!current->journal_info && type != TRANS_USERSPACE)
418                 current->journal_info = h;
419         return h;
420 }
421
422 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
423                                                    int num_items)
424 {
425         return start_transaction(root, num_items, TRANS_START, 0);
426 }
427
428 struct btrfs_trans_handle *btrfs_start_transaction_noflush(
429                                         struct btrfs_root *root, int num_items)
430 {
431         return start_transaction(root, num_items, TRANS_START, 1);
432 }
433
434 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
435 {
436         return start_transaction(root, 0, TRANS_JOIN, 0);
437 }
438
439 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
440 {
441         return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
442 }
443
444 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
445 {
446         return start_transaction(root, 0, TRANS_USERSPACE, 0);
447 }
448
449 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
450 {
451         return start_transaction(root, 0, TRANS_ATTACH, 0);
452 }
453
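/*
 * A minimal usage sketch for the starters above (illustrative only, not
 * taken from any caller in this file): reserve space for one item, do the
 * modification, then end the transaction.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify up to one metadata item ...
 *	ret = btrfs_end_transaction(trans, root);
 */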
454 /* wait for a transaction commit to be fully complete */
455 static noinline void wait_for_commit(struct btrfs_root *root,
456                                     struct btrfs_transaction *commit)
457 {
458         wait_event(commit->commit_wait, commit->commit_done);
459 }
460
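/*
 * Wait for the commit of a specific transaction.  A non-zero @transid waits
 * for exactly that generation and returns -EINVAL if it is unknown (or 0
 * immediately if it has already committed); @transid == 0 waits for whatever
 * transaction is currently committing, if any.
 */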
461 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
462 {
463         struct btrfs_transaction *cur_trans = NULL, *t;
464         int ret;
465
466         ret = 0;
467         if (transid) {
468                 if (transid <= root->fs_info->last_trans_committed)
469                         goto out;
470
471                 /* find specified transaction */
472                 spin_lock(&root->fs_info->trans_lock);
473                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
474                         if (t->transid == transid) {
475                                 cur_trans = t;
476                                 atomic_inc(&cur_trans->use_count);
477                                 break;
478                         }
479                         if (t->transid > transid)
480                                 break;
481                 }
482                 spin_unlock(&root->fs_info->trans_lock);
483                 ret = -EINVAL;
484                 if (!cur_trans)
485                         goto out;  /* bad transid */
486         } else {
487                 /* find newest transaction that is committing | committed */
488                 spin_lock(&root->fs_info->trans_lock);
489                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
490                                             list) {
491                         if (t->in_commit) {
492                                 if (t->commit_done)
493                                         break;
494                                 cur_trans = t;
495                                 atomic_inc(&cur_trans->use_count);
496                                 break;
497                         }
498                 }
499                 spin_unlock(&root->fs_info->trans_lock);
500                 if (!cur_trans)
501                         goto out;  /* nothing committing|committed */
502         }
503
504         wait_for_commit(root, cur_trans);
505
506         put_transaction(cur_trans);
507         ret = 0;
508 out:
509         return ret;
510 }
511
512 void btrfs_throttle(struct btrfs_root *root)
513 {
514         if (!atomic_read(&root->fs_info->open_ioctl_trans))
515                 wait_current_trans(root);
516 }
517
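/*
 * Returns 1 when the global block reserve has dropped below roughly half of
 * its target size (the factor-5 check below), signalling that the caller
 * should wrap up this transaction soon so a commit can refill the reserve.
 */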
518 static int should_end_transaction(struct btrfs_trans_handle *trans,
519                                   struct btrfs_root *root)
520 {
521         int ret;
522
523         ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
524         return ret ? 1 : 0;
525 }
526
527 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
528                                  struct btrfs_root *root)
529 {
530         struct btrfs_transaction *cur_trans = trans->transaction;
531         int updates;
532         int err;
533
534         smp_mb();
535         if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
536                 return 1;
537
538         updates = trans->delayed_ref_updates;
539         trans->delayed_ref_updates = 0;
540         if (updates) {
541                 err = btrfs_run_delayed_refs(trans, root, updates);
542                 if (err) /* Error code will also evaluate to true */
543                         return err;
544         }
545
546         return should_end_transaction(trans, root);
547 }
548
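/*
 * Common tail for ending a transaction handle: runs a bounded batch of
 * delayed refs, releases the metadata and qgroup reservations held by the
 * handle, drops our writer count and reference on the transaction and,
 * when a commit is due, either commits it directly (@throttle set) or
 * wakes the transaction kthread.
 */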
549 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
550                           struct btrfs_root *root, int throttle)
551 {
552         struct btrfs_transaction *cur_trans = trans->transaction;
553         struct btrfs_fs_info *info = root->fs_info;
554         int count = 0;
555         int lock = (trans->type != TRANS_JOIN_NOLOCK);
556         int err = 0;
557
558         if (--trans->use_count) {
559                 trans->block_rsv = trans->orig_rsv;
560                 return 0;
561         }
562
563         /*
564          * do the qgroup accounting as early as possible
565          */
566         err = btrfs_delayed_refs_qgroup_accounting(trans, info);
567
568         btrfs_trans_release_metadata(trans, root);
569         trans->block_rsv = NULL;
570         /*
571          * the same root has to be passed to start_transaction and
572          * end_transaction. Subvolume quota depends on this.
573          */
574         WARN_ON(trans->root != root);
575
576         if (trans->qgroup_reserved) {
577                 btrfs_qgroup_free(root, trans->qgroup_reserved);
578                 trans->qgroup_reserved = 0;
579         }
580
581         if (!list_empty(&trans->new_bgs))
582                 btrfs_create_pending_block_groups(trans, root);
583
584         while (count < 2) {
585                 unsigned long cur = trans->delayed_ref_updates;
586                 trans->delayed_ref_updates = 0;
587                 if (cur &&
588                     trans->transaction->delayed_refs.num_heads_ready > 64) {
589                         trans->delayed_ref_updates = 0;
590                         btrfs_run_delayed_refs(trans, root, cur);
591                 } else {
592                         break;
593                 }
594                 count++;
595         }
596         btrfs_trans_release_metadata(trans, root);
597         trans->block_rsv = NULL;
598
599         if (!list_empty(&trans->new_bgs))
600                 btrfs_create_pending_block_groups(trans, root);
601
602         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
603             should_end_transaction(trans, root)) {
604                 trans->transaction->blocked = 1;
605                 smp_wmb();
606         }
607
608         if (lock && cur_trans->blocked && !cur_trans->in_commit) {
609                 if (throttle) {
610                         /*
611                          * We may race with somebody else here and end up
612                          * having to call end_transaction on ourselves again,
613                          * so inc our use_count.
614                          */
615                         trans->use_count++;
616                         return btrfs_commit_transaction(trans, root);
617                 } else {
618                         wake_up_process(info->transaction_kthread);
619                 }
620         }
621
622         if (trans->type < TRANS_JOIN_NOLOCK)
623                 sb_end_intwrite(root->fs_info->sb);
624
625         WARN_ON(cur_trans != info->running_transaction);
626         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
627         atomic_dec(&cur_trans->num_writers);
628
629         smp_mb();
630         if (waitqueue_active(&cur_trans->writer_wait))
631                 wake_up(&cur_trans->writer_wait);
632         put_transaction(cur_trans);
633
634         if (current->journal_info == trans)
635                 current->journal_info = NULL;
636
637         if (throttle)
638                 btrfs_run_delayed_iputs(root);
639
640         if (trans->aborted ||
641             root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
642                 err = -EIO;
643         }
644         assert_qgroups_uptodate(trans);
645
646         memset(trans, 0, sizeof(*trans));
647         kmem_cache_free(btrfs_trans_handle_cachep, trans);
648         return err;
649 }
650
651 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
652                           struct btrfs_root *root)
653 {
654         int ret;
655
656         ret = __btrfs_end_transaction(trans, root, 0);
657         if (ret)
658                 return ret;
659         return 0;
660 }
661
662 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
663                                    struct btrfs_root *root)
664 {
665         int ret;
666
667         ret = __btrfs_end_transaction(trans, root, 1);
668         if (ret)
669                 return ret;
670         return 0;
671 }
672
673 int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
674                                 struct btrfs_root *root)
675 {
676         return __btrfs_end_transaction(trans, root, 1);
677 }
678
679 /*
680  * when btree blocks are allocated, they have some corresponding bits set for
681  * them in one of two extent_io trees.  This is used to make sure all of
682  * those extents are sent to disk but does not wait on them
683  */
684 int btrfs_write_marked_extents(struct btrfs_root *root,
685                                struct extent_io_tree *dirty_pages, int mark)
686 {
687         int err = 0;
688         int werr = 0;
689         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
690         struct extent_state *cached_state = NULL;
691         u64 start = 0;
692         u64 end;
693
694         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
695                                       mark, &cached_state)) {
696                 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
697                                    mark, &cached_state, GFP_NOFS);
698                 cached_state = NULL;
699                 err = filemap_fdatawrite_range(mapping, start, end);
700                 if (err)
701                         werr = err;
702                 cond_resched();
703                 start = end + 1;
704         }
705         if (err)
706                 werr = err;
707         return werr;
708 }
709
710 /*
711  * when btree blocks are allocated, they have some corresponding bits set for
712  * them in one of two extent_io trees.  This is used to make sure all of
713  * those extents are on disk for transaction or log commit.  We wait
714  * on all the pages and clear them from the dirty pages state tree
715  */
716 int btrfs_wait_marked_extents(struct btrfs_root *root,
717                               struct extent_io_tree *dirty_pages, int mark)
718 {
719         int err = 0;
720         int werr = 0;
721         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
722         struct extent_state *cached_state = NULL;
723         u64 start = 0;
724         u64 end;
725
726         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
727                                       EXTENT_NEED_WAIT, &cached_state)) {
728                 clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
729                                  0, 0, &cached_state, GFP_NOFS);
730                 err = filemap_fdatawait_range(mapping, start, end);
731                 if (err)
732                         werr = err;
733                 cond_resched();
734                 start = end + 1;
735         }
736         if (err)
737                 werr = err;
738         return werr;
739 }
740
741 /*
742  * when btree blocks are allocated, they have some corresponding bits set for
743  * them in one of two extent_io trees.  This is used to make sure all of
744  * those extents are on disk for transaction or log commit
745  */
746 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
747                                 struct extent_io_tree *dirty_pages, int mark)
748 {
749         int ret;
750         int ret2;
751
752         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
753         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
754
755         if (ret)
756                 return ret;
757         if (ret2)
758                 return ret2;
759         return 0;
760 }
761
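/*
 * Write out and wait on everything the transaction dirtied in the btree
 * inode.  With no transaction (or a NULL handle) this falls back to
 * flushing the whole btree address space.
 */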
762 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
763                                      struct btrfs_root *root)
764 {
765         if (!trans || !trans->transaction) {
766                 struct inode *btree_inode;
767                 btree_inode = root->fs_info->btree_inode;
768                 return filemap_write_and_wait(btree_inode->i_mapping);
769         }
770         return btrfs_write_and_wait_marked_extents(root,
771                                            &trans->transaction->dirty_pages,
772                                            EXTENT_DIRTY);
773 }
774
775 /*
776  * this is used to update the root pointer in the tree of tree roots.
777  *
778  * But, in the case of the extent allocation tree, updating the root
779  * pointer may allocate blocks which may change the root of the extent
780  * allocation tree.
781  *
782  * So, this loops and repeats and makes sure the cowonly root didn't
783  * change while the root pointer was being updated in the metadata.
784  */
785 static int update_cowonly_root(struct btrfs_trans_handle *trans,
786                                struct btrfs_root *root)
787 {
788         int ret;
789         u64 old_root_bytenr;
790         u64 old_root_used;
791         struct btrfs_root *tree_root = root->fs_info->tree_root;
792
793         old_root_used = btrfs_root_used(&root->root_item);
794         btrfs_write_dirty_block_groups(trans, root);
795
796         while (1) {
797                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
798                 if (old_root_bytenr == root->node->start &&
799                     old_root_used == btrfs_root_used(&root->root_item))
800                         break;
801
802                 btrfs_set_root_node(&root->root_item, root->node);
803                 ret = btrfs_update_root(trans, tree_root,
804                                         &root->root_key,
805                                         &root->root_item);
806                 if (ret)
807                         return ret;
808
809                 old_root_used = btrfs_root_used(&root->root_item);
810                 ret = btrfs_write_dirty_block_groups(trans, root);
811                 if (ret)
812                         return ret;
813         }
814
815         if (root != root->fs_info->extent_root)
816                 switch_commit_root(root);
817
818         return 0;
819 }
820
821 /*
822  * update all the cowonly tree roots on disk
823  *
824  * The error handling in this function may not be obvious. Any of the
825  * failures will cause the file system to go offline. We still need
826  * to clean up the delayed refs.
827  */
828 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
829                                          struct btrfs_root *root)
830 {
831         struct btrfs_fs_info *fs_info = root->fs_info;
832         struct list_head *next;
833         struct extent_buffer *eb;
834         int ret;
835
836         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
837         if (ret)
838                 return ret;
839
840         eb = btrfs_lock_root_node(fs_info->tree_root);
841         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
842                               0, &eb);
843         btrfs_tree_unlock(eb);
844         free_extent_buffer(eb);
845
846         if (ret)
847                 return ret;
848
849         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
850         if (ret)
851                 return ret;
852
853         ret = btrfs_run_dev_stats(trans, root->fs_info);
854         BUG_ON(ret);
855
856         ret = btrfs_run_qgroups(trans, root->fs_info);
857         BUG_ON(ret);
858
859         /* run_qgroups might have added some more refs */
860         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
861         BUG_ON(ret);
862
863         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
864                 next = fs_info->dirty_cowonly_roots.next;
865                 list_del_init(next);
866                 root = list_entry(next, struct btrfs_root, dirty_list);
867
868                 ret = update_cowonly_root(trans, root);
869                 if (ret)
870                         return ret;
871         }
872
873         down_write(&fs_info->extent_commit_sem);
874         switch_commit_root(fs_info->extent_root);
875         up_write(&fs_info->extent_commit_sem);
876
877         return 0;
878 }
879
880 /*
881  * dead roots are old snapshots that need to be deleted.  This allocates
882  * a dirty root struct and adds it into the list of dead roots that need to
883  * be deleted
884  */
885 int btrfs_add_dead_root(struct btrfs_root *root)
886 {
887         spin_lock(&root->fs_info->trans_lock);
888         list_add(&root->root_list, &root->fs_info->dead_roots);
889         spin_unlock(&root->fs_info->trans_lock);
890         return 0;
891 }
892
893 /*
894  * update all the fs tree roots that were modified in this transaction
895  */
896 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
897                                     struct btrfs_root *root)
898 {
899         struct btrfs_root *gang[8];
900         struct btrfs_fs_info *fs_info = root->fs_info;
901         int i;
902         int ret;
903         int err = 0;
904
905         spin_lock(&fs_info->fs_roots_radix_lock);
906         while (1) {
907                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
908                                                  (void **)gang, 0,
909                                                  ARRAY_SIZE(gang),
910                                                  BTRFS_ROOT_TRANS_TAG);
911                 if (ret == 0)
912                         break;
913                 for (i = 0; i < ret; i++) {
914                         root = gang[i];
915                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
916                                         (unsigned long)root->root_key.objectid,
917                                         BTRFS_ROOT_TRANS_TAG);
918                         spin_unlock(&fs_info->fs_roots_radix_lock);
919
920                         btrfs_free_log(trans, root);
921                         btrfs_update_reloc_root(trans, root);
922                         btrfs_orphan_commit_root(trans, root);
923
924                         btrfs_save_ino_cache(root, trans);
925
926                         /* see comments in should_cow_block() */
927                         root->force_cow = 0;
928                         smp_wmb();
929
930                         if (root->commit_root != root->node) {
931                                 mutex_lock(&root->fs_commit_mutex);
932                                 switch_commit_root(root);
933                                 btrfs_unpin_free_ino(root);
934                                 mutex_unlock(&root->fs_commit_mutex);
935
936                                 btrfs_set_root_node(&root->root_item,
937                                                     root->node);
938                         }
939
940                         err = btrfs_update_root(trans, fs_info->tree_root,
941                                                 &root->root_key,
942                                                 &root->root_item);
943                         spin_lock(&fs_info->fs_roots_radix_lock);
944                         if (err)
945                                 break;
946                 }
947         }
948         spin_unlock(&fs_info->fs_roots_radix_lock);
949         return err;
950 }
951
952 /*
953  * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
954  * otherwise every leaf in the btree is read and defragged.
955  */
956 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
957 {
958         struct btrfs_fs_info *info = root->fs_info;
959         struct btrfs_trans_handle *trans;
960         int ret;
961         unsigned long nr;
962
963         if (xchg(&root->defrag_running, 1))
964                 return 0;
965
966         while (1) {
967                 trans = btrfs_start_transaction(root, 0);
968                 if (IS_ERR(trans))
969                         return PTR_ERR(trans);
970
971                 ret = btrfs_defrag_leaves(trans, root, cacheonly);
972
973                 nr = trans->blocks_used;
974                 btrfs_end_transaction(trans, root);
975                 btrfs_btree_balance_dirty(info->tree_root, nr);
976                 cond_resched();
977
978                 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
979                         break;
980         }
981         root->defrag_running = 0;
982         return ret;
983 }
984
985 /*
986  * new snapshots need to be created at a very specific time in the
987  * transaction commit.  This does the actual creation
988  */
989 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
990                                    struct btrfs_fs_info *fs_info,
991                                    struct btrfs_pending_snapshot *pending)
992 {
993         struct btrfs_key key;
994         struct btrfs_root_item *new_root_item;
995         struct btrfs_root *tree_root = fs_info->tree_root;
996         struct btrfs_root *root = pending->root;
997         struct btrfs_root *parent_root;
998         struct btrfs_block_rsv *rsv;
999         struct inode *parent_inode;
1000         struct btrfs_path *path;
1001         struct btrfs_dir_item *dir_item;
1002         struct dentry *parent;
1003         struct dentry *dentry;
1004         struct extent_buffer *tmp;
1005         struct extent_buffer *old;
1006         struct timespec cur_time = CURRENT_TIME;
1007         int ret;
1008         u64 to_reserve = 0;
1009         u64 index = 0;
1010         u64 objectid;
1011         u64 root_flags;
1012         uuid_le new_uuid;
1013
1014         path = btrfs_alloc_path();
1015         if (!path) {
1016                 ret = pending->error = -ENOMEM;
1017                 goto path_alloc_fail;
1018         }
1019
1020         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
1021         if (!new_root_item) {
1022                 ret = pending->error = -ENOMEM;
1023                 goto root_item_alloc_fail;
1024         }
1025
1026         ret = btrfs_find_free_objectid(tree_root, &objectid);
1027         if (ret) {
1028                 pending->error = ret;
1029                 goto no_free_objectid;
1030         }
1031
1032         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
1033
1034         if (to_reserve > 0) {
1035                 ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
1036                                                   to_reserve);
1037                 if (ret) {
1038                         pending->error = ret;
1039                         goto no_free_objectid;
1040                 }
1041         }
1042
1043         ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
1044                                    objectid, pending->inherit);
1045         if (ret) {
1046                 pending->error = ret;
1047                 goto no_free_objectid;
1048         }
1049
1050         key.objectid = objectid;
1051         key.offset = (u64)-1;
1052         key.type = BTRFS_ROOT_ITEM_KEY;
1053
1054         rsv = trans->block_rsv;
1055         trans->block_rsv = &pending->block_rsv;
1056
1057         dentry = pending->dentry;
1058         parent = dget_parent(dentry);
1059         parent_inode = parent->d_inode;
1060         parent_root = BTRFS_I(parent_inode)->root;
1061         record_root_in_trans(trans, parent_root);
1062
1063         /*
1064          * insert the directory item
1065          */
1066         ret = btrfs_set_inode_index(parent_inode, &index);
1067         BUG_ON(ret); /* -ENOMEM */
1068
1069         /* check if there is a file/dir which has the same name. */
1070         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1071                                          btrfs_ino(parent_inode),
1072                                          dentry->d_name.name,
1073                                          dentry->d_name.len, 0);
1074         if (dir_item != NULL && !IS_ERR(dir_item)) {
1075                 pending->error = -EEXIST;
1076                 goto fail;
1077         } else if (IS_ERR(dir_item)) {
1078                 ret = PTR_ERR(dir_item);
1079                 btrfs_abort_transaction(trans, root, ret);
1080                 goto fail;
1081         }
1082         btrfs_release_path(path);
1083
1084         /*
1085          * pull in the delayed directory update
1086          * and the delayed inode item
1087          * otherwise we corrupt the FS during
1088          * snapshot
1089          */
1090         ret = btrfs_run_delayed_items(trans, root);
1091         if (ret) {      /* Transaction aborted */
1092                 btrfs_abort_transaction(trans, root, ret);
1093                 goto fail;
1094         }
1095
1096         record_root_in_trans(trans, root);
1097         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1098         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1099         btrfs_check_and_init_root_item(new_root_item);
1100
1101         root_flags = btrfs_root_flags(new_root_item);
1102         if (pending->readonly)
1103                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1104         else
1105                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1106         btrfs_set_root_flags(new_root_item, root_flags);
1107
1108         btrfs_set_root_generation_v2(new_root_item,
1109                         trans->transid);
1110         uuid_le_gen(&new_uuid);
1111         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1112         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1113                         BTRFS_UUID_SIZE);
1114         new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
1115         new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
1116         btrfs_set_root_otransid(new_root_item, trans->transid);
1117         memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1118         memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1119         btrfs_set_root_stransid(new_root_item, 0);
1120         btrfs_set_root_rtransid(new_root_item, 0);
1121
1122         old = btrfs_lock_root_node(root);
1123         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1124         if (ret) {
1125                 btrfs_tree_unlock(old);
1126                 free_extent_buffer(old);
1127                 btrfs_abort_transaction(trans, root, ret);
1128                 goto fail;
1129         }
1130
1131         btrfs_set_lock_blocking(old);
1132
1133         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1134         /* clean up in any case */
1135         btrfs_tree_unlock(old);
1136         free_extent_buffer(old);
1137         if (ret) {
1138                 btrfs_abort_transaction(trans, root, ret);
1139                 goto fail;
1140         }
1141
1142         /* see comments in should_cow_block() */
1143         root->force_cow = 1;
1144         smp_wmb();
1145
1146         btrfs_set_root_node(new_root_item, tmp);
1147         /* record when the snapshot was created in key.offset */
1148         key.offset = trans->transid;
1149         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1150         btrfs_tree_unlock(tmp);
1151         free_extent_buffer(tmp);
1152         if (ret) {
1153                 btrfs_abort_transaction(trans, root, ret);
1154                 goto fail;
1155         }
1156
1157         /*
1158          * insert root back/forward references
1159          */
1160         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1161                                  parent_root->root_key.objectid,
1162                                  btrfs_ino(parent_inode), index,
1163                                  dentry->d_name.name, dentry->d_name.len);
1164         if (ret) {
1165                 btrfs_abort_transaction(trans, root, ret);
1166                 goto fail;
1167         }
1168
1169         key.offset = (u64)-1;
1170         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1171         if (IS_ERR(pending->snap)) {
1172                 ret = PTR_ERR(pending->snap);
1173                 btrfs_abort_transaction(trans, root, ret);
1174                 goto fail;
1175         }
1176
1177         ret = btrfs_reloc_post_snapshot(trans, pending);
1178         if (ret) {
1179                 btrfs_abort_transaction(trans, root, ret);
1180                 goto fail;
1181         }
1182
1183         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1184         if (ret) {
1185                 btrfs_abort_transaction(trans, root, ret);
1186                 goto fail;
1187         }
1188
1189         ret = btrfs_insert_dir_item(trans, parent_root,
1190                                     dentry->d_name.name, dentry->d_name.len,
1191                                     parent_inode, &key,
1192                                     BTRFS_FT_DIR, index);
1193         /* We checked the name at the beginning, so -EEXIST here is impossible. */
1194         BUG_ON(ret == -EEXIST);
1195         if (ret) {
1196                 btrfs_abort_transaction(trans, root, ret);
1197                 goto fail;
1198         }
1199
1200         btrfs_i_size_write(parent_inode, parent_inode->i_size +
1201                                          dentry->d_name.len * 2);
1202         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1203         ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1204         if (ret)
1205                 btrfs_abort_transaction(trans, root, ret);
1206 fail:
1207         dput(parent);
1208         trans->block_rsv = rsv;
1209 no_free_objectid:
1210         kfree(new_root_item);
1211 root_item_alloc_fail:
1212         btrfs_free_path(path);
1213 path_alloc_fail:
1214         btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
1215         return ret;
1216 }
1217
1218 /*
1219  * create all the snapshots we've scheduled for creation
1220  */
1221 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1222                                              struct btrfs_fs_info *fs_info)
1223 {
1224         struct btrfs_pending_snapshot *pending;
1225         struct list_head *head = &trans->transaction->pending_snapshots;
1226
1227         list_for_each_entry(pending, head, list)
1228                 create_pending_snapshot(trans, fs_info, pending);
1229         return 0;
1230 }
1231
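/*
 * Copy the freshly committed chunk root and tree root pointers (bytenr,
 * generation, level) into the in-memory copy of the super block, so the
 * upcoming super block write points at the new trees.
 */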
1232 static void update_super_roots(struct btrfs_root *root)
1233 {
1234         struct btrfs_root_item *root_item;
1235         struct btrfs_super_block *super;
1236
1237         super = root->fs_info->super_copy;
1238
1239         root_item = &root->fs_info->chunk_root->root_item;
1240         super->chunk_root = root_item->bytenr;
1241         super->chunk_root_generation = root_item->generation;
1242         super->chunk_root_level = root_item->level;
1243
1244         root_item = &root->fs_info->tree_root->root_item;
1245         super->root = root_item->bytenr;
1246         super->generation = root_item->generation;
1247         super->root_level = root_item->level;
1248         if (btrfs_test_opt(root, SPACE_CACHE))
1249                 super->cache_generation = root_item->generation;
1250 }
1251
1252 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1253 {
1254         int ret = 0;
1255         spin_lock(&info->trans_lock);
1256         if (info->running_transaction)
1257                 ret = info->running_transaction->in_commit;
1258         spin_unlock(&info->trans_lock);
1259         return ret;
1260 }
1261
1262 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1263 {
1264         int ret = 0;
1265         spin_lock(&info->trans_lock);
1266         if (info->running_transaction)
1267                 ret = info->running_transaction->blocked;
1268         spin_unlock(&info->trans_lock);
1269         return ret;
1270 }
1271
1272 /*
1273  * wait for the current transaction commit to start and block subsequent
1274  * transaction joins
1275  */
1276 static void wait_current_trans_commit_start(struct btrfs_root *root,
1277                                             struct btrfs_transaction *trans)
1278 {
1279         wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
1280 }
1281
1282 /*
1283  * wait for the current transaction to start and then become unblocked.
1284  * caller holds ref.
1285  */
1286 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1287                                          struct btrfs_transaction *trans)
1288 {
1289         wait_event(root->fs_info->transaction_wait,
1290                    trans->commit_done || (trans->in_commit && !trans->blocked));
1291 }
1292
1293 /*
1294  * commit transactions asynchronously. once btrfs_commit_transaction_async
1295  * returns, any subsequent transaction will not be allowed to join.
1296  */
1297 struct btrfs_async_commit {
1298         struct btrfs_trans_handle *newtrans;
1299         struct btrfs_root *root;
1300         struct delayed_work work;
1301 };
1302
1303 static void do_async_commit(struct work_struct *work)
1304 {
1305         struct btrfs_async_commit *ac =
1306                 container_of(work, struct btrfs_async_commit, work.work);
1307
1308         /*
1309          * We've got freeze protection passed with the transaction.
1310          * Tell lockdep about it.
1311          */
1312         rwsem_acquire_read(
1313                 &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1314                 0, 1, _THIS_IP_);
1315
1316         current->journal_info = ac->newtrans;
1317
1318         btrfs_commit_transaction(ac->newtrans, ac->root);
1319         kfree(ac);
1320 }
1321
1322 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1323                                    struct btrfs_root *root,
1324                                    int wait_for_unblock)
1325 {
1326         struct btrfs_async_commit *ac;
1327         struct btrfs_transaction *cur_trans;
1328
1329         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1330         if (!ac)
1331                 return -ENOMEM;
1332
1333         INIT_DELAYED_WORK(&ac->work, do_async_commit);
1334         ac->root = root;
1335         ac->newtrans = btrfs_join_transaction(root);
1336         if (IS_ERR(ac->newtrans)) {
1337                 int err = PTR_ERR(ac->newtrans);
1338                 kfree(ac);
1339                 return err;
1340         }
1341
1342         /* take transaction reference */
1343         cur_trans = trans->transaction;
1344         atomic_inc(&cur_trans->use_count);
1345
1346         btrfs_end_transaction(trans, root);
1347
1348         /*
1349          * Tell lockdep we've released the freeze rwsem, since the
1350          * async commit thread will be the one to unlock it.
1351          */
1352         rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1353                       1, _THIS_IP_);
1354
1355         schedule_delayed_work(&ac->work, 0);
1356
1357         /* wait for transaction to start and unblock */
1358         if (wait_for_unblock)
1359                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1360         else
1361                 wait_current_trans_commit_start(root, cur_trans);
1362
1363         if (current->journal_info == trans)
1364                 current->journal_info = NULL;
1365
1366         put_transaction(cur_trans);
1367         return 0;
1368 }
1369
1370
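/*
 * Error path for a failed commit: abort the transaction, unhook it from
 * fs_info so new joins see no running transaction, tear down its pending
 * state and drop the two remaining references (the handle's and the one
 * that was being held until commit).
 */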
1371 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1372                                 struct btrfs_root *root, int err)
1373 {
1374         struct btrfs_transaction *cur_trans = trans->transaction;
1375
1376         WARN_ON(trans->use_count > 1);
1377
1378         btrfs_abort_transaction(trans, root, err);
1379
1380         spin_lock(&root->fs_info->trans_lock);
1381         list_del_init(&cur_trans->list);
1382         if (cur_trans == root->fs_info->running_transaction) {
1383                 root->fs_info->running_transaction = NULL;
1384                 root->fs_info->trans_no_join = 0;
1385         }
1386         spin_unlock(&root->fs_info->trans_lock);
1387
1388         btrfs_cleanup_one_transaction(trans->transaction, root);
1389
1390         put_transaction(cur_trans);
1391         put_transaction(cur_trans);
1392
1393         trace_btrfs_transaction_commit(root);
1394
1395         btrfs_scrub_continue(root);
1396
1397         if (current->journal_info == trans)
1398                 current->journal_info = NULL;
1399
1400         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1401 }
1402
1403 /*
1404  * btrfs_transaction state sequence:
1405  *    in_commit = 0, blocked = 0  (initial)
1406  *    in_commit = 1, blocked = 1
1407  *    blocked = 0
1408  *    commit_done = 1
1409  */
1410 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1411                              struct btrfs_root *root)
1412 {
1413         unsigned long joined = 0;
1414         struct btrfs_transaction *cur_trans = trans->transaction;
1415         struct btrfs_transaction *prev_trans = NULL;
1416         DEFINE_WAIT(wait);
1417         int ret = -EIO;
1418         int should_grow = 0;
1419         unsigned long now = get_seconds();
1420         int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
1421
1422         btrfs_run_ordered_operations(root, 0);
1423
1424         if (cur_trans->aborted)
1425                 goto cleanup_transaction;
1426
1427         /* make a pass through all the delayed refs we have so far
1428          * any running procs may add more while we are here
1429          */
1430         ret = btrfs_run_delayed_refs(trans, root, 0);
1431         if (ret)
1432                 goto cleanup_transaction;
1433
1434         btrfs_trans_release_metadata(trans, root);
1435         trans->block_rsv = NULL;
1436
1437         cur_trans = trans->transaction;
1438
1439         /*
1440          * set the flushing flag so procs in this transaction have to
1441          * start sending their work down.
1442          */
1443         cur_trans->delayed_refs.flushing = 1;
1444
1445         if (!list_empty(&trans->new_bgs))
1446                 btrfs_create_pending_block_groups(trans, root);
1447
1448         ret = btrfs_run_delayed_refs(trans, root, 0);
1449         if (ret)
1450                 goto cleanup_transaction;
1451
1452         spin_lock(&cur_trans->commit_lock);
1453         if (cur_trans->in_commit) {
1454                 spin_unlock(&cur_trans->commit_lock);
1455                 atomic_inc(&cur_trans->use_count);
1456                 ret = btrfs_end_transaction(trans, root);
1457
1458                 wait_for_commit(root, cur_trans);
1459
1460                 put_transaction(cur_trans);
1461
1462                 return ret;
1463         }
1464
1465         trans->transaction->in_commit = 1;
1466         trans->transaction->blocked = 1;
1467         spin_unlock(&cur_trans->commit_lock);
1468         wake_up(&root->fs_info->transaction_blocked_wait);
1469
1470         spin_lock(&root->fs_info->trans_lock);
1471         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1472                 prev_trans = list_entry(cur_trans->list.prev,
1473                                         struct btrfs_transaction, list);
1474                 if (!prev_trans->commit_done) {
1475                         atomic_inc(&prev_trans->use_count);
1476                         spin_unlock(&root->fs_info->trans_lock);
1477
1478                         wait_for_commit(root, prev_trans);
1479
1480                         put_transaction(prev_trans);
1481                 } else {
1482                         spin_unlock(&root->fs_info->trans_lock);
1483                 }
1484         } else {
1485                 spin_unlock(&root->fs_info->trans_lock);
1486         }
1487
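        /*
         * On non-SSD mounts, if this transaction is less than a second old,
         * "grow" it: the loop below keeps it open briefly so more writers can
         * join and be batched into a single commit.
         */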
1488         if (!btrfs_test_opt(root, SSD) &&
1489             (now < cur_trans->start_time || now - cur_trans->start_time < 1))
1490                 should_grow = 1;
1491
1492         do {
1493                 int snap_pending = 0;
1494
1495                 joined = cur_trans->num_joined;
1496                 if (!list_empty(&trans->transaction->pending_snapshots))
1497                         snap_pending = 1;
1498
1499                 WARN_ON(cur_trans != trans->transaction);
1500
1501                 if (flush_on_commit || snap_pending) {
1502                         btrfs_start_delalloc_inodes(root, 1);
1503                         btrfs_wait_ordered_extents(root, 1);
1504                 }
1505
1506                 ret = btrfs_run_delayed_items(trans, root);
1507                 if (ret)
1508                         goto cleanup_transaction;
1509
1510                 /*
1511                  * running the delayed items may have added new refs. account
1512                  * them now so that they hinder processing of more delayed refs
1513                  * as little as possible.
1514                  */
1515                 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
1516
1517                 /*
1518                  * rename doesn't use btrfs_join_transaction, so once we
1519                  * set the transaction to blocked above, we aren't going
1520                  * to get any new ordered operations.  We can safely run
1521                  * it here and know for sure that nothing new will be added
1522                  * to the list.
1523                  */
1524                 btrfs_run_ordered_operations(root, 1);
1525
1526                 prepare_to_wait(&cur_trans->writer_wait, &wait,
1527                                 TASK_UNINTERRUPTIBLE);
1528
1529                 if (atomic_read(&cur_trans->num_writers) > 1)
1530                         schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1531                 else if (should_grow)
1532                         schedule_timeout(1);
1533
1534                 finish_wait(&cur_trans->writer_wait, &wait);
1535         } while (atomic_read(&cur_trans->num_writers) > 1 ||
1536                  (should_grow && cur_trans->num_joined != joined));
1537
1538         /*
1539          * Ok, now we need to make sure to block out any other joins while we
1540          * commit the transaction.  We could have started a join before setting
1541          * no_join, so make sure to wait until num_writers drops back to 1.
1542          */
1543         spin_lock(&root->fs_info->trans_lock);
1544         root->fs_info->trans_no_join = 1;
1545         spin_unlock(&root->fs_info->trans_lock);
1546         wait_event(cur_trans->writer_wait,
1547                    atomic_read(&cur_trans->num_writers) == 1);
1548
1549         /*
1550          * the reloc mutex makes sure that we stop
1551          * the balancing code from coming in and moving
1552          * extents around in the middle of the commit
1553          */
1554         mutex_lock(&root->fs_info->reloc_mutex);
1555
1556         /*
1557          * We needn't worry about the delayed items because we will
1558          * deal with them in create_pending_snapshot(), which is the
1559          * core function of the snapshot creation.
1560          */
1561         ret = create_pending_snapshots(trans, root->fs_info);
1562         if (ret) {
1563                 mutex_unlock(&root->fs_info->reloc_mutex);
1564                 goto cleanup_transaction;
1565         }
1566
1567         /*
1568          * We insert the dir indexes of the snapshots and update the inodes
1569          * of the snapshots' parents after the snapshot creation, so there
1570          * are some delayed items which are not dealt with. Now deal with
1571          * them.
1572          *
1573          * We needn't worry that this operation will corrupt the snapshots,
1574          * because all the trees which are snapshotted will be forced to COW
1575          * the nodes and leaves.
1576          */
1577         ret = btrfs_run_delayed_items(trans, root);
1578         if (ret) {
1579                 mutex_unlock(&root->fs_info->reloc_mutex);
1580                 goto cleanup_transaction;
1581         }
1582
1583         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1584         if (ret) {
1585                 mutex_unlock(&root->fs_info->reloc_mutex);
1586                 goto cleanup_transaction;
1587         }
1588
1589         /*
1590          * make sure none of the code above managed to slip in a
1591          * delayed item
1592          */
1593         btrfs_assert_delayed_root_empty(root);
1594
1595         WARN_ON(cur_trans != trans->transaction);
1596
1597         btrfs_scrub_pause(root);
1598         /* commit_cowonly_roots is responsible for getting the
1599          * various roots consistent with each other.  Every pointer
1600          * in the tree of tree roots has to point to the most up-to-date
1601          * root for every subvolume and other tree.  So, we have to keep
1602          * the tree logging code from jumping in and changing any
1603          * of the trees.
1604          *
1605          * At this point in the commit, there can't be any tree-log
1606          * writers, but a little lower down we clear trans_no_join
1607          * and let new writers in.  By holding the tree_log_mutex
1608          * from now until after the super is written, we avoid races
1609          * with the tree-log code.
1610          */
1611         mutex_lock(&root->fs_info->tree_log_mutex);
1612
1613         ret = commit_fs_roots(trans, root);
1614         if (ret) {
1615                 mutex_unlock(&root->fs_info->tree_log_mutex);
1616                 mutex_unlock(&root->fs_info->reloc_mutex);
1617                 goto cleanup_transaction;
1618         }
1619
1620         /* commit_fs_roots gets rid of all the tree log roots; it is now
1621          * safe to free the log root tree
1622          */
1623         btrfs_free_log_root_tree(trans, root->fs_info);
1624
1625         ret = commit_cowonly_roots(trans, root);
1626         if (ret) {
1627                 mutex_unlock(&root->fs_info->tree_log_mutex);
1628                 mutex_unlock(&root->fs_info->reloc_mutex);
1629                 goto cleanup_transaction;
1630         }
1631
1632         btrfs_prepare_extent_commit(trans, root);
1633
1634         cur_trans = root->fs_info->running_transaction;
1635
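        /*
         * Record the freshly COWed tree root and chunk root nodes in their
         * root items and switch their commit roots to the new versions.
         */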
1636         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1637                             root->fs_info->tree_root->node);
1638         switch_commit_root(root->fs_info->tree_root);
1639
1640         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1641                             root->fs_info->chunk_root->node);
1642         switch_commit_root(root->fs_info->chunk_root);
1643
1644         assert_qgroups_uptodate(trans);
1645         update_super_roots(root);
1646
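        /*
         * Unless a log tree is still being replayed, zero the log root fields
         * so the superblock written by this commit does not point at a stale
         * tree log.
         */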
1647         if (!root->fs_info->log_root_recovering) {
1648                 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1649                 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1650         }
1651
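        /*
         * The superblock write-out below works from super_for_commit, so take
         * a stable copy of super_copy for this commit now.
         */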
1652         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1653                sizeof(*root->fs_info->super_copy));
1654
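        /*
         * Unblock the filesystem: clear blocked, drop running_transaction and
         * allow joins again, so a new transaction can start while this one is
         * written out.
         */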
1655         trans->transaction->blocked = 0;
1656         spin_lock(&root->fs_info->trans_lock);
1657         root->fs_info->running_transaction = NULL;
1658         root->fs_info->trans_no_join = 0;
1659         spin_unlock(&root->fs_info->trans_lock);
1660         mutex_unlock(&root->fs_info->reloc_mutex);
1661
1662         wake_up(&root->fs_info->transaction_wait);
1663
1664         ret = btrfs_write_and_wait_transaction(trans, root);
1665         if (ret) {
1666                 btrfs_error(root->fs_info, ret,
1667                             "Error while writing out transaction.");
1668                 mutex_unlock(&root->fs_info->tree_log_mutex);
1669                 goto cleanup_transaction;
1670         }
1671
1672         ret = write_ctree_super(trans, root, 0);
1673         if (ret) {
1674                 mutex_unlock(&root->fs_info->tree_log_mutex);
1675                 goto cleanup_transaction;
1676         }
1677
1678         /*
1679          * the super is written, we can safely allow the tree-loggers
1680          * to go about their business
1681          */
1682         mutex_unlock(&root->fs_info->tree_log_mutex);
1683
1684         btrfs_finish_extent_commit(trans, root);
1685
1686         cur_trans->commit_done = 1;
1687
1688         root->fs_info->last_trans_committed = cur_trans->transid;
1689
1690         wake_up(&cur_trans->commit_wait);
1691
1692         spin_lock(&root->fs_info->trans_lock);
1693         list_del_init(&cur_trans->list);
1694         spin_unlock(&root->fs_info->trans_lock);
1695
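        /*
         * As in cleanup_transaction(): one reference for this handle and one
         * for the spot the transaction held on fs_info->trans_list.
         */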
1696         put_transaction(cur_trans);
1697         put_transaction(cur_trans);
1698
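        /*
         * Pairs with the sb_start_intwrite() taken when the handle was
         * started; JOIN_NOLOCK handles never took it.
         */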
1699         if (trans->type < TRANS_JOIN_NOLOCK)
1700                 sb_end_intwrite(root->fs_info->sb);
1701
1702         trace_btrfs_transaction_commit(root);
1703
1704         btrfs_scrub_continue(root);
1705
1706         if (current->journal_info == trans)
1707                 current->journal_info = NULL;
1708
1709         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1710
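        /*
         * Delayed iputs are skipped when we are the transaction kthread,
         * presumably so the kthread does not recurse back into transaction
         * work from iput; the cleaner thread picks them up instead.
         */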
1711         if (current != root->fs_info->transaction_kthread)
1712                 btrfs_run_delayed_iputs(root);
1713
1714         return ret;
1715
1716 cleanup_transaction:
1717         btrfs_trans_release_metadata(trans, root);
1718         trans->block_rsv = NULL;
1719         btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1720 //      WARN_ON(1);
1721         if (current->journal_info == trans)
1722                 current->journal_info = NULL;
1723         cleanup_transaction(trans, root, ret);
1724
1725         return ret;
1726 }
1727
1728 /*
1729  * interface function to delete all the snapshots we have scheduled for deletion
1730  */
1731 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1732 {
1733         LIST_HEAD(list);
1734         struct btrfs_fs_info *fs_info = root->fs_info;
1735
1736         spin_lock(&fs_info->trans_lock);
1737         list_splice_init(&fs_info->dead_roots, &list);
1738         spin_unlock(&fs_info->trans_lock);
1739
1740         while (!list_empty(&list)) {
1741                 int ret;
1742
1743                 root = list_entry(list.next, struct btrfs_root, root_list);
1744                 list_del(&root->root_list);
1745
1746                 btrfs_kill_all_delayed_nodes(root);
1747
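                /*
                 * Roots written before the mixed backref format are dropped
                 * without the update_ref walk; newer roots pass update_ref so
                 * btrfs_drop_snapshot() keeps refs consistent as it goes.
                 */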
1748                 if (btrfs_header_backref_rev(root->node) <
1749                     BTRFS_MIXED_BACKREF_REV)
1750                         ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1751                 else
1752                         ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1753                 BUG_ON(ret < 0);
1754         }
1755         return 0;
1756 }