Btrfs: process the delayed reference queue in clusters
fs/btrfs/disk-io.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "compat.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "ref-cache.h"
#include "tree-log.h"

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};
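
/*
 * ->end_io and ->private hold the bio's original completion callback
 * and private data.  btrfs_bio_wq_end_io() points the bio at
 * end_workqueue_bio() instead, and end_workqueue_fn() restores these
 * fields before finally completing the bio from task context.
 */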

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        struct btrfs_work work;
};

/* These are used to set the lockdep class on the extent buffer locks.
 * The class is set by the readpage_end_io_hook after the buffer has
 * passed csum validation but before the pages are unlocked.
 *
 * The lockdep class is also set by btrfs_init_new_buffer on freshly
 * allocated blocks.
 *
 * The class is based on the level in the tree block, which allows lockdep
 * to know that lower nodes nest inside the locks of higher nodes.
 *
 * We also add a check to make sure the highest level of the tree is
 * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
 * code needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
        /* leaf */
        "btrfs-extent-00",
        "btrfs-extent-01",
        "btrfs-extent-02",
        "btrfs-extent-03",
        "btrfs-extent-04",
        "btrfs-extent-05",
        "btrfs-extent-06",
        "btrfs-extent-07",
        /* highest possible level */
        "btrfs-extent-08",
};
#endif
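
/*
 * Example of the nesting this encodes: walking from a level-2 root
 * down to a leaf takes the locks in class order btrfs-extent-02, then
 * btrfs-extent-01, then btrfs-extent-00, which lockdep accepts as a
 * strict hierarchy instead of flagging it as recursive locking.
 */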

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t page_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                spin_unlock(&em_tree->lock);
                goto out;
        }
        spin_unlock(&em_tree->lock);

        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        ret = -EIO;
                }
        } else if (ret) {
                free_extent_map(em);
                em = NULL;
        }
        spin_unlock(&em_tree->lock);

        if (ret)
                em = ERR_PTR(ret);
out:
        return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        *(__le32 *)result = ~cpu_to_le32(crc);
}
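
/*
 * Putting the two helpers together, checksumming a buffer looks like
 * this (illustrative; it is exactly the loop csum_tree_block() runs
 * below, with data/len/result standing in for the mapped buffer):
 *
 *        u32 crc = ~(u32)0;
 *        crc = btrfs_csum_data(root, data, crc, len);
 *        btrfs_csum_final(crc, result);
 */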

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size =
                btrfs_super_csum_size(&root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *map_token = NULL;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &map_token, &kaddr,
                                        &map_start, &map_len, KM_USER0);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
                unmap_extent_buffer(buf, map_token, KM_USER0);
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        printk(KERN_INFO "btrfs: %s checksum verify failed "
                               "on %llu wanted %X found %X level %d\n",
                               root->fs_info->sb->s_id,
                               (unsigned long long)buf->start, val, found,
                               btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid)
{
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
        if (extent_buffer_uptodate(io_tree, eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk(KERN_ERR "parent transid verify failed on %llu wanted %llu "
               "found %llu\n",
               (unsigned long long)eb->start,
               (unsigned long long)parent_transid,
               (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(io_tree, eb);
out:
        unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
                      GFP_NOFS);
        return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;

        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;

                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        return ret;

                mirror_num++;
                if (mirror_num > num_copies)
                        return ret;
        }
        return -EIO;
}
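
/*
 * For example, with metadata mirrored across two copies, a csum or
 * transid failure on the first read (mirror_num == 0, "any copy")
 * retries mirror 1 and then mirror 2 before the error is returned.
 */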

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        int ret;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
        ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                                             btrfs_header_generation(eb));
        BUG_ON(ret);
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                WARN_ON(1);
                goto err;
        }
        if (eb->first_page != page) {
                WARN_ON(1);
                goto err;
        }
        if (!PageUptodate(page)) {
                WARN_ON(1);
                goto err;
        }
        found_level = btrfs_header_level(eb);

        csum_tree_block(root, eb, 0);
err:
        free_extent_buffer(eb);
out:
        return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
                           BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}
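
/*
 * The walk down ->seed above matters for seed filesystems: tree blocks
 * inherited from a seed device still carry the seed's fsid, so a block
 * is accepted if it matches this filesystem or any filesystem it was
 * seeded from.
 */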

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
        lockdep_set_class_and_name(&eb->lock,
                           &btrfs_eb_class[level],
                           btrfs_eb_name[level]);
}
#endif

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;

        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk(KERN_INFO "btrfs bad tree block start %llu %llu\n",
                       (unsigned long long)found_start,
                       (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        if (eb->first_page != page) {
                printk(KERN_INFO "btrfs bad first page %lu %lu\n",
                       eb->first_page->index, page->index);
                WARN_ON(1);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                printk(KERN_INFO "btrfs bad fsid on block %llu\n",
                       (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        btrfs_set_buffer_lockdep_class(eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret)
                ret = -EIO;

        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
err:
        free_extent_buffer(eb);
out:
        return ret;
}

static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & (1 << BIO_RW)) {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_workers,
                                           &end_io_wq->work);
        }
}

int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
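
/*
 * The resulting read path for a metadata bio, end to end:
 * btree_submit_bio_hook() -> btrfs_bio_wq_end_io() hooks the bio, the
 * device end_io runs end_workqueue_bio(), which queues the work to the
 * endio_meta_workers pool, and end_workqueue_fn() finally verifies the
 * checksums and completes the original bio in task context.
 */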

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}
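
/*
 * e.g. with 8 worker threads and 4 open devices the async queue is
 * capped at min(8, 4) * 256 = 1024 in-flight bios.
 */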

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
        return atomic_read(&info->nr_async_bios) >
                btrfs_async_submit_limit(info);
}

static void run_one_async_start(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;
        async->submit_bio_start(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags);
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        atomic_dec(&fs_info->nr_async_submits);

        if (atomic_read(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        async->work.func = run_one_async_start;
        async->work.ordered_func = run_one_async_done;
        async->work.ordered_free = run_one_async_free;

        async->work.flags = 0;
        async->bio_flags = bio_flags;

        atomic_inc(&fs_info->nr_async_submits);
        btrfs_queue_worker(&fs_info->workers, &async->work);
#if 0
        int limit = btrfs_async_submit_limit(fs_info);
        if (atomic_read(&fs_info->nr_async_submits) > limit) {
                wait_event_timeout(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) < limit),
                           HZ/10);

                wait_event_timeout(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_bios) < limit),
                           HZ/10);
        }
#endif
        while (atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}
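
/*
 * The ordered btrfs_work fields above split submission in two: ->func
 * (run_one_async_start) checksums bios in parallel on the worker
 * threads, ->ordered_func (run_one_async_done) sends them down the
 * stack in queue order, and ->ordered_free releases the
 * async_submit_bio once both halves have run.
 */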

static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                csum_dirty_buffer(root, bvec->bv_page);
                bio_index++;
                bvec++;
        }
        return 0;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just checksum the bio; the matching
         * __btree_submit_bio_done hook below does the actual mapping
         * and submission.
         */
        btree_csum_one_bio(bio);
        return 0;
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
{
        int ret;

        ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                          bio, 1);
        BUG_ON(ret);

        if (!(rw & (1 << BIO_RW))) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
        }
        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
                                   __btree_submit_bio_start,
                                   __btree_submit_bio_done);
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (current->flags & PF_MEMALLOC) {
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }
        return extent_write_full_page(tree, page, btree_get_extent, wbc);
}

static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                u64 num_dirty;
                u64 start = 0;
                unsigned long thresh = 32 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                num_dirty = count_range_bits(tree, &start, (u64)-1,
                                             thresh, EXTENT_DIRTY);
                if (num_dirty < thresh)
                        return 0;
        }
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *map;
        int ret;

        if (PageWriteback(page) || PageDirty(page))
                return 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;

        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (!ret)
                return 0;

        ret = try_release_extent_buffer(tree, page);
        if (ret == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }

        return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk(KERN_WARNING "btrfs warning page private not zero "
                       "on page %llu\n", (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
        .sync_page      = block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, 0, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize, GFP_NOFS);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize, NULL, GFP_NOFS);
        return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
                                      buf->start + buf->len - 1, WB_SYNC_ALL);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
                                  buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree;
        int ret;

        io_tree = &BTRFS_I(btree_inode)->io_tree;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

        if (ret == 0)
                set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
        else
                WARN_ON(1);
        return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                /* ugh, clear_extent_buffer_dirty can be expensive */
                btrfs_set_lock_blocking(buf);

                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
        }
        return 0;
}
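
/*
 * The generation check above means only blocks dirtied by the running
 * transaction get their dirty bits cleared; a block dirtied by the
 * previous (committing) transaction still has to reach disk.
 */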

static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u32 stripesize, struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->ref_tree = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;

        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;
        root->name = NULL;
        root->in_sysfs = 0;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->dead_list);
        spin_lock_init(&root->node_lock);
        spin_lock_init(&root->list_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        root->log_batch = 0;
        root->log_transid = 0;
        extent_io_tree_init(&root->dirty_log_pages,
                             fs_info->btree_inode->i_mapping, GFP_NOFS);

        btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
        root->ref_tree = &root->ref_tree_struct;

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->defrag_level = 0;
        root->root_key.objectid = objectid;
        root->anon_super.s_root = NULL;
        root->anon_super.s_dev = 0;
        INIT_LIST_HEAD(&root->anon_super.s_list);
        INIT_LIST_HEAD(&root->anon_super.s_instances);
        init_rwsem(&root->anon_super.s_umount);

        return 0;
}
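
/*
 * The paired log_commit_wait/log_commit slots above are indexed by
 * log transid % 2 in the tree-log code, which keeps at most two log
 * transactions in flight: writers of log transid N can wait on its
 * commit without blocking the writers already feeding transid N + 1.
 */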

static int find_and_setup_root(struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;
        u32 blocksize;
        u64 generation;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        BUG_ON(!root->node);
        return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct extent_buffer *eb;
        struct btrfs_root *log_root_tree = fs_info->log_root_tree;
        u64 start = 0;
        u64 end = 0;
        int ret;

        if (!log_root_tree)
                return 0;

        while (1) {
                ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
                                    0, &start, &end, EXTENT_DIRTY);
                if (ret)
                        break;

                clear_extent_dirty(&log_root_tree->dirty_log_pages,
                                   start, end, GFP_NOFS);
        }
        eb = fs_info->log_root_tree->node;

        WARN_ON(btrfs_header_level(eb) != 0);
        WARN_ON(btrfs_header_nritems(eb) != 0);

        ret = btrfs_free_reserved_extent(fs_info->tree_root,
                                eb->start, eb->len);
        BUG_ON(ret);

        free_extent_buffer(eb);
        kfree(fs_info->log_root_tree);
        fs_info->log_root_tree = NULL;
        return 0;
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
                                      0, BTRFS_TREE_LOG_OBJECTID,
                                      trans->transid, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        root->node = leaf;
        btrfs_set_header_nritems(root->node, 0);
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_bytenr(root->node, root->node->start);
        btrfs_set_header_generation(root->node, trans->transid);
        btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start);
        btrfs_set_root_generation(&log_root->root_item, trans->transid);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        return 0;
}
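
/*
 * Every log tree shares objectid BTRFS_TREE_LOG_OBJECTID; the key
 * offset (set to root->root_key.objectid above) is what identifies
 * which fs root a given log tree belongs to.
 */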

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 highest_inode;
        u64 generation;
        u32 blocksize;
        int ret = 0;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = path->nodes[0];
        read_extent_buffer(l, &root->root_item,
               btrfs_item_ptr_offset(l, path->slots[0]),
               sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        BUG_ON(!root->node);
insert:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                root->ref_cows = 1;
                ret = btrfs_find_highest_inode(root, &highest_inode);
                if (ret == 0) {
                        root->highest_inode = highest_inode;
                        root->last_inode_alloc = highest_inode;
                }
        }
        return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                                        u64 root_objectid)
{
        struct btrfs_root *root;

        if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)root_objectid);
        return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;
        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
                return fs_info->csum_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;

        set_anon_super(&root->anon_super, NULL);

        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
                ret = btrfs_find_dead_roots(fs_info->tree_root,
                                            root->root_key.objectid, root);
                BUG_ON(ret);
                btrfs_orphan_cleanup(root);
        }
        return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location,
                                      const char *name, int namelen)
{
        struct btrfs_root *root;
        int ret;

        root = btrfs_read_fs_root_no_name(fs_info, location);
        if (!root)
                return NULL;

        if (root->in_sysfs)
                return root;

        ret = btrfs_set_root_name(root, name, namelen);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
#if 0
        ret = btrfs_sysfs_add_root(root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root->name);
                kfree(root);
                return ERR_PTR(ret);
        }
#endif
        root->in_sysfs = 1;
        return root;
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;
#if 0
        if ((bdi_bits & (1 << BDI_write_congested)) &&
            btrfs_congested_async(info, 0))
                return 1;
#endif
        list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct btrfs_device *device;
        struct btrfs_fs_info *info;

        info = (struct btrfs_fs_info *)bdi->unplug_io_data;
        list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;

                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi->unplug_io_fn)
                        bdi->unplug_io_fn(bdi, page);
        }
}

static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct inode *inode;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct address_space *mapping;
        u64 offset;

        /* the generic O_DIRECT read code does this */
        if (1 || !page) {
                __unplug_io_fn(bdi, page);
                return;
        }

        /*
         * page->mapping may change at any time.  Get a consistent copy
         * and use that for everything below
         */
        smp_mb();
        mapping = page->mapping;
        if (!mapping)
                return;

        inode = mapping->host;

        /*
         * don't do the expensive searching for a small number of
         * devices
         */
        if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
                __unplug_io_fn(bdi, page);
                return;
        }

        offset = page_offset(page);

        em_tree = &BTRFS_I(inode)->extent_tree;
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
        spin_unlock(&em_tree->lock);
        if (!em) {
                __unplug_io_fn(bdi, page);
                return;
        }

        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                free_extent_map(em);
                __unplug_io_fn(bdi, page);
                return;
        }
        offset = offset - em->start;
        btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
                          em->block_start + offset, page);
        free_extent_map(em);
}
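
/*
 * Note the "if (1 || !page)" above: the targeted per-device unplug
 * below it is currently disabled, so every unplug request falls back
 * to __unplug_io_fn() and kicks all devices.
 */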

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
        bdi_init(bdi);
        bdi->ra_pages   = default_backing_dev_info.ra_pages;
        bdi->state              = 0;
        bdi->capabilities       = default_backing_dev_info.capabilities;
        bdi->unplug_io_fn       = btrfs_unplug_io_fn;
        bdi->unplug_io_data     = info;
        bdi->congested_fn       = btrfs_congested_fn;
        bdi->congested_data     = info;
        return 0;
}

static int bio_ready_for_csum(struct bio *bio)
{
        u64 length = 0;
        u64 buf_len = 0;
        u64 start = 0;
        struct page *page;
        struct extent_io_tree *io_tree = NULL;
        struct btrfs_fs_info *info = NULL;
        struct bio_vec *bvec;
        int i;
        int ret;

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page->private == EXTENT_PAGE_PRIVATE) {
                        length += bvec->bv_len;
                        continue;
                }
                if (!page->private) {
                        length += bvec->bv_len;
                        continue;
                }
                length = bvec->bv_len;
                buf_len = page->private >> 2;
                start = page_offset(page) + bvec->bv_offset;
                io_tree = &BTRFS_I(page->mapping->host)->io_tree;
                info = BTRFS_I(page->mapping->host)->root->fs_info;
        }
        /* are we fully contained in this bio? */
        if (buf_len <= length)
                return 1;

        ret = extent_range_uptodate(io_tree, start + length,
                                    start + buf_len - 1);
        return ret;
}
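
/*
 * This matters for tree blocks larger than a page: the end_io for one
 * page of a block can run before the bios covering its sibling pages
 * complete, so csum verification has to wait until the whole block
 * range is up to date.
 */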

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct end_io_wq *end_io_wq;
        struct btrfs_fs_info *fs_info;
        int error;

        end_io_wq = container_of(work, struct end_io_wq, work);
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

        /* metadata bio reads are special because the whole tree block must
         * be checksummed at once.  This makes sure the entire block is in
         * ram and up to date before trying to verify things.  For
         * blocksize <= pagesize, it is basically a noop
         */
        if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
            !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_meta_workers,
                                   &end_io_wq->work);
                return;
        }
        error = end_io_wq->error;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
        bio_endio(bio, error);
}

static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->cleaner_mutex);
                btrfs_clean_old_snapshots(root);
                mutex_unlock(&root->fs_info->cleaner_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        smp_mb();
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
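
/*
 * The cleaner has no timer of its own: it sleeps until the transaction
 * kthread (below) wakes it after each commit, then reaps old snapshot
 * roots under cleaner_mutex.
 */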

static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        unsigned long now;
        unsigned long delay;
        int ret;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                delay = HZ * 30;
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);

                mutex_lock(&root->fs_info->trans_mutex);
                cur = root->fs_info->running_transaction;
                if (!cur) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        goto sleep;
                }

                now = get_seconds();
                if (now < cur->start_time || now - cur->start_time < 30) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        delay = HZ * 5;
                        goto sleep;
                }
                mutex_unlock(&root->fs_info->trans_mutex);
                trans = btrfs_start_transaction(root, 1);
                ret = btrfs_commit_transaction(trans, root);

sleep:
                wake_up_process(root->fs_info->cleaner_kthread);
                mutex_unlock(&root->fs_info->transaction_kthread_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(delay);
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
1507
1508 struct btrfs_root *open_ctree(struct super_block *sb,
1509                               struct btrfs_fs_devices *fs_devices,
1510                               char *options)
1511 {
1512         u32 sectorsize;
1513         u32 nodesize;
1514         u32 leafsize;
1515         u32 blocksize;
1516         u32 stripesize;
1517         u64 generation;
1518         u64 features;
1519         struct btrfs_key location;
1520         struct buffer_head *bh;
1521         struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1522                                                  GFP_NOFS);
1523         struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1524                                                  GFP_NOFS);
1525         struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
1526                                                GFP_NOFS);
1527         struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1528                                                 GFP_NOFS);
1529         struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1530                                                 GFP_NOFS);
1531         struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1532                                               GFP_NOFS);
1533         struct btrfs_root *log_tree_root;
1534
1535         int ret;
1536         int err = -EINVAL;
1537
1538         struct btrfs_super_block *disk_super;
1539
1540         if (!extent_root || !tree_root || !fs_info ||
1541             !chunk_root || !dev_root || !csum_root) {
1542                 err = -ENOMEM;
1543                 goto fail;
1544         }
1545         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
1546         INIT_LIST_HEAD(&fs_info->trans_list);
1547         INIT_LIST_HEAD(&fs_info->dead_roots);
1548         INIT_LIST_HEAD(&fs_info->hashers);
1549         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1550         spin_lock_init(&fs_info->delalloc_lock);
1551         spin_lock_init(&fs_info->new_trans_lock);
1552         spin_lock_init(&fs_info->ref_cache_lock);
1553
1554         init_completion(&fs_info->kobj_unregister);
1555         fs_info->tree_root = tree_root;
1556         fs_info->extent_root = extent_root;
1557         fs_info->csum_root = csum_root;
1558         fs_info->chunk_root = chunk_root;
1559         fs_info->dev_root = dev_root;
1560         fs_info->fs_devices = fs_devices;
1561         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1562         INIT_LIST_HEAD(&fs_info->space_info);
1563         btrfs_mapping_init(&fs_info->mapping_tree);
1564         atomic_set(&fs_info->nr_async_submits, 0);
1565         atomic_set(&fs_info->async_delalloc_pages, 0);
1566         atomic_set(&fs_info->async_submit_draining, 0);
1567         atomic_set(&fs_info->nr_async_bios, 0);
1568         atomic_set(&fs_info->throttles, 0);
1569         atomic_set(&fs_info->throttle_gen, 0);
1570         fs_info->sb = sb;
1571         fs_info->max_extent = (u64)-1;
1572         fs_info->max_inline = 8192 * 1024;
1573         setup_bdi(fs_info, &fs_info->bdi);
1574         fs_info->btree_inode = new_inode(sb);
1575         fs_info->btree_inode->i_ino = 1;
1576         fs_info->btree_inode->i_nlink = 1;
1577
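        /* one worker per cpu plus a little slack, but never more than 8 */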
1578         fs_info->thread_pool_size = min_t(unsigned long,
1579                                           num_online_cpus() + 2, 8);
1580
1581         INIT_LIST_HEAD(&fs_info->ordered_extents);
1582         spin_lock_init(&fs_info->ordered_extent_lock);
1583
1584         sb->s_blocksize = 4096;
1585         sb->s_blocksize_bits = blksize_bits(4096);
1586
1587         /*
1588          * we set the i_size on the btree inode to the max possible int.
1589          * the real end of the address space is determined by all of
1590          * the devices in the system
1591          */
1592         fs_info->btree_inode->i_size = OFFSET_MAX;
1593         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1594         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1595
1596         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1597                              fs_info->btree_inode->i_mapping,
1598                              GFP_NOFS);
1599         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1600                              GFP_NOFS);
1601
1602         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1603
1604         spin_lock_init(&fs_info->block_group_cache_lock);
1605         fs_info->block_group_cache_tree.rb_node = NULL;
1606
1607         extent_io_tree_init(&fs_info->pinned_extents,
1608                              fs_info->btree_inode->i_mapping, GFP_NOFS);
1609         fs_info->do_barriers = 1;
1610
1611         INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
1612         btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
1613         btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);
1614
1615         BTRFS_I(fs_info->btree_inode)->root = tree_root;
1616         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1617                sizeof(struct btrfs_key));
1618         insert_inode_hash(fs_info->btree_inode);
1619
1620         mutex_init(&fs_info->trans_mutex);
1621         mutex_init(&fs_info->tree_log_mutex);
1622         mutex_init(&fs_info->drop_mutex);
1623         mutex_init(&fs_info->pinned_mutex);
1624         mutex_init(&fs_info->chunk_mutex);
1625         mutex_init(&fs_info->transaction_kthread_mutex);
1626         mutex_init(&fs_info->cleaner_mutex);
1627         mutex_init(&fs_info->volume_mutex);
1628         mutex_init(&fs_info->tree_reloc_mutex);
1629         init_waitqueue_head(&fs_info->transaction_throttle);
1630         init_waitqueue_head(&fs_info->transaction_wait);
1631         init_waitqueue_head(&fs_info->async_submit_wait);
1632
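        /*
         * the 4096s below are only placeholders so the tree root is
         * usable for reading the super block; the real node/leaf/sector/
         * stripe sizes are copied out of the super block further down.
         */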
1633         __setup_root(4096, 4096, 4096, 4096, tree_root,
1634                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
1635
1637         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1638         if (!bh)
1639                 goto fail_iput;
1640
1641         memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1642         memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1643                sizeof(fs_info->super_for_commit));
1644         brelse(bh);
1645
1646         memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1647
1648         disk_super = &fs_info->super_copy;
1649         if (!btrfs_super_root(disk_super))
1650                 goto fail_iput;
1651
1652         ret = btrfs_parse_options(tree_root, options);
1653         if (ret) {
1654                 err = ret;
1655                 goto fail_iput;
1656         }
1657
1658         features = btrfs_super_incompat_flags(disk_super) &
1659                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
1660         if (features) {
1661                 printk(KERN_ERR "BTRFS: couldn't mount because of "
1662                        "unsupported optional features (%Lx).\n",
1663                        features);
1664                 err = -EINVAL;
1665                 goto fail_iput;
1666         }
1667
1668         features = btrfs_super_compat_ro_flags(disk_super) &
1669                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
1670         if (!(sb->s_flags & MS_RDONLY) && features) {
1671                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1672                        "unsupported option features (%Lx).\n",
1673                        features);
1674                 err = -EINVAL;
1675                 goto fail_iput;
1676         }
1677
1678         /*
1679          * we need to start all the end_io workers up front because the
1680          * queue work function gets called at interrupt time, and so it
1681          * cannot dynamically grow.
1682          */
1683         btrfs_init_workers(&fs_info->workers, "worker",
1684                            fs_info->thread_pool_size);
1685
1686         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1687                            fs_info->thread_pool_size);
1688
1689         btrfs_init_workers(&fs_info->submit_workers, "submit",
1690                            min_t(u64, fs_devices->num_devices,
1691                            fs_info->thread_pool_size));
1692
1693         /* a higher idle thresh on the submit workers makes it much more
1694          * likely that bios will be sent down in a sane order to the
1695          * devices
1696          */
1697         fs_info->submit_workers.idle_thresh = 64;
1698
1699         fs_info->workers.idle_thresh = 16;
1700         fs_info->workers.ordered = 1;
1701
1702         fs_info->delalloc_workers.idle_thresh = 2;
1703         fs_info->delalloc_workers.ordered = 1;
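        /*
         * ordered == 1 asks the async-thread code to run completions in
         * the order the work was queued, so checksummed bios still go
         * down to the devices roughly in submission order.
         */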
1704
1705         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
1706         btrfs_init_workers(&fs_info->endio_workers, "endio",
1707                            fs_info->thread_pool_size);
1708         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1709                            fs_info->thread_pool_size);
1710         btrfs_init_workers(&fs_info->endio_meta_write_workers,
1711                            "endio-meta-write", fs_info->thread_pool_size);
1712         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1713                            fs_info->thread_pool_size);
1714
1715         /*
1716          * endios are largely parallel and should have a very
1717          * low idle thresh
1718          */
1719         fs_info->endio_workers.idle_thresh = 4;
1720         fs_info->endio_meta_workers.idle_thresh = 4;
1721
1722         fs_info->endio_write_workers.idle_thresh = 64;
1723         fs_info->endio_meta_write_workers.idle_thresh = 64;
1724
1725         btrfs_start_workers(&fs_info->workers, 1);
1726         btrfs_start_workers(&fs_info->submit_workers, 1);
1727         btrfs_start_workers(&fs_info->delalloc_workers, 1);
1728         btrfs_start_workers(&fs_info->fixup_workers, 1);
1729         btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
1730         btrfs_start_workers(&fs_info->endio_meta_workers,
1731                             fs_info->thread_pool_size);
1732         btrfs_start_workers(&fs_info->endio_meta_write_workers,
1733                             fs_info->thread_pool_size);
1734         btrfs_start_workers(&fs_info->endio_write_workers,
1735                             fs_info->thread_pool_size);
1736
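        /* scale readahead with the device count, but always do at least 4MB */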
1737         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1738         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1739                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1740
1741         nodesize = btrfs_super_nodesize(disk_super);
1742         leafsize = btrfs_super_leafsize(disk_super);
1743         sectorsize = btrfs_super_sectorsize(disk_super);
1744         stripesize = btrfs_super_stripesize(disk_super);
1745         tree_root->nodesize = nodesize;
1746         tree_root->leafsize = leafsize;
1747         tree_root->sectorsize = sectorsize;
1748         tree_root->stripesize = stripesize;
1749
1750         sb->s_blocksize = sectorsize;
1751         sb->s_blocksize_bits = blksize_bits(sectorsize);
1752
1753         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1754                     sizeof(disk_super->magic))) {
1755                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1756                 goto fail_sb_buffer;
1757         }
1758
1759         mutex_lock(&fs_info->chunk_mutex);
1760         ret = btrfs_read_sys_array(tree_root);
1761         mutex_unlock(&fs_info->chunk_mutex);
1762         if (ret) {
1763                 printk(KERN_WARNING "btrfs: failed to read the system "
1764                        "array on %s\n", sb->s_id);
1765                 goto fail_sys_array;
1766         }
1767
1768         blocksize = btrfs_level_size(tree_root,
1769                                      btrfs_super_chunk_root_level(disk_super));
1770         generation = btrfs_super_chunk_root_generation(disk_super);
1771
1772         __setup_root(nodesize, leafsize, sectorsize, stripesize,
1773                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1774
1775         chunk_root->node = read_tree_block(chunk_root,
1776                                            btrfs_super_chunk_root(disk_super),
1777                                            blocksize, generation);
1778         BUG_ON(!chunk_root->node);
1779
1780         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1781            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1782            BTRFS_UUID_SIZE);
1783
1784         mutex_lock(&fs_info->chunk_mutex);
1785         ret = btrfs_read_chunk_tree(chunk_root);
1786         mutex_unlock(&fs_info->chunk_mutex);
1787         if (ret) {
1788                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1789                        sb->s_id);
1790                 goto fail_chunk_root;
1791         }
1792
1793         btrfs_close_extra_devices(fs_devices);
1794
1795         blocksize = btrfs_level_size(tree_root,
1796                                      btrfs_super_root_level(disk_super));
1797         generation = btrfs_super_generation(disk_super);
1798
1799         tree_root->node = read_tree_block(tree_root,
1800                                           btrfs_super_root(disk_super),
1801                                           blocksize, generation);
1802         if (!tree_root->node)
1803                 goto fail_chunk_root;
1804
1806         ret = find_and_setup_root(tree_root, fs_info,
1807                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1808         if (ret)
1809                 goto fail_tree_root;
1810         extent_root->track_dirty = 1;
1811
1812         ret = find_and_setup_root(tree_root, fs_info,
1813                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
1814         dev_root->track_dirty = 1;
1815         if (ret)
1816                 goto fail_extent_root;
1817
1818         ret = find_and_setup_root(tree_root, fs_info,
1819                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
1820         if (ret)
1821                 goto fail_extent_root;
1822
1823         csum_root->track_dirty = 1;
1824
1825         btrfs_read_block_groups(extent_root);
1826
1827         fs_info->generation = generation;
1828         fs_info->last_trans_committed = generation;
1829         fs_info->data_alloc_profile = (u64)-1;
1830         fs_info->metadata_alloc_profile = (u64)-1;
1831         fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1832         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1833                                                "btrfs-cleaner");
1834         if (IS_ERR(fs_info->cleaner_kthread))
1835                 goto fail_csum_root;
1836
1837         fs_info->transaction_kthread = kthread_run(transaction_kthread,
1838                                                    tree_root,
1839                                                    "btrfs-transaction");
1840         if (IS_ERR(fs_info->transaction_kthread))
1841                 goto fail_cleaner;
1842
1843         if (btrfs_super_log_root(disk_super) != 0) {
1844                 u64 bytenr = btrfs_super_log_root(disk_super);
1845
1846                 if (fs_devices->rw_devices == 0) {
1847                         printk(KERN_WARNING "Btrfs log replay required "
1848                                "on RO media\n");
1849                         err = -EIO;
1850                         goto fail_trans_kthread;
1851                 }
1852                 blocksize =
1853                      btrfs_level_size(tree_root,
1854                                       btrfs_super_log_root_level(disk_super));
1855
1856                 log_tree_root = kzalloc(sizeof(struct btrfs_root),
1857                                                       GFP_NOFS);
1858
1859                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1860                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1861
1862                 log_tree_root->node = read_tree_block(tree_root, bytenr,
1863                                                       blocksize,
1864                                                       generation + 1);
1865                 ret = btrfs_recover_log_trees(log_tree_root);
1866                 BUG_ON(ret);
1867
1868                 if (sb->s_flags & MS_RDONLY) {
1869                         ret = btrfs_commit_super(tree_root);
1870                         BUG_ON(ret);
1871                 }
1872         }
1873
1874         if (!(sb->s_flags & MS_RDONLY)) {
1875                 ret = btrfs_cleanup_reloc_trees(tree_root);
1876                 BUG_ON(ret);
1877         }
1878
1879         location.objectid = BTRFS_FS_TREE_OBJECTID;
1880         location.type = BTRFS_ROOT_ITEM_KEY;
1881         location.offset = (u64)-1;
1882
1883         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
1884         if (!fs_info->fs_root)
1885                 goto fail_trans_kthread;
1886         return tree_root;
1887
1888 fail_trans_kthread:
1889         kthread_stop(fs_info->transaction_kthread);
1890 fail_cleaner:
1891         kthread_stop(fs_info->cleaner_kthread);
1892
1893         /*
1894          * make sure we're done with the btree inode before we stop our
1895          * kthreads
1896          */
1897         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
1898         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1899
1900 fail_csum_root:
1901         free_extent_buffer(csum_root->node);
1902 fail_extent_root:
1903         free_extent_buffer(extent_root->node);
1904 fail_tree_root:
1905         free_extent_buffer(tree_root->node);
1906 fail_chunk_root:
1907         free_extent_buffer(chunk_root->node);
1908 fail_sys_array:
1910 fail_sb_buffer:
1911         btrfs_stop_workers(&fs_info->fixup_workers);
1912         btrfs_stop_workers(&fs_info->delalloc_workers);
1913         btrfs_stop_workers(&fs_info->workers);
1914         btrfs_stop_workers(&fs_info->endio_workers);
1915         btrfs_stop_workers(&fs_info->endio_meta_workers);
1916         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1917         btrfs_stop_workers(&fs_info->endio_write_workers);
1918         btrfs_stop_workers(&fs_info->submit_workers);
1919 fail_iput:
1920         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1921         iput(fs_info->btree_inode);
1922
1923         btrfs_close_devices(fs_info->fs_devices);
1924         btrfs_mapping_tree_free(&fs_info->mapping_tree);
1925         bdi_destroy(&fs_info->bdi);
1926
1927 fail:
1928         kfree(extent_root);
1929         kfree(tree_root);
1930         kfree(fs_info);
1931         kfree(chunk_root);
1932         kfree(dev_root);
1933         kfree(csum_root);
1934         return ERR_PTR(err);
1935 }
1936
1937 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1938 {
1939         char b[BDEVNAME_SIZE];
1940
1941         if (uptodate) {
1942                 set_buffer_uptodate(bh);
1943         } else {
1944                 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
1945                         printk(KERN_WARNING "lost page write due to "
1946                                         "I/O error on %s\n",
1947                                        bdevname(bh->b_bdev, b));
1948                 }
1949         /* note, we don't set_buffer_write_io_error because we have
1950                  * our own ways of dealing with the IO errors
1951                  */
1952                 clear_buffer_uptodate(bh);
1953         }
1954         unlock_buffer(bh);
1955         put_bh(bh);
1956 }
1957
1958 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
1959 {
1960         struct buffer_head *bh;
1961         struct buffer_head *latest = NULL;
1962         struct btrfs_super_block *super;
1963         int i;
1964         u64 transid = 0;
1965         u64 bytenr;
1966
1967         /* we would like to check all the supers, but that would make
1968          * a btrfs mount succeed after a mkfs from a different FS.
1969          * So, we need to add a special mount option to scan for
1970          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1971          */
1972         for (i = 0; i < 1; i++) {
1973                 bytenr = btrfs_sb_offset(i);
1974                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
1975                         break;
1976                 bh = __bread(bdev, bytenr / 4096, 4096);
1977                 if (!bh)
1978                         continue;
1979
1980                 super = (struct btrfs_super_block *)bh->b_data;
1981                 if (btrfs_super_bytenr(super) != bytenr ||
1982                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
1983                             sizeof(super->magic))) {
1984                         brelse(bh);
1985                         continue;
1986                 }
1987
1988                 if (!latest || btrfs_super_generation(super) > transid) {
1989                         brelse(latest);
1990                         latest = bh;
1991                         transid = btrfs_super_generation(super);
1992                 } else {
1993                         brelse(bh);
1994                 }
1995         }
1996         return latest;
1997 }
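/*
 * the returned buffer_head carries a reference the caller must brelse.
 * open_ctree() above is the typical caller:
 *
 *	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
 *	if (!bh)
 *		goto fail_iput;
 *	memcpy(&fs_info->super_copy, bh->b_data,
 *	       sizeof(fs_info->super_copy));
 *	brelse(bh);
 */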
1998
1999 static int write_dev_supers(struct btrfs_device *device,
2000                             struct btrfs_super_block *sb,
2001                             int do_barriers, int wait, int max_mirrors)
2002 {
2003         struct buffer_head *bh;
2004         int i;
2005         int ret;
2006         int errors = 0;
2007         u32 crc;
2008         u64 bytenr;
2009         int last_barrier = 0;
2010
2011         if (max_mirrors == 0)
2012                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2013
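        /*
         * mirror 0 is the primary super at 64k; btrfs_sb_offset() places
         * the later copies at exponentially larger offsets, so the
         * total_bytes checks below simply skip mirrors that would land
         * past the end of a small device.
         */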
2014         /* make sure only the last submit_bh does a barrier */
2015         if (do_barriers) {
2016                 for (i = 0; i < max_mirrors; i++) {
2017                         bytenr = btrfs_sb_offset(i);
2018                         if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2019                             device->total_bytes)
2020                                 break;
2021                         last_barrier = i;
2022                 }
2023         }
2024
2025         for (i = 0; i < max_mirrors; i++) {
2026                 bytenr = btrfs_sb_offset(i);
2027                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2028                         break;
2029
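                /*
                 * on the wait pass, look up the buffer the wait == 0 pass
                 * submitted and block until its IO completes.
                 */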
2030                 if (wait) {
2031                         bh = __find_get_block(device->bdev, bytenr / 4096,
2032                                               BTRFS_SUPER_INFO_SIZE);
2033                         BUG_ON(!bh);
2035                         wait_on_buffer(bh);
2036                         if (buffer_uptodate(bh)) {
2037                                 brelse(bh);
2038                                 continue;
2039                         }
2040                 } else {
2041                         btrfs_set_super_bytenr(sb, bytenr);
2042
2043                         crc = ~(u32)0;
2044                         crc = btrfs_csum_data(NULL, (char *)sb +
2045                                               BTRFS_CSUM_SIZE, crc,
2046                                               BTRFS_SUPER_INFO_SIZE -
2047                                               BTRFS_CSUM_SIZE);
2048                         btrfs_csum_final(crc, sb->csum);
2049
2050                         bh = __getblk(device->bdev, bytenr / 4096,
2051                                       BTRFS_SUPER_INFO_SIZE);
2052                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2053
2054                         set_buffer_uptodate(bh);
2055                         get_bh(bh);
2056                         lock_buffer(bh);
2057                         bh->b_end_io = btrfs_end_buffer_write_sync;
2058                 }
2059
2060                 if (i == last_barrier && do_barriers && device->barriers) {
2061                         ret = submit_bh(WRITE_BARRIER, bh);
2062                         if (ret == -EOPNOTSUPP) {
2063                                 printk(KERN_INFO "btrfs: disabling barriers on dev %s\n",
2064                                        device->name);
2065                                 set_buffer_uptodate(bh);
2066                                 device->barriers = 0;
2067                                 get_bh(bh);
2068                                 lock_buffer(bh);
2069                                 ret = submit_bh(WRITE, bh);
2070                         }
2071                 } else {
2072                         ret = submit_bh(WRITE, bh);
2073                 }
2074
2075                 if (!ret && wait) {
2076                         wait_on_buffer(bh);
2077                         if (!buffer_uptodate(bh))
2078                                 errors++;
2079                 } else if (ret) {
2080                         errors++;
2081                 }
2082                 if (wait)
2083                         brelse(bh);
2084         }
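        /* the write counts as a success if at least one copy made it out */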
2085         return errors < i ? 0 : -1;
2086 }
2087
2088 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2089 {
2090         struct list_head *head = &root->fs_info->fs_devices->devices;
2091         struct btrfs_device *dev;
2092         struct btrfs_super_block *sb;
2093         struct btrfs_dev_item *dev_item;
2094         int ret;
2095         int do_barriers;
2096         int max_errors;
2097         int total_errors = 0;
2098         u64 flags;
2099
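        /* survive write failures on all but one of the recorded devices */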
2100         max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2101         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2102
2103         sb = &root->fs_info->super_for_commit;
2104         dev_item = &sb->dev_item;
2105         list_for_each_entry(dev, head, dev_list) {
2106                 if (!dev->bdev) {
2107                         total_errors++;
2108                         continue;
2109                 }
2110                 if (!dev->in_fs_metadata || !dev->writeable)
2111                         continue;
2112
2113                 btrfs_set_stack_device_generation(dev_item, 0);
2114                 btrfs_set_stack_device_type(dev_item, dev->type);
2115                 btrfs_set_stack_device_id(dev_item, dev->devid);
2116                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2117                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2118                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2119                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2120                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2121                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2122                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2123
2124                 flags = btrfs_super_flags(sb);
2125                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2126
2127                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2128                 if (ret)
2129                         total_errors++;
2130         }
2131         if (total_errors > max_errors) {
2132                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2133                        total_errors);
2134                 BUG();
2135         }
2136
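        /*
         * the first pass above only submitted the supers (wait == 0);
         * this second pass waits for each of those writes to finish.
         */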
2137         total_errors = 0;
2138         list_for_each_entry(dev, head, dev_list) {
2139                 if (!dev->bdev)
2140                         continue;
2141                 if (!dev->in_fs_metadata || !dev->writeable)
2142                         continue;
2143
2144                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2145                 if (ret)
2146                         total_errors++;
2147         }
2148         if (total_errors > max_errors) {
2149                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2150                        total_errors);
2151                 BUG();
2152         }
2153         return 0;
2154 }
2155
2156 int write_ctree_super(struct btrfs_trans_handle *trans,
2157                       struct btrfs_root *root, int max_mirrors)
2158 {
2159         int ret;
2160
2161         ret = write_all_supers(root, max_mirrors);
2162         return ret;
2163 }
2164
2165 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2166 {
2167         radix_tree_delete(&fs_info->fs_roots_radix,
2168                           (unsigned long)root->root_key.objectid);
2169         if (root->anon_super.s_dev) {
2170                 down_write(&root->anon_super.s_umount);
2171                 kill_anon_super(&root->anon_super);
2172         }
2173         if (root->node)
2174                 free_extent_buffer(root->node);
2175         if (root->commit_root)
2176                 free_extent_buffer(root->commit_root);
2177         kfree(root->name);
2178         kfree(root);
2179         return 0;
2180 }
2181
2182 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2183 {
2184         int ret;
2185         struct btrfs_root *gang[8];
2186         int i;
2187
2188         while (1) {
2189                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2190                                              (void **)gang, 0,
2191                                              ARRAY_SIZE(gang));
2192                 if (!ret)
2193                         break;
2194                 for (i = 0; i < ret; i++)
2195                         btrfs_free_fs_root(fs_info, gang[i]);
2196         }
2197         return 0;
2198 }
2199
2200 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2201 {
2202         u64 root_objectid = 0;
2203         struct btrfs_root *gang[8];
2204         int i;
2205         int ret;
2206
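        /*
         * sweep the radix tree eight roots at a time, restarting each
         * lookup just past the last objectid we processed.
         */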
2207         while (1) {
2208                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2209                                              (void **)gang, root_objectid,
2210                                              ARRAY_SIZE(gang));
2211                 if (!ret)
2212                         break;
2213                 for (i = 0; i < ret; i++) {
2214                         root_objectid = gang[i]->root_key.objectid;
2215                         ret = btrfs_find_dead_roots(fs_info->tree_root,
2216                                                     root_objectid, gang[i]);
2217                         BUG_ON(ret);
2218                         btrfs_orphan_cleanup(gang[i]);
2219                 }
2220                 root_objectid++;
2221         }
2222         return 0;
2223 }
2224
2225 int btrfs_commit_super(struct btrfs_root *root)
2226 {
2227         struct btrfs_trans_handle *trans;
2228         int ret;
2229
2230         mutex_lock(&root->fs_info->cleaner_mutex);
2231         btrfs_clean_old_snapshots(root);
2232         mutex_unlock(&root->fs_info->cleaner_mutex);
2233         trans = btrfs_start_transaction(root, 1);
2234         ret = btrfs_commit_transaction(trans, root);
2235         BUG_ON(ret);
2236         /* run commit again to drop the original snapshot */
2237         trans = btrfs_start_transaction(root, 1);
2238         btrfs_commit_transaction(trans, root);
2239         ret = btrfs_write_and_wait_transaction(NULL, root);
2240         BUG_ON(ret);
2241
2242         ret = write_ctree_super(NULL, root, 0);
2243         return ret;
2244 }
2245
2246 int close_ctree(struct btrfs_root *root)
2247 {
2248         struct btrfs_fs_info *fs_info = root->fs_info;
2249         int ret;
2250
2251         fs_info->closing = 1;
2252         smp_mb();
2253
2254         kthread_stop(root->fs_info->transaction_kthread);
2255         kthread_stop(root->fs_info->cleaner_kthread);
2256
2257         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2258                 ret = btrfs_commit_super(root);
2259                 if (ret)
2260                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2261         }
2262
2263         if (fs_info->delalloc_bytes) {
2264                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2265                        (unsigned long long)fs_info->delalloc_bytes);
2266         }
2267         if (fs_info->total_ref_cache_size) {
2268                 printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
2269                        (unsigned long long)fs_info->total_ref_cache_size);
2270         }
2271
2272         if (fs_info->extent_root->node)
2273                 free_extent_buffer(fs_info->extent_root->node);
2274
2275         if (fs_info->tree_root->node)
2276                 free_extent_buffer(fs_info->tree_root->node);
2277
2278         if (root->fs_info->chunk_root->node)
2279                 free_extent_buffer(root->fs_info->chunk_root->node);
2280
2281         if (root->fs_info->dev_root->node)
2282                 free_extent_buffer(root->fs_info->dev_root->node);
2283
2284         if (root->fs_info->csum_root->node)
2285                 free_extent_buffer(root->fs_info->csum_root->node);
2286
2287         btrfs_free_block_groups(root->fs_info);
2288
2289         del_fs_roots(fs_info);
2290
2291         iput(fs_info->btree_inode);
2292
2293         btrfs_stop_workers(&fs_info->fixup_workers);
2294         btrfs_stop_workers(&fs_info->delalloc_workers);
2295         btrfs_stop_workers(&fs_info->workers);
2296         btrfs_stop_workers(&fs_info->endio_workers);
2297         btrfs_stop_workers(&fs_info->endio_meta_workers);
2298         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2299         btrfs_stop_workers(&fs_info->endio_write_workers);
2300         btrfs_stop_workers(&fs_info->submit_workers);
2301
2302 #if 0
2303         while (!list_empty(&fs_info->hashers)) {
2304                 struct btrfs_hasher *hasher;
2305                 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
2306                                     hashers);
2307                 list_del(&hasher->hashers);
2308                 crypto_free_hash(&fs_info->hash_tfm);
2309                 kfree(hasher);
2310         }
2311 #endif
2312         btrfs_close_devices(fs_info->fs_devices);
2313         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2314
2315         bdi_destroy(&fs_info->bdi);
2316
2317         kfree(fs_info->extent_root);
2318         kfree(fs_info->tree_root);
2319         kfree(fs_info->chunk_root);
2320         kfree(fs_info->dev_root);
2321         kfree(fs_info->csum_root);
2322         return 0;
2323 }
2324
2325 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2326 {
2327         int ret;
2328         struct inode *btree_inode = buf->first_page->mapping->host;
2329
2330         ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
2331         if (!ret)
2332                 return ret;
2333
2334         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2335                                     parent_transid);
2336         return !ret;
2337 }
2338
2339 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2340 {
2341         struct inode *btree_inode = buf->first_page->mapping->host;
2342         return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2343                                           buf);
2344 }
2345
2346 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2347 {
2348         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2349         u64 transid = btrfs_header_generation(buf);
2350         struct inode *btree_inode = root->fs_info->btree_inode;
2351
2352         btrfs_set_lock_blocking(buf);
2353
2354         btrfs_assert_tree_locked(buf);
2355         if (transid != root->fs_info->generation) {
2356                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2357                        "found %llu running %llu\n",
2358                         (unsigned long long)buf->start,
2359                         (unsigned long long)transid,
2360                         (unsigned long long)root->fs_info->generation);
2361                 WARN_ON(1);
2362         }
2363         set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
2364 }
2365
2366 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2367 {
2368         /*
2369          * looks as though older kernels can get into trouble with
2370          * this code, they end up stuck in balance_dirty_pages forever
2371          */
2372         struct extent_io_tree *tree;
2373         u64 num_dirty;
2374         u64 start = 0;
2375         unsigned long thresh = 32 * 1024 * 1024;
2376         tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
2377
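        /* writeback and reclaim contexts must never block in here themselves */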
2378         if (current_is_pdflush() || current->flags & PF_MEMALLOC)
2379                 return;
2380
2381         num_dirty = count_range_bits(tree, &start, (u64)-1,
2382                                      thresh, EXTENT_DIRTY);
2383         if (num_dirty > thresh) {
2384                 balance_dirty_pages_ratelimited_nr(
2385                                    root->fs_info->btree_inode->i_mapping, 1);
2386         }
2387         return;
2388 }
2389
2390 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2391 {
2392         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2393         int ret;
2394         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2395         if (ret == 0)
2396                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2397         return ret;
2398 }
2399
2400 int btree_lock_page_hook(struct page *page)
2401 {
2402         struct inode *inode = page->mapping->host;
2403         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2404         struct extent_buffer *eb;
2405         unsigned long len;
2406         u64 bytenr = page_offset(page);
2407
2408         if (page->private == EXTENT_PAGE_PRIVATE)
2409                 goto out;
2410
2411         len = page->private >> 2;
2412         eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2413         if (!eb)
2414                 goto out;
2415
2416         btrfs_tree_lock(eb);
2417         btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2418         btrfs_tree_unlock(eb);
2419         free_extent_buffer(eb);
2420 out:
2421         lock_page(page);
2422         return 0;
2423 }
2424
2425 static struct extent_io_ops btree_extent_io_ops = {
2426         .write_cache_pages_lock_hook = btree_lock_page_hook,
2427         .readpage_end_io_hook = btree_readpage_end_io_hook,
2428         .submit_bio_hook = btree_submit_bio_hook,
2429         /* note we're sharing with inode.c for the merge bio hook */
2430         .merge_bio_hook = btrfs_merge_bio_hook,
2431 };