Btrfs: Fix extent_buffer usage when nodesize != leafsize

Author:     Chris Mason <chris.mason@oracle.com>
AuthorDate: Wed, 9 Jan 2008 20:55:33 +0000 (15:55 -0500)
Commit:     Chris Mason <chris.mason@oracle.com>
CommitDate: Thu, 25 Sep 2008 15:03:59 +0000 (11:03 -0400)
Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_map.c

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a481b970608c4a8d08edbc08fbcc82c166c73328..0338f8fd382d989748647f7c24ab079bc6bed7a1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -197,9 +197,23 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
        if (found_start != start) {
                printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
                       start, found_start, len);
+               WARN_ON(1);
+               goto err;
+       }
+       if (eb->first_page != page) {
+               printk("bad first page %lu %lu\n", eb->first_page->index,
+                      page->index);
+               WARN_ON(1);
+               goto err;
+       }
+       if (!PageUptodate(page)) {
+               printk("csum not up to date page %lu\n", page->index);
+               WARN_ON(1);
+               goto err;
        }
        found_level = btrfs_header_level(eb);
        csum_tree_block(root, eb, 0);
+err:
        free_extent_buffer(eb);
 out:
        return 0;
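
The hunk above fronts the checksum with three sanity checks (the buffer's recorded start matches the page, the page is the buffer's first page, the page is up to date); any failure now warns and jumps to a common exit that still drops the buffer reference instead of checksumming a mismatched buffer. A minimal user-space sketch of that check/warn/goto pattern follows; the struct and helpers are invented stand-ins, not the kernel types:

#include <stdio.h>

struct buf {
    unsigned long start;     /* where the buffer claims it lives */
    int uptodate;            /* 1 if the contents are valid      */
    unsigned int refs;       /* simple reference count           */
};

static void put_buf(struct buf *b) { b->refs--; }

/* Checksum only buffers that pass every sanity check; on any
 * mismatch, warn and fall through to the common release path. */
static int csum_buf(struct buf *b, unsigned long expected_start)
{
    if (b->start != expected_start) {
        fprintf(stderr, "warning: start mismatch %lu vs %lu\n",
                b->start, expected_start);
        goto err;
    }
    if (!b->uptodate) {
        fprintf(stderr, "warning: buffer not up to date\n");
        goto err;
    }
    /* ... compute and store the checksum here ... */
err:
    put_buf(b);              /* reference is dropped on every path */
    return 0;
}

int main(void)
{
    struct buf b = { .start = 4096, .uptodate = 1, .refs = 1 };
    return csum_buf(&b, 8192); /* deliberately mismatched: warns, skips csum */
}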
@@ -368,7 +382,10 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
 {
        struct inode *btree_inode = root->fs_info->btree_inode;
-       clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree, buf);
+       if (btrfs_header_generation(buf) ==
+           root->fs_info->running_transaction->transid)
+               clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->extent_tree,
+                                         buf);
        return 0;
 }
 
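clean_tree_block() now clears dirty state only when the buffer's header generation matches the currently running transaction; blocks written by an earlier transaction are left alone. A rough stand-alone illustration of that guard, with invented field names in place of the btrfs structures:

#include <stdbool.h>
#include <stdio.h>

struct block {
    unsigned long long generation;  /* transaction that wrote this block */
    bool dirty;
};

/* Clear the dirty bit only for blocks born in the transaction that is
 * still running; older blocks may still need to reach disk. */
static void clean_block(struct block *b, unsigned long long running_transid)
{
    if (b->generation == running_transid)
        b->dirty = false;
}

int main(void)
{
    struct block old_blk = { .generation = 41, .dirty = true };
    struct block new_blk = { .generation = 42, .dirty = true };

    clean_block(&old_blk, 42);  /* stays dirty: belongs to transaction 41      */
    clean_block(&new_blk, 42);  /* cleaned: created in the running transaction */

    printf("old dirty=%d new dirty=%d\n", old_blk.dirty, new_blk.dirty);
    return 0;
}
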
@@ -897,8 +914,11 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 
 void btrfs_throttle(struct btrfs_root *root)
 {
-       if (root->fs_info->throttles)
-               congestion_wait(WRITE, HZ/10);
+       struct backing_dev_info *bdi;
+
+       bdi = root->fs_info->sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+       if (root->fs_info->throttles && bdi_write_congested(bdi))
+               congestion_wait(WRITE, HZ/20);
 }
 
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
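
btrfs_throttle() used to sleep unconditionally whenever throttling was enabled; it now consults the block device's backing_dev_info and backs off only while writes are actually congested, and for a shorter interval (HZ/20 instead of HZ/10). A simplified sketch of congestion-gated throttling, using a plain flag and nanosleep() as stand-ins for bdi_write_congested() and congestion_wait():

#include <stdbool.h>
#include <time.h>

struct dev_state {
    bool throttles;        /* is throttling enabled at all?          */
    bool write_congested;  /* is the device's write queue backed up? */
};

/* Sleep briefly, but only when the device is genuinely congested;
 * an uncongested device should never stall the caller. */
static void throttle(const struct dev_state *dev)
{
    if (dev->throttles && dev->write_congested) {
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };
        nanosleep(&ts, NULL);   /* ~50ms, i.e. 1/20 of a second like HZ/20 */
    }
}

int main(void)
{
    struct dev_state busy = { .throttles = true, .write_congested = true };
    struct dev_state idle = { .throttles = true, .write_congested = false };

    throttle(&idle);   /* returns immediately            */
    throttle(&busy);   /* backs off for a short interval */
    return 0;
}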
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 99a8b0f0d31867cbc33c72a249b47ac04360b498..2c569b4d59d44a8eb9b2e85603bf8a68b8821a04 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1212,6 +1212,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
                                u64 header_transid =
                                        btrfs_header_generation(buf);
                                if (header_transid == transid) {
+                                       clean_tree_block(NULL, root, buf);
                                        free_extent_buffer(buf);
                                        return 1;
                                }
@@ -1249,7 +1250,6 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
        key.objectid = bytenr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = num_bytes;
-
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -1648,8 +1648,6 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
                               search_start, search_end, hint_byte, ins,
                               trans->alloc_exclude_start,
                               trans->alloc_exclude_nr, data);
-if (ret)
-printk("find free extent returns %d\n", ret);
        BUG_ON(ret);
        if (ret)
                return ret;
@@ -1764,7 +1762,16 @@ struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                  0, 0, 0);
                return ERR_PTR(-ENOMEM);
        }
+       btrfs_set_header_generation(buf, trans->transid);
+       clean_tree_block(trans, root, buf);
+       wait_on_tree_block_writeback(root, buf);
        btrfs_set_buffer_uptodate(buf);
+
+       if (PageDirty(buf->first_page)) {
+               printk("page %lu dirty\n", buf->first_page->index);
+               WARN_ON(1);
+       }
+
        set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                         buf->start + buf->len - 1, GFP_NOFS);
        set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
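
The allocation path now scrubs whatever state a reused extent_buffer may have inherited before handing it to the new transaction: stamp the new generation, clear stale dirty bits, wait out any in-flight writeback, and only then mark the buffer up to date (warning if a page is somehow still dirty). A linear user-space sketch of that reuse sequence, with made-up flags standing in for the page and buffer state:

#include <stdbool.h>
#include <stdio.h>

struct tree_buf {
    unsigned long long generation;   /* transaction that owns the block    */
    bool dirty;                      /* stale dirty state from a prior use */
    bool under_writeback;            /* old contents still being written   */
    bool uptodate;                   /* contents valid for the new owner   */
};

/* Order matters: clear leftover state and let old I/O finish before
 * the new owner starts treating the buffer as valid. */
static void init_reused_buf(struct tree_buf *b, unsigned long long transid)
{
    b->generation = transid;         /* the block now belongs to transid   */
    b->dirty = false;                /* drop dirty bits from the old block */

    if (b->under_writeback) {
        /* a real implementation would block until the old write lands */
        b->under_writeback = false;
    }

    if (b->dirty)                    /* should be impossible after the above */
        fprintf(stderr, "warning: reused buffer still dirty\n");

    b->uptodate = true;              /* safe to fill in and use from here on */
}

int main(void)
{
    struct tree_buf b = { .generation = 7, .dirty = true,
                          .under_writeback = true, .uptodate = false };

    init_reused_buf(&b, 42);
    printf("gen=%llu dirty=%d uptodate=%d\n", b.generation, b.dirty, b.uptodate);
    return 0;
}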
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 9d6aefa937c4b717b31b1e6e23bd64e1235772ea..f3a384ed700c5bb053918c5adc3233b5b4ae8326 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1663,6 +1663,13 @@ void set_page_extent_mapped(struct page *page)
        }
 }
 
+void set_page_extent_head(struct page *page, unsigned long len)
+{
+       WARN_ON(page->private && page->private == EXTENT_PAGE_PRIVATE &&
+               PageDirty(page));
+       set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
+}
+
 /*
  * basic readpage implementation.  Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
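
The new set_page_extent_head() helper centralizes how the head page of an extent_buffer is tagged: page->private stores the buffer length shifted left by two, with the low bits left for the EXTENT_PAGE_PRIVATE* markers. A small sketch of that flags-plus-length packing; the flag values below are illustrative rather than the kernel's definitions:

#include <assert.h>
#include <stdio.h>

/* Low two bits of the tag are flag space; the length lives above them. */
#define PRIV_MAPPED      0x1UL   /* page belongs to some extent buffer */
#define PRIV_FIRST_PAGE  0x3UL   /* page is the buffer's head page     */
#define PRIV_FLAG_MASK   0x3UL

static unsigned long make_head_tag(unsigned long len)
{
    return PRIV_FIRST_PAGE | (len << 2);
}

static unsigned long tag_len(unsigned long tag)
{
    return tag >> 2;             /* recover the buffer length */
}

static int tag_is_head(unsigned long tag)
{
    return (tag & PRIV_FLAG_MASK) == PRIV_FIRST_PAGE;
}

int main(void)
{
    unsigned long tag = make_head_tag(8192);   /* e.g. an 8K tree block */

    assert(tag_is_head(tag));
    assert(tag_len(tag) == 8192);
    printf("head page of a %lu byte buffer\n", tag_len(tag));
    return 0;
}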
@@ -2490,8 +2497,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
                mark_page_accessed(page0);
                set_page_extent_mapped(page0);
                WARN_ON(!PageUptodate(page0));
-               set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
-                                len << 2);
+               set_page_extent_head(page0, len);
        } else {
                i = 0;
        }
@@ -2505,8 +2511,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
                mark_page_accessed(p);
                if (i == 0) {
                        eb->first_page = p;
-                       set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
-                                        len << 2);
+                       set_page_extent_head(p, len);
                } else {
                        set_page_private(p, EXTENT_PAGE_PRIVATE);
                }
@@ -2569,8 +2574,7 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 
                if (i == 0) {
                        eb->first_page = p;
-                       set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
-                                        len << 2);
+                       set_page_extent_head(p, len);
                } else {
                        set_page_private(p, EXTENT_PAGE_PRIVATE);
                }
@@ -2643,6 +2647,11 @@ int clear_extent_buffer_dirty(struct extent_map_tree *tree,
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
                lock_page(page);
+               if (i == 0)
+                       set_page_extent_head(page, eb->len);
+               else
+                       set_page_private(page, EXTENT_PAGE_PRIVATE);
+
                /*
                 * if we're on the last page or the first page and the
                 * block isn't aligned on a page boundary, do extra checks
@@ -2697,9 +2706,12 @@ int set_extent_buffer_dirty(struct extent_map_tree *tree,
                 */
                if (i == 0) {
                        lock_page(page);
-                       set_page_private(page,
-                                        EXTENT_PAGE_PRIVATE_FIRST_PAGE |
-                                        eb->len << 2);
+                       set_page_extent_head(page, eb->len);
+               } else if (PagePrivate(page) &&
+                          page->private != EXTENT_PAGE_PRIVATE) {
+                       lock_page(page);
+                       set_page_extent_mapped(page);
+                       unlock_page(page);
                }
                __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
                if (i == 0)