btrfs: use assertion helpers for extent buffer read lock counters
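
The counters that this diff stops initializing unconditionally in
__alloc_extent_buffer() (read_locks, spinning_readers, spinning_writers) are
only kept for lock-state sanity checks, so they move under CONFIG_BTRFS_DEBUG
and are meant to be touched through assertion helpers rather than open-coded
atomics. A minimal sketch of such a helper pair is below; the
btrfs_assert_tree_read_locks_get()/_put() names and the WARN_ON() check are
illustrative assumptions and not part of this diff (the real helpers are
expected to live in fs/btrfs/locking.c). A caller sketch for the new
extent_io_tree_init() signature follows the diff.

  #ifdef CONFIG_BTRFS_DEBUG
  /* account one more holder of the extent buffer read lock */
  static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
  {
          atomic_inc(&eb->read_locks);
  }

  /* dropping a read lock that was never accounted is a bug */
  static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
  {
          WARN_ON(atomic_read(&eb->read_locks) == 0);
          atomic_dec(&eb->read_locks);
  }
  #else
  static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
  static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
  #endif
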
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ab705183d749709f004e28c2f02b56243f1b23ff..c189b018ed693e25f6e9d54d65d4d58a9ba6d367 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -232,14 +232,17 @@ void __cold extent_io_exit(void)
        bioset_exit(&btrfs_bioset);
 }
 
-void extent_io_tree_init(struct extent_io_tree *tree,
+void extent_io_tree_init(struct btrfs_fs_info *fs_info,
+                        struct extent_io_tree *tree, unsigned int owner,
                         void *private_data)
 {
+       tree->fs_info = fs_info;
        tree->state = RB_ROOT;
        tree->ops = NULL;
        tree->dirty_bytes = 0;
        spin_lock_init(&tree->lock);
        tree->private_data = private_data;
+       tree->owner = owner;
 }
 
 static struct extent_state *alloc_extent_state(gfp_t mask)
@@ -400,7 +403,7 @@ static void merge_state(struct extent_io_tree *tree,
        struct extent_state *other;
        struct rb_node *other_node;
 
-       if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
+       if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
                return;
 
        other_node = rb_prev(&state->rb_node);
@@ -611,6 +614,7 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
        int clear = 0;
 
        btrfs_debug_check_extent_io_range(tree, start, end);
+       trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
 
        if (bits & EXTENT_DELALLOC)
                bits |= EXTENT_NORESERVE;
@@ -618,7 +622,7 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
        if (delete)
                bits |= ~EXTENT_CTLBITS;
 
-       if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
+       if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
                clear = 1;
 again:
        if (!prealloc && gfpflags_allow_blocking(mask)) {
@@ -850,7 +854,7 @@ static void cache_state(struct extent_state *state,
                        struct extent_state **cached_ptr)
 {
        return cache_state_if_flags(state, cached_ptr,
-                                   EXTENT_IOBITS | EXTENT_BOUNDARY);
+                                   EXTENT_LOCKED | EXTENT_BOUNDARY);
 }
 
 /*
@@ -880,6 +884,7 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
        u64 last_end;
 
        btrfs_debug_check_extent_io_range(tree, start, end);
+       trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
 
 again:
        if (!prealloc && gfpflags_allow_blocking(mask)) {
@@ -1112,6 +1117,8 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
        bool first_iteration = true;
 
        btrfs_debug_check_extent_io_range(tree, start, end);
+       trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
+                                      clear_bits);
 
 again:
        if (!prealloc) {
@@ -2995,11 +3002,11 @@ static int __do_readpage(struct extent_io_tree *tree,
                 */
                if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
                    prev_em_start && *prev_em_start != (u64)-1 &&
-                   *prev_em_start != em->orig_start)
+                   *prev_em_start != em->start)
                        force_bio_submit = true;
 
                if (prev_em_start)
-                       *prev_em_start = em->orig_start;
+                       *prev_em_start = em->start;
 
                free_extent_map(em);
                em = NULL;
@@ -4166,10 +4173,9 @@ static int try_release_extent_state(struct extent_io_tree *tree,
        u64 end = start + PAGE_SIZE - 1;
        int ret = 1;
 
-       if (test_range_bit(tree, start, end,
-                          EXTENT_IOBITS, 0, NULL))
+       if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
                ret = 0;
-       else {
+       } else {
                /*
                 * at this point we can safely clear everything except the
                 * locked bit and the nodatasum bit
@@ -4222,8 +4228,7 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
                        }
                        if (!test_range_bit(tree, em->start,
                                            extent_map_end(em) - 1,
-                                           EXTENT_LOCKED | EXTENT_WRITEBACK,
-                                           0, NULL)) {
+                                           EXTENT_LOCKED, 0, NULL)) {
                                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                                        &btrfs_inode->runtime_flags);
                                remove_extent_mapping(map, em);
@@ -4673,11 +4678,8 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
        eb->bflags = 0;
        rwlock_init(&eb->lock);
        atomic_set(&eb->write_locks, 0);
-       atomic_set(&eb->read_locks, 0);
        atomic_set(&eb->blocking_readers, 0);
        atomic_set(&eb->blocking_writers, 0);
-       atomic_set(&eb->spinning_readers, 0);
-       atomic_set(&eb->spinning_writers, 0);
        eb->lock_nested = 0;
        init_waitqueue_head(&eb->write_lock_wq);
        init_waitqueue_head(&eb->read_lock_wq);
@@ -4695,6 +4697,12 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
        BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
                > MAX_INLINE_EXTENT_BUFFER_SIZE);
        BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
 
+#ifdef CONFIG_BTRFS_DEBUG
+       atomic_set(&eb->spinning_writers, 0);
+       atomic_set(&eb->spinning_readers, 0);
+       atomic_set(&eb->read_locks, 0);
+#endif
+
        return eb;
 }
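
For the extent_io_tree_init() change in the first hunk, a hypothetical caller
under the new signature could look like the snippet below. The
init_inode_io_tree() wrapper and the IO_TREE_INODE_IO owner id are assumptions
for illustration only, since no callers appear in this diff.

  static void init_inode_io_tree(struct btrfs_fs_info *fs_info,
                                 struct btrfs_inode *bi)
  {
          /* the tree now records the owning filesystem and an owner id */
          extent_io_tree_init(fs_info, &bi->io_tree, IO_TREE_INODE_IO,
                              &bi->vfs_inode);
  }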