btrfs: remove stale definition of BUFFER_LRU_MAX
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fc0451c3e24e485cb3be0779eb0448cbf8a68c8c..7eb7f8a30ec24fdab33f1b24a9cc644548fe49ee 100644 (file)
@@ -109,8 +109,6 @@ static inline void __btrfs_debug_check_extent_io_range(const char *caller,
 #define btrfs_debug_check_extent_io_range(c, s, e)     do {} while (0)
 #endif
 
-#define BUFFER_LRU_MAX 64
-
 struct tree_entry {
        u64 start;
        u64 end;
@@ -170,15 +168,38 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
        return blk_status_to_errno(ret);
 }
 
-static void flush_write_bio(struct extent_page_data *epd)
+/* Cleanup unsubmitted bios */
+static void end_write_bio(struct extent_page_data *epd, int ret)
 {
        if (epd->bio) {
-               int ret;
+               epd->bio->bi_status = errno_to_blk_status(ret);
+               bio_endio(epd->bio);
+               epd->bio = NULL;
+       }
+}
 
+/*
+ * Submit the bio from the extent page data via submit_one_bio
+ *
+ * Return 0 if everything is OK.
+ * Return <0 for error.
+ */
+static int __must_check flush_write_bio(struct extent_page_data *epd)
+{
+       int ret = 0;
+
+       if (epd->bio) {
                ret = submit_one_bio(epd->bio, 0, 0);
-               BUG_ON(ret < 0); /* -ENOMEM */
+               /*
+                * Cleanup of epd->bio is handled by its endio function.
+                * The endio is triggered either by successful bio execution
+                * or by the error handler of the submit_bio hook.
+                * So no matter what happened, we don't need to clean up
+                * epd->bio here.
+                */
                epd->bio = NULL;
        }
+       return ret;
 }
 
 int __init extent_io_init(void)
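
The flush/end split above separates status propagation from cleanup: flush_write_bio() submits the pending bio and reports the result, while end_write_bio() aborts a bio that was never submitted. Below is a minimal userspace sketch of that contract, with hypothetical names (pending_io, flush_pending, end_pending) standing in for the extent_page_data helpers; this is not kernel code.

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct pending_io {
  	char *buf;		/* batched work, not yet submitted */
  };

  /* Stand-in for submit_one_bio(): consumes the buffer, as endio would */
  static int submit_pending(char *buf)
  {
  	printf("submitting: %s\n", buf);
  	free(buf);		/* the "endio" releases the bio in the kernel */
  	return 0;		/* or a negative errno on failure */
  }

  /* Models flush_write_bio(): submit pending work, propagate the status */
  static int flush_pending(struct pending_io *io)
  {
  	int ret = 0;

  	if (io->buf) {
  		ret = submit_pending(io->buf);
  		/* submit_pending consumed the buffer either way */
  		io->buf = NULL;
  	}
  	return ret;
  }

  /* Models end_write_bio(): abort unsubmitted work with an error code */
  static void end_pending(struct pending_io *io, int ret)
  {
  	if (io->buf) {
  		fprintf(stderr, "aborting pending io: %d\n", ret);
  		free(io->buf);
  		io->buf = NULL;
  	}
  }

  int main(void)
  {
  	struct pending_io io = { .buf = strdup("dirty pages") };
  	int ret = flush_pending(&io);

  	if (ret < 0)
  		end_pending(&io, ret);	/* not taken here, shown for shape */
  	return ret < 0 ? 1 : 0;
  }
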
@@ -2614,8 +2635,6 @@ static void end_bio_extent_readpage(struct bio *bio)
                        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
                                               &eb->bflags))
                                btree_readahead_hook(eb, -EIO);
-
-                       ret = -EIO;
                }
 readpage_ok:
                if (likely(uptodate)) {
@@ -3076,7 +3095,7 @@ out:
        return ret;
 }
 
-static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+static inline void contiguous_readpages(struct extent_io_tree *tree,
                                             struct page *pages[], int nr_pages,
                                             u64 start, u64 end,
                                             struct extent_map **em_cached,
@@ -3107,46 +3126,6 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
        }
 }
 
-static void __extent_readpages(struct extent_io_tree *tree,
-                              struct page *pages[],
-                              int nr_pages,
-                              struct extent_map **em_cached,
-                              struct bio **bio, unsigned long *bio_flags,
-                              u64 *prev_em_start)
-{
-       u64 start = 0;
-       u64 end = 0;
-       u64 page_start;
-       int index;
-       int first_index = 0;
-
-       for (index = 0; index < nr_pages; index++) {
-               page_start = page_offset(pages[index]);
-               if (!end) {
-                       start = page_start;
-                       end = start + PAGE_SIZE - 1;
-                       first_index = index;
-               } else if (end + 1 == page_start) {
-                       end += PAGE_SIZE;
-               } else {
-                       __do_contiguous_readpages(tree, &pages[first_index],
-                                                 index - first_index, start,
-                                                 end, em_cached,
-                                                 bio, bio_flags,
-                                                 prev_em_start);
-                       start = page_start;
-                       end = start + PAGE_SIZE - 1;
-                       first_index = index;
-               }
-       }
-
-       if (end)
-               __do_contiguous_readpages(tree, &pages[first_index],
-                                         index - first_index, start,
-                                         end, em_cached, bio,
-                                         bio_flags, prev_em_start);
-}
-
 static int __extent_read_full_page(struct extent_io_tree *tree,
                                   struct page *page,
                                   get_extent_t *get_extent,
@@ -3426,6 +3405,9 @@ done:
  * records are inserted to lock ranges in the tree, and as dirty areas
  * are found, they are marked writeback.  Then the lock bits are removed
  * and the end_io handler clears the writeback ranges
+ *
+ * Return 0 if everything goes well.
+ * Return <0 for error.
  */
 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                              struct extent_page_data *epd)
@@ -3495,6 +3477,7 @@ done:
                end_extent_writepage(page, ret, start, page_end);
        }
        unlock_page(page);
+       ASSERT(ret <= 0);
        return ret;
 
 done_unlocked:
@@ -3507,18 +3490,27 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
                       TASK_UNINTERRUPTIBLE);
 }
 
+/*
+ * Lock eb pages and flush the bio if we can't get the locks
+ *
+ * Return  0 if nothing went wrong
+ * Return >0 is the same as 0, except the bio is not submitted
+ * Return <0 if something went wrong, no page is locked
+ */
 static noinline_for_stack int
 lock_extent_buffer_for_io(struct extent_buffer *eb,
                          struct btrfs_fs_info *fs_info,
                          struct extent_page_data *epd)
 {
-       int i, num_pages;
+       int i, num_pages, failed_page_nr;
        int flush = 0;
        int ret = 0;
 
        if (!btrfs_try_tree_write_lock(eb)) {
+               ret = flush_write_bio(epd);
+               if (ret < 0)
+                       return ret;
                flush = 1;
-               flush_write_bio(epd);
                btrfs_tree_lock(eb);
        }
 
@@ -3527,7 +3519,9 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
                if (!epd->sync_io)
                        return 0;
                if (!flush) {
-                       flush_write_bio(epd);
+                       ret = flush_write_bio(epd);
+                       if (ret < 0)
+                               return ret;
                        flush = 1;
                }
                while (1) {
@@ -3568,7 +3562,11 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
 
                if (!trylock_page(p)) {
                        if (!flush) {
-                               flush_write_bio(epd);
+                               ret = flush_write_bio(epd);
+                               if (ret < 0) {
+                                       failed_page_nr = i;
+                                       goto err_unlock;
+                               }
                                flush = 1;
                        }
                        lock_page(p);
@@ -3576,6 +3574,11 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
        }
 
        return ret;
+err_unlock:
+       /* Unlock already locked pages */
+       for (i = 0; i < failed_page_nr; i++)
+               unlock_page(eb->pages[i]);
+       return ret;
 }
 
 static void end_extent_buffer_writeback(struct extent_buffer *eb)
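
The err_unlock label above handles a half-failure: by the time the flush fails, pages 0 through i - 1 are already locked and must be released before returning. Below is a userspace sketch of that back-out pattern, using pthread mutexes in place of page locks; names are illustrative, not the kernel API. Build with -pthread.

  #include <pthread.h>
  #include <stdio.h>

  #define NR_PAGES 8

  static pthread_mutex_t page_locks[NR_PAGES];

  /* Returns 0 with all locks held, or -1 with none held */
  static int lock_all_pages(void)
  {
  	int i, failed_page_nr;

  	for (i = 0; i < NR_PAGES; i++) {
  		if (pthread_mutex_trylock(&page_locks[i]) != 0) {
  			failed_page_nr = i;
  			goto err_unlock;
  		}
  	}
  	return 0;

  err_unlock:
  	/* Unlock already locked entries, as the btrfs error path does */
  	for (i = 0; i < failed_page_nr; i++)
  		pthread_mutex_unlock(&page_locks[i]);
  	return -1;
  }

  int main(void)
  {
  	int i;

  	for (i = 0; i < NR_PAGES; i++)
  		pthread_mutex_init(&page_locks[i], NULL);

  	if (lock_all_pages() == 0) {
  		puts("all pages locked");
  		for (i = 0; i < NR_PAGES; i++)
  			pthread_mutex_unlock(&page_locks[i]);
  	}
  	return 0;
  }
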
@@ -3859,7 +3862,12 @@ retry:
                index = 0;
                goto retry;
        }
-       flush_write_bio(&epd);
+       ASSERT(ret <= 0);
+       if (ret < 0) {
+               end_write_bio(&epd, ret);
+               return ret;
+       }
+       ret = flush_write_bio(&epd);
        return ret;
 }
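
This hunk shows the tail that the conversion adds to every writepage entry point below (extent_write_full_page, extent_write_locked_range, extent_writepages): with AOP_WRITEPAGE_ACTIVATE no longer returned, ret can only be 0 or negative, so assert that, abort the pending bio on error, and otherwise submit it. A compact sketch of that shared contract, with hypothetical userspace names:

  #include <assert.h>

  /* Stands in for struct extent_page_data */
  struct epd_like {
  	int has_bio;
  };

  /* Models end_write_bio(): abort the pending bio with an error */
  static void end_write(struct epd_like *epd, int ret)
  {
  	(void)ret;
  	epd->has_bio = 0;
  }

  /* Models flush_write_bio(): submit the pending bio, return its status */
  static int flush_write(struct epd_like *epd)
  {
  	epd->has_bio = 0;
  	return 0;
  }

  /* The common tail: call with the return value of the main write loop */
  static int finish_writepages(struct epd_like *epd, int ret)
  {
  	assert(ret <= 0);		/* mirrors ASSERT(ret <= 0) */
  	if (ret < 0) {
  		end_write(epd, ret);	/* abort, do not submit */
  		return ret;
  	}
  	return flush_write(epd);	/* submit, propagate status */
  }

  int main(void)
  {
  	struct epd_like epd = { .has_bio = 1 };

  	return finish_writepages(&epd, 0);
  }
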
 
@@ -3956,7 +3964,8 @@ retry:
                         * tmpfs file mapping
                         */
                        if (!trylock_page(page)) {
-                               flush_write_bio(epd);
+                               ret = flush_write_bio(epd);
+                               BUG_ON(ret < 0);
                                lock_page(page);
                        }
 
@@ -3966,8 +3975,10 @@ retry:
                        }
 
                        if (wbc->sync_mode != WB_SYNC_NONE) {
-                               if (PageWriteback(page))
-                                       flush_write_bio(epd);
+                               if (PageWriteback(page)) {
+                                       ret = flush_write_bio(epd);
+                                       BUG_ON(ret < 0);
+                               }
                                wait_on_page_writeback(page);
                        }
 
@@ -3978,11 +3989,6 @@ retry:
                        }
 
                        ret = __extent_writepage(page, wbc, epd);
-
-                       if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
-                               unlock_page(page);
-                               ret = 0;
-                       }
                        if (ret < 0) {
                                /*
                                 * done_index is set past this page,
@@ -4036,8 +4042,14 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
        };
 
        ret = __extent_writepage(page, wbc, &epd);
+       ASSERT(ret <= 0);
+       if (ret < 0) {
+               end_write_bio(&epd, ret);
+               return ret;
+       }
 
-       flush_write_bio(&epd);
+       ret = flush_write_bio(&epd);
+       ASSERT(ret <= 0);
        return ret;
 }
 
@@ -4077,7 +4089,12 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
                start += PAGE_SIZE;
        }
 
-       flush_write_bio(&epd);
+       ASSERT(ret <= 0);
+       if (ret < 0) {
+               end_write_bio(&epd, ret);
+               return ret;
+       }
+       ret = flush_write_bio(&epd);
        return ret;
 }
 
@@ -4093,7 +4110,12 @@ int extent_writepages(struct address_space *mapping,
        };
 
        ret = extent_write_cache_pages(mapping, wbc, &epd);
-       flush_write_bio(&epd);
+       ASSERT(ret <= 0);
+       if (ret < 0) {
+               end_write_bio(&epd, ret);
+               return ret;
+       }
+       ret = flush_write_bio(&epd);
        return ret;
 }
 
@@ -4109,6 +4131,8 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
        u64 prev_em_start = (u64)-1;
 
        while (!list_empty(pages)) {
+               u64 contig_end = 0;
+
                for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
                        struct page *page = lru_to_page(pages);
 
@@ -4117,14 +4141,22 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
                        if (add_to_page_cache_lru(page, mapping, page->index,
                                                readahead_gfp_mask(mapping))) {
                                put_page(page);
-                               continue;
+                               break;
                        }
 
                        pagepool[nr++] = page;
+                       contig_end = page_offset(page) + PAGE_SIZE - 1;
                }
 
-               __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
-                                  &bio_flags, &prev_em_start);
+               if (nr) {
+                       u64 contig_start = page_offset(pagepool[0]);
+
+                       ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
+
+                       contiguous_readpages(tree, pagepool, nr, contig_start,
+                                    contig_end, &em_cached, &bio, &bio_flags,
+                                    &prev_em_start);
+               }
        }
 
        if (em_cached)
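
The new batching relies on pages arriving in offset order: a failed add_to_page_cache_lru() now breaks the batch instead of skipping a page, so each batch stays contiguous by construction and the ASSERT on contig_start/contig_end holds. Below is a userspace sketch of that loop shape with a simulated cache-insert failure; names and values are illustrative.

  #include <assert.h>
  #include <stdio.h>

  #define PAGE_SZ		4096ULL
  #define BATCH_MAX	4

  /* Stand-in for add_to_page_cache_lru(): fail for one chosen offset */
  static int cache_insert(unsigned long long off)
  {
  	return off == 5 * PAGE_SZ ? -1 : 0;	/* simulated failure */
  }

  int main(void)
  {
  	int nr_total = 10, i = 0;

  	while (i < nr_total) {
  		unsigned long long pool[BATCH_MAX], contig_end = 0;
  		int nr;

  		for (nr = 0; nr < BATCH_MAX && i < nr_total;) {
  			unsigned long long off = i * PAGE_SZ;

  			i++;
  			/* break (not continue) keeps the batch contiguous */
  			if (cache_insert(off))
  				break;
  			pool[nr++] = off;
  			contig_end = off + PAGE_SZ - 1;
  		}
  		if (nr) {
  			unsigned long long contig_start = pool[0];

  			/* same invariant the patch asserts */
  			assert(contig_start + nr * PAGE_SZ - 1 == contig_end);
  			printf("batch [%llu, %llu], %d pages\n",
  			       contig_start, contig_end, nr);
  		}
  	}
  	return 0;
  }
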
@@ -4677,12 +4709,9 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
        eb->fs_info = fs_info;
        eb->bflags = 0;
        rwlock_init(&eb->lock);
-       atomic_set(&eb->write_locks, 0);
-       atomic_set(&eb->read_locks, 0);
        atomic_set(&eb->blocking_readers, 0);
        atomic_set(&eb->blocking_writers, 0);
-       atomic_set(&eb->spinning_readers, 0);
-       eb->lock_nested = 0;
+       eb->lock_nested = false;
        init_waitqueue_head(&eb->write_lock_wq);
        init_waitqueue_head(&eb->read_lock_wq);
 
@@ -4701,6 +4730,9 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 
 #ifdef CONFIG_BTRFS_DEBUG
        atomic_set(&eb->spinning_writers, 0);
+       atomic_set(&eb->spinning_readers, 0);
+       atomic_set(&eb->read_locks, 0);
+       atomic_set(&eb->write_locks, 0);
 #endif
 
        return eb;
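
These last two hunks move the lock-sanity counters under CONFIG_BTRFS_DEBUG, so production builds neither store nor initialize them. A userspace sketch of the same config-gated pattern, using a hypothetical DEBUG macro: building with -DDEBUG adds the counters, and without it the struct shrinks, which is the point of the change.

  #include <stdatomic.h>
  #include <stdio.h>

  struct buffer {
  	/* fields that always exist */
  	int blocking_writers;
  #ifdef DEBUG
  	/* sanity counters, compiled out of production builds */
  	atomic_int read_locks;
  	atomic_int write_locks;
  #endif
  };

  static void buffer_init(struct buffer *b)
  {
  	b->blocking_writers = 0;
  #ifdef DEBUG
  	atomic_init(&b->read_locks, 0);
  	atomic_init(&b->write_locks, 0);
  #endif
  }

  int main(void)
  {
  	struct buffer b;

  	buffer_init(&b);
  	printf("sizeof(struct buffer) = %zu\n", sizeof(b));
  	return 0;
  }
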