mpage: pass do_mpage_readpage() state via struct mpage_readpage_args and submit readahead IO with REQ_RAHEAD
[sfrench/cifs-2.6.git] / fs / mpage.c
index b73638db9866cd31f516cc5b0f11850825e5824b..c820dc9bebab6143a759c829ed478cb90931048a 100644 (file)
@@ -133,6 +133,17 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
        } while (page_bh != head);
 }
 
+struct mpage_readpage_args {
+       struct bio *bio;
+       struct page *page;
+       unsigned int nr_pages;
+       bool is_readahead;
+       sector_t last_block_in_bio;
+       struct buffer_head map_bh;
+       unsigned long first_logical_block;
+       get_block_t *get_block;
+};
+
 /*
  * This is the worker routine which does all the work of mapping the disk
  * blocks and constructs largest possible bios, submits them for IO if the
@@ -142,16 +153,14 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
  * represent the validity of its disk mapping and to decide when to do the next
  * get_block() call.
  */
-static struct bio *
-do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
-               sector_t *last_block_in_bio, struct buffer_head *map_bh,
-               unsigned long *first_logical_block, get_block_t get_block,
-               gfp_t gfp)
+static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 {
+       struct page *page = args->page;
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
+       struct buffer_head *map_bh = &args->map_bh;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
@@ -161,14 +170,24 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
        struct block_device *bdev = NULL;
        int length;
        int fully_mapped = 1;
+       int op_flags;
        unsigned nblocks;
        unsigned relative_block;
+       gfp_t gfp;
+
+       if (args->is_readahead) {
+               op_flags = REQ_RAHEAD;
+               gfp = readahead_gfp_mask(page->mapping);
+       } else {
+               op_flags = 0;
+               gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+       }
 
        if (page_has_buffers(page))
                goto confused;
 
        block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
-       last_block = block_in_file + nr_pages * blocks_per_page;
+       last_block = block_in_file + args->nr_pages * blocks_per_page;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
        if (last_block > last_block_in_file)
                last_block = last_block_in_file;
@@ -178,9 +197,10 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
         * Map blocks using the result from the previous get_blocks call first.
         */
        nblocks = map_bh->b_size >> blkbits;
-       if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
-                       block_in_file < (*first_logical_block + nblocks)) {
-               unsigned map_offset = block_in_file - *first_logical_block;
+       if (buffer_mapped(map_bh) &&
+                       block_in_file > args->first_logical_block &&
+                       block_in_file < (args->first_logical_block + nblocks)) {
+               unsigned map_offset = block_in_file - args->first_logical_block;
                unsigned last = nblocks - map_offset;
 
                for (relative_block = 0; ; relative_block++) {
@@ -208,9 +228,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 
                if (block_in_file < last_block) {
                        map_bh->b_size = (last_block-block_in_file) << blkbits;
-                       if (get_block(inode, block_in_file, map_bh, 0))
+                       if (args->get_block(inode, block_in_file, map_bh, 0))
                                goto confused;
-                       *first_logical_block = block_in_file;
+                       args->first_logical_block = block_in_file;
                }
 
                if (!buffer_mapped(map_bh)) {
@@ -273,43 +293,45 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
-       if (bio && (*last_block_in_bio != blocks[0] - 1))
-               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+       if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
+               args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
 
 alloc_new:
-       if (bio == NULL) {
+       if (args->bio == NULL) {
                if (first_hole == blocks_per_page) {
                        if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
                                                                page))
                                goto out;
                }
-               bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-                               min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
-               if (bio == NULL)
+               args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
+                                       min_t(int, args->nr_pages,
+                                             BIO_MAX_PAGES),
+                                       gfp);
+               if (args->bio == NULL)
                        goto confused;
        }
 
        length = first_hole << blkbits;
-       if (bio_add_page(bio, page, length, 0) < length) {
-               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+       if (bio_add_page(args->bio, page, length, 0) < length) {
+               args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
                goto alloc_new;
        }
 
-       relative_block = block_in_file - *first_logical_block;
+       relative_block = block_in_file - args->first_logical_block;
        nblocks = map_bh->b_size >> blkbits;
        if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
            (first_hole != blocks_per_page))
-               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+               args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
        else
-               *last_block_in_bio = blocks[blocks_per_page - 1];
+               args->last_block_in_bio = blocks[blocks_per_page - 1];
 out:
-       return bio;
+       return args->bio;
 
 confused:
-       if (bio)
-               bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+       if (args->bio)
+               args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
        if (!PageUptodate(page))
-               block_read_full_page(page, get_block);
+               block_read_full_page(page, args->get_block);
        else
                unlock_page(page);
        goto out;
@@ -363,15 +385,12 @@ int
 mpage_readpages(struct address_space *mapping, struct list_head *pages,
                                unsigned nr_pages, get_block_t get_block)
 {
-       struct bio *bio = NULL;
+       struct mpage_readpage_args args = {
+               .get_block = get_block,
+               .is_readahead = true,
+       };
        unsigned page_idx;
-       sector_t last_block_in_bio = 0;
-       struct buffer_head map_bh;
-       unsigned long first_logical_block = 0;
-       gfp_t gfp = readahead_gfp_mask(mapping);
 
-       map_bh.b_state = 0;
-       map_bh.b_size = 0;
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = lru_to_page(pages);
 
@@ -379,18 +398,16 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
                                        page->index,
-                                       gfp)) {
-                       bio = do_mpage_readpage(bio, page,
-                                       nr_pages - page_idx,
-                                       &last_block_in_bio, &map_bh,
-                                       &first_logical_block,
-                                       get_block, gfp);
+                                       readahead_gfp_mask(mapping))) {
+                       args.page = page;
+                       args.nr_pages = nr_pages - page_idx;
+                       args.bio = do_mpage_readpage(&args);
                }
                put_page(page);
        }
        BUG_ON(!list_empty(pages));
-       if (bio)
-               mpage_bio_submit(REQ_OP_READ, 0, bio);
+       if (args.bio)
+               mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
        return 0;
 }
 EXPORT_SYMBOL(mpage_readpages);
@@ -400,18 +417,15 @@ EXPORT_SYMBOL(mpage_readpages);
  */
 int mpage_readpage(struct page *page, get_block_t get_block)
 {
-       struct bio *bio = NULL;
-       sector_t last_block_in_bio = 0;
-       struct buffer_head map_bh;
-       unsigned long first_logical_block = 0;
-       gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+       struct mpage_readpage_args args = {
+               .page = page,
+               .nr_pages = 1,
+               .get_block = get_block,
+       };
 
-       map_bh.b_state = 0;
-       map_bh.b_size = 0;
-       bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-                       &map_bh, &first_logical_block, get_block, gfp);
-       if (bio)
-               mpage_bio_submit(REQ_OP_READ, 0, bio);
+       args.bio = do_mpage_readpage(&args);
+       if (args.bio)
+               mpage_bio_submit(REQ_OP_READ, 0, args.bio);
        return 0;
 }
 EXPORT_SYMBOL(mpage_readpage);