mm/readahead: add page_cache_sync_ra and page_cache_async_ra
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 16 Oct 2020 03:06:28 +0000 (20:06 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Fri, 16 Oct 2020 18:11:16 +0000 (11:11 -0700)

Reimplement page_cache_sync_readahead() and page_cache_async_readahead()
as static inline wrappers around new variants, page_cache_sync_ra() and
page_cache_async_ra(), which take a readahead_control directly, in
preparation for making do_sync_mmap_readahead() pass down an RAC struct.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Link: https://lkml.kernel.org/r/20200903140844.14194-8-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/pagemap.h
mm/readahead.c
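
The end state this prepares for: do_sync_mmap_readahead() builds the
readahead_control ("RAC") itself and calls the new entry point directly.
A rough sketch, modelled on the fault path of this era (return value,
file pinning, and the VM_RAND_READ/VM_SEQ_READ cases are elided;
illustrative, not the actual follow-up patch):

	static void do_sync_mmap_readahead(struct vm_fault *vmf)
	{
		struct file *file = vmf->vma->vm_file;
		struct file_ra_state *ra = &file->f_ra;
		DEFINE_READAHEAD(ractl, file, file->f_mapping, vmf->pgoff);

		/*
		 * Previously this called page_cache_sync_readahead(
		 * file->f_mapping, ra, file, vmf->pgoff, ra->ra_pages);
		 * with an RAC in hand it can pass straight down:
		 */
		page_cache_sync_ra(&ractl, ra, ra->ra_pages);
	}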

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 37f209ccef0f9f805cfcdaccff5346a03b652c27..c77b7c31b2e49563a5c5d853bbd9c55cc725a29b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -761,16 +761,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);
 
-#define VM_READAHEAD_PAGES     (SZ_128K / PAGE_SIZE)
-
-void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
-               struct file *, pgoff_t index, unsigned long req_count);
-void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
-               struct file *, struct page *, pgoff_t index,
-               unsigned long req_count);
-void page_cache_ra_unbounded(struct readahead_control *,
-               unsigned long nr_to_read, unsigned long lookahead_count);
-
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
@@ -818,6 +808,60 @@ struct readahead_control {
                ._index = i,                                            \
        }
 
+#define VM_READAHEAD_PAGES     (SZ_128K / PAGE_SIZE)
+
+void page_cache_ra_unbounded(struct readahead_control *,
+               unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+               unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
+               struct page *, unsigned long req_count);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read.  The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+               struct file_ra_state *ra, struct file *file, pgoff_t index,
+               unsigned long req_count)
+{
+       DEFINE_READAHEAD(ractl, file, mapping, index);
+       page_cache_sync_ra(&ractl, ra, req_count);
+}
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @page: The page at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * is marked as PageReadahead; this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+               struct file_ra_state *ra, struct file *file,
+               struct page *page, pgoff_t index, unsigned long req_count)
+{
+       DEFINE_READAHEAD(ractl, file, mapping, index);
+       page_cache_async_ra(&ractl, ra, page, req_count);
+}
+
 /**
  * readahead_page - Get the next page to read.
  * @rac: The current readahead request.
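
Because the old names become static inline wrappers, every existing
caller keeps compiling unchanged. A typical call site, modelled on the
buffered read loop of this era (mapping, filp, index, and last_index
are locals of the surrounding loop; illustrative only):

		page = find_get_page(mapping, index);
		if (!page) {
			/* Cache miss: the wrapper builds an RAC and
			 * submits the synchronous read. */
			page_cache_sync_readahead(mapping, &filp->f_ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
		}
		if (page && PageReadahead(page))
			/* Marker page consumed: top up the window. */
			page_cache_async_readahead(mapping, &filp->f_ra, filp,
					page, index, last_index - index);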
diff --git a/mm/readahead.c b/mm/readahead.c
index 3115ced5faaef521acf5492c25857e21284b6c62..620ac83f35ccffd880ea8f0fc7f70ac64661a9e5 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -550,25 +550,9 @@ readit:
        do_page_cache_ra(ractl, ra->size, ra->async_size);
 }
 
-/**
- * page_cache_sync_readahead - generic file readahead
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_sync_readahead() should be called when a cache miss happened:
- * it will submit the read.  The readahead logic may decide to piggyback more
- * pages onto the read request if access patterns suggest it will improve
- * performance.
- */
-void page_cache_sync_readahead(struct address_space *mapping,
-                              struct file_ra_state *ra, struct file *filp,
-                              pgoff_t index, unsigned long req_count)
+void page_cache_sync_ra(struct readahead_control *ractl,
+               struct file_ra_state *ra, unsigned long req_count)
 {
-       DEFINE_READAHEAD(ractl, filp, mapping, index);
-
        /* no read-ahead */
        if (!ra->ra_pages)
                return;
@@ -577,38 +561,20 @@ void page_cache_sync_readahead(struct address_space *mapping,
                return;
 
        /* be dumb */
-       if (filp && (filp->f_mode & FMODE_RANDOM)) {
-               force_page_cache_ra(&ractl, req_count);
+       if (ractl->file && (ractl->file->f_mode & FMODE_RANDOM)) {
+               force_page_cache_ra(ractl, req_count);
                return;
        }
 
        /* do read-ahead */
-       ondemand_readahead(&ractl, ra, false, req_count);
+       ondemand_readahead(ractl, ra, false, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
-/**
- * page_cache_async_readahead - file readahead for marked pages
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @page: The page at @index which triggered the readahead call.
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_async_readahead() should be called when a page is used which
- * is marked as PageReadahead; this is a marker to suggest that the application
- * has used up enough of the readahead window that we should start pulling in
- * more pages.
- */
-void
-page_cache_async_readahead(struct address_space *mapping,
-                          struct file_ra_state *ra, struct file *filp,
-                          struct page *page, pgoff_t index,
-                          unsigned long req_count)
+void page_cache_async_ra(struct readahead_control *ractl,
+               struct file_ra_state *ra, struct page *page,
+               unsigned long req_count)
 {
-       DEFINE_READAHEAD(ractl, filp, mapping, index);
-
        /* no read-ahead */
        if (!ra->ra_pages)
                return;
@@ -624,16 +590,16 @@ page_cache_async_readahead(struct address_space *mapping,
        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
-       if (inode_read_congested(mapping->host))
+       if (inode_read_congested(ractl->mapping->host))
                return;
 
        if (blk_cgroup_congested())
                return;
 
        /* do read-ahead */
-       ondemand_readahead(&ractl, ra, true, req_count);
+       ondemand_readahead(ractl, ra, true, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+EXPORT_SYMBOL_GPL(page_cache_async_ra);
 
 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
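
With page_cache_sync_ra() and page_cache_async_ra() exported, callers
that already hold (or can cheaply build) a readahead_control can skip
the wrappers entirely. A minimal sketch, assuming file, mapping, ra,
page, index, and req_count are already in scope:

	DEFINE_READAHEAD(ractl, file, mapping, index);

	if (!page)
		/* Cache miss: submit the read synchronously. */
		page_cache_sync_ra(&ractl, ra, req_count);
	else if (PageReadahead(page))
		/* PageReadahead marker hit: extend the window. */
		page_cache_async_ra(&ractl, ra, page, req_count);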