fuse: realloc page array
author    Miklos Szeredi <mszeredi@redhat.com>
Mon, 1 Oct 2018 08:07:06 +0000 (10:07 +0200)
committer Miklos Szeredi <mszeredi@redhat.com>
Mon, 1 Oct 2018 08:07:06 +0000 (10:07 +0200)
Writeback caching currently allocates requests with the maximum possible
number of pages, while the actual number of pages per request depends on
two factors that cannot be determined when the request is allocated:
whether the page is already under writeback, and whether the page is
contiguous with the previous pages already added to the request.

This patch allows such requests to start with no page allocation (all pages
inline) and grow the page array on demand.

If the max_pages tunable remains at its default value, this means just
one allocation of the same size as before.  If the tunable is larger,
this adds at most 3 additional memory allocations (a cost generously
compensated by the improved performance of the larger request).
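
To make that worst case concrete, here is a minimal userspace sketch of
the growth arithmetic in fuse_req_realloc_pages() below.  The constants
mirror FUSE_REQ_INLINE_PAGES, FUSE_DEFAULT_MAX_PAGES_PER_REQ and
FUSE_MAX_MAX_PAGES from fs/fuse/fuse_i.h; their values here are assumed,
not taken from this patch:

  #include <stdio.h>

  #define FUSE_REQ_INLINE_PAGES           1   /* assumed inline capacity */
  #define FUSE_DEFAULT_MAX_PAGES_PER_REQ  32  /* assumed default */
  #define FUSE_MAX_MAX_PAGES              256 /* assumed upper bound */

  /* Mirrors the npages computation in fuse_req_realloc_pages(). */
  static unsigned int grow(unsigned int cur, unsigned int conn_max)
  {
          unsigned int next = cur * 2;

          if (next < FUSE_DEFAULT_MAX_PAGES_PER_REQ)
                  next = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
          if (next > conn_max)
                  next = conn_max;
          return next;
  }

  int main(void)
  {
          unsigned int cur = FUSE_REQ_INLINE_PAGES;
          unsigned int grows = 0;

          /* Worst case: fc->max_pages raised to the maximum. */
          while (cur < FUSE_MAX_MAX_PAGES) {
                  cur = grow(cur, FUSE_MAX_MAX_PAGES);
                  printf("grow #%u -> %u pages\n", ++grows, cur);
          }
          return 0;
  }

With these assumed values the array grows 1 -> 32 -> 64 -> 128 -> 256:
the first step is the default-sized allocation mentioned above, and the
remaining three are the "at most 3 additional" allocations.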

Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/fuse_i.h

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 69d4df78a417c92337e141a115b6c3fc85fe64d4..ae813e609932168ec2e74056fb598aca0ad65907 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -54,6 +54,18 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
        __set_bit(FR_PENDING, &req->flags);
 }
 
+static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
+                                         struct fuse_page_desc **desc)
+{
+       struct page **pages;
+
+       pages = kzalloc(npages * (sizeof(struct page *) +
+                                 sizeof(struct fuse_page_desc)), flags);
+       *desc = (void *) pages + npages * sizeof(struct page *);
+
+       return pages;
+}
+
 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
 {
        struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
@@ -63,13 +75,12 @@ static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
 
                WARN_ON(npages > FUSE_MAX_MAX_PAGES);
                if (npages > FUSE_REQ_INLINE_PAGES) {
-                       pages = kzalloc(npages * (sizeof(*pages) +
-                                                 sizeof(*page_descs)), flags);
+                       pages = fuse_req_pages_alloc(npages, flags,
+                                                    &page_descs);
                        if (!pages) {
                                kmem_cache_free(fuse_req_cachep, req);
                                return NULL;
                        }
-                       page_descs = (void *) pages + npages * sizeof(*pages);
                } else if (npages) {
                        pages = req->inline_pages;
                        page_descs = req->inline_page_descs;
@@ -91,11 +102,41 @@ struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
        return __fuse_request_alloc(npages, GFP_NOFS);
 }
 
-void fuse_request_free(struct fuse_req *req)
+static void fuse_req_pages_free(struct fuse_req *req)
 {
        if (req->pages != req->inline_pages)
                kfree(req->pages);
+}
+
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+                           gfp_t flags)
+{
+       struct page **pages;
+       struct fuse_page_desc *page_descs;
+       unsigned int npages = min_t(unsigned int,
+                                   max_t(unsigned int, req->max_pages * 2,
+                                         FUSE_DEFAULT_MAX_PAGES_PER_REQ),
+                                   fc->max_pages);
+       WARN_ON(npages <= req->max_pages);
 
+       pages = fuse_req_pages_alloc(npages, flags, &page_descs);
+       if (!pages)
+               return false;
+
+       memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
+       memcpy(page_descs, req->page_descs,
+              sizeof(struct fuse_page_desc) * req->max_pages);
+       fuse_req_pages_free(req);
+       req->pages = pages;
+       req->page_descs = page_descs;
+       req->max_pages = npages;
+
+       return true;
+}
+
+void fuse_request_free(struct fuse_req *req)
+{
+       fuse_req_pages_free(req);
        kmem_cache_free(fuse_req_cachep, req);
 }
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 035843b501fe5b19bdd682ae6950d3bb30bc7144..f5507198ea00e0eda40c26ff4adfbabcd689a16a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1827,7 +1827,13 @@ static int fuse_writepages_fill(struct page *page,
             data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
                fuse_writepages_send(data);
                data->req = NULL;
+       } else if (req && req->num_pages == req->max_pages) {
+               if (!fuse_req_realloc_pages(fc, req, GFP_NOFS)) {
+                       fuse_writepages_send(data);
+                       req = data->req = NULL;
+               }
        }
+
        err = -ENOMEM;
        tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (!tmp_page)
@@ -1850,7 +1856,7 @@ static int fuse_writepages_fill(struct page *page,
                struct fuse_inode *fi = get_fuse_inode(inode);
 
                err = -ENOMEM;
-               req = fuse_request_alloc_nofs(fc->max_pages);
+               req = fuse_request_alloc_nofs(FUSE_REQ_INLINE_PAGES);
                if (!req) {
                        __free_page(tmp_page);
                        goto out_unlock;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 3d578745c852497eca2846910b975fa2e9acce22..b7d96e7b5e0f51114161a0868e2b25ae70e67a0f 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -879,6 +879,10 @@ struct fuse_req *fuse_request_alloc(unsigned npages);
 
 struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
 
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+                           gfp_t flags);
+
+
 /**
  * Free a request
  */
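
For readers unfamiliar with the combined-array trick in
fuse_req_pages_alloc() above, this standalone sketch models the layout
in userspace.  The struct fields and helper name are stand-ins, not the
kernel's: one zeroed allocation carries the page-pointer array
immediately followed by the descriptor array, so a single free (kfree()
in the kernel) releases both.

  #include <stdio.h>
  #include <stdlib.h>

  struct page;                    /* opaque, as in the kernel */

  struct fuse_page_desc {         /* stand-in fields for the sketch */
          unsigned int length;
          unsigned int offset;
  };

  /* One zeroed block: npages pointers, then npages descriptors. */
  static struct page **pages_alloc(unsigned int npages,
                                   struct fuse_page_desc **desc)
  {
          struct page **pages;

          pages = calloc(npages, sizeof(struct page *) +
                                 sizeof(struct fuse_page_desc));
          if (!pages)
                  return NULL;
          /* Descriptor array starts right after the pointer array. */
          *desc = (struct fuse_page_desc *)(pages + npages);
          return pages;
  }

  int main(void)
  {
          struct fuse_page_desc *descs;
          struct page **pages = pages_alloc(8, &descs);

          if (!pages)
                  return 1;
          descs[0].length = 4096;  /* both arrays live in one block */
          printf("pages=%p descs=%p\n", (void *)pages, (void *)descs);
          free(pages);             /* one free releases both arrays */
          return 0;
  }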