mm: support THPs in zero_user_segments
author Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 15 Dec 2020 03:12:59 +0000 (19:12 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Dec 2020 20:13:45 +0000 (12:13 -0800)
We can only kmap() one subpage of a THP at a time, so loop over all
relevant subpages, skipping ones which don't need to be zeroed.  This is
too large to inline when THPs are enabled and we actually need highmem, so
put it in highmem.c.

[willy@infradead.org: start1 was allowed to be less than start2]

Link: https://lkml.kernel.org/r/20201124041507.28996-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/highmem.h
mm/highmem.c

index 14e6202ce47f1395b61a45dc2d7455723dfdc0aa..8e21fe82b3a35cb1b1263f1a1e0ae0402f74e055 100644 (file)
@@ -284,13 +284,22 @@ static inline void clear_highpage(struct page *page)
        kunmap_atomic(kaddr);
 }
 
+/*
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
+ */
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+               unsigned start2, unsigned end2);
+#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
 static inline void zero_user_segments(struct page *page,
-       unsigned start1, unsigned end1,
-       unsigned start2, unsigned end2)
+               unsigned start1, unsigned end1,
+               unsigned start2, unsigned end2)
 {
        void *kaddr = kmap_atomic(page);
+       unsigned int i;
 
-       BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+       BUG_ON(end1 > page_size(page) || end2 > page_size(page));
 
        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);
@@ -299,8 +308,10 @@ static inline void zero_user_segments(struct page *page,
                memset(kaddr + start2, 0, end2 - start2);
 
        kunmap_atomic(kaddr);
-       flush_dcache_page(page);
+       for (i = 0; i < compound_nr(page); i++)
+               flush_dcache_page(page + i);
 }
+#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
 
 static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
index 1352a27951e3b7b021f92ccd6f97193ac1e425b0..0ee87a9e0cbf24879b208d3eb390a0105773ed78 100644 (file)
@@ -369,6 +369,58 @@ void kunmap_high(struct page *page)
 }
 
 EXPORT_SYMBOL(kunmap_high);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+               unsigned start2, unsigned end2)
+{
+       unsigned int i;
+
+       BUG_ON(end1 > page_size(page) || end2 > page_size(page));
+
+       for (i = 0; i < compound_nr(page); i++) {
+               void *kaddr = NULL;
+
+               if (start1 < PAGE_SIZE || start2 < PAGE_SIZE)
+                       kaddr = kmap_atomic(page + i);
+
+               if (start1 >= PAGE_SIZE) {
+                       start1 -= PAGE_SIZE;
+                       end1 -= PAGE_SIZE;
+               } else {
+                       unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);
+
+                       if (end1 > start1)
+                               memset(kaddr + start1, 0, this_end - start1);
+                       end1 -= this_end;
+                       start1 = 0;
+               }
+
+               if (start2 >= PAGE_SIZE) {
+                       start2 -= PAGE_SIZE;
+                       end2 -= PAGE_SIZE;
+               } else {
+                       unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);
+
+                       if (end2 > start2)
+                               memset(kaddr + start2, 0, this_end - start2);
+                       end2 -= this_end;
+                       start2 = 0;
+               }
+
+               if (kaddr) {
+                       kunmap_atomic(kaddr);
+                       flush_dcache_page(page + i);
+               }
+
+               if (!end1 && !end2)
+                       break;
+       }
+
+       BUG_ON((start1 | start2 | end1 | end2) != 0);
+}
+EXPORT_SYMBOL(zero_user_segments);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_HIGHMEM */
 
 #if defined(HASHED_PAGE_VIRTUAL)