mm: return zero_resv_unavail optimization
author: Pavel Tatashin <pavel.tatashin@microsoft.com>
Fri, 26 Oct 2018 22:10:21 +0000 (15:10 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Oct 2018 23:38:15 +0000 (16:38 -0700)
When checking for valid pfns in zero_resv_unavail(), it is not necessary
to verify that pfns within pageblock_nr_pages ranges are valid, only the
first one needs to be checked.  This is because memory for pages is
allocated in contiguous chunks that contain pageblock_nr_pages struct
pages.

Link: http://lkml.kernel.org/r/20181002143821.5112-3-msys.mizuma@gmail.com
Signed-off-by: Pavel Tatashin <pavel.tatashin@microsoft.com>
Signed-off-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Reviewed-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index 6d863c5afa086bd455f844d01389a0dbfd4848bd..863d46da6586d9b988aa5c004a46f85ca389dfe4 100644 (file)
@@ -6509,6 +6509,29 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 }
 
 #if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+
+/*
+ * Zero all valid struct pages in range [spfn, epfn), return number of struct
+ * pages zeroed
+ */
+static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
+{
+       unsigned long pfn;
+       u64 pgcnt = 0;
+
+       for (pfn = spfn; pfn < epfn; pfn++) {
+               if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
+                       pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
+                               + pageblock_nr_pages - 1;
+                       continue;
+               }
+               mm_zero_struct_page(pfn_to_page(pfn));
+               pgcnt++;
+       }
+
+       return pgcnt;
+}
+
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6524,7 +6547,6 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 void __init zero_resv_unavail(void)
 {
        phys_addr_t start, end;
-       unsigned long pfn;
        u64 i, pgcnt;
        phys_addr_t next = 0;
 
@@ -6534,34 +6556,18 @@ void __init zero_resv_unavail(void)
        pgcnt = 0;
        for_each_mem_range(i, &memblock.memory, NULL,
                        NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
-               if (next < start) {
-                       for (pfn = PFN_DOWN(next); pfn < PFN_UP(start); pfn++) {
-                               if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
-                                       continue;
-                               mm_zero_struct_page(pfn_to_page(pfn));
-                               pgcnt++;
-                       }
-               }
+               if (next < start)
+                       pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
                next = end;
        }
-       for (pfn = PFN_DOWN(next); pfn < max_pfn; pfn++) {
-               if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
-                       continue;
-               mm_zero_struct_page(pfn_to_page(pfn));
-               pgcnt++;
-       }
-
+       pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
 
        /*
         * Struct pages that do not have backing memory. This could be because
         * firmware is using some of this memory, or for some other reasons.
-        * Once memblock is changed so such behaviour is not allowed: i.e.
-        * list of "reserved" memory must be a subset of list of "memory", then
-        * this code can be removed.
         */
        if (pgcnt)
                pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
-
 }
 #endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */