mm/page_isolation.c: convert SKIP_HWPOISON to MEMORY_OFFLINE
author David Hildenbrand <david@redhat.com>
Sun, 1 Dec 2019 01:54:07 +0000 (17:54 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 1 Dec 2019 20:59:04 +0000 (12:59 -0800)
We have two types of users of page isolation:

 1. Memory offlining:  Offline memory so it can be unplugged. Memory
                       won't be touched.

 2. Memory allocation: Allocate memory (e.g., alloc_contig_range()) to
                       become the owner of the memory and make use of
                       it.

For example, in case we want to offline memory, we can ignore (skip
over) PageHWPoison() pages, as the memory won't get used; offlining
such memory can therefore be allowed.  In contrast, we don't want to
allow allocating such memory.

Let's generalize the approach so we can special case other types of
pages we want to skip over in case we offline memory.  While at it, also
pass the same flags to test_pages_isolated().

Link: http://lkml.kernel.org/r/20191021172353.3056-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/page-isolation.h
mm/memory_hotplug.c
mm/page_alloc.c
mm/page_isolation.c

index 1099c2fee20f6c8abc1087e327142abc41ccc831..6861df759fad577efcd63edf110137c692f7a2cb 100644 (file)
@@ -30,7 +30,7 @@ static inline bool is_migrate_isolate(int migratetype)
 }
 #endif
 
-#define SKIP_HWPOISON  0x1
+#define MEMORY_OFFLINE 0x1
 #define REPORT_FAILURE 0x2
 
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
@@ -58,7 +58,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * Test all pages in [start_pfn, end_pfn) are isolated or not.
  */
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
-                       bool skip_hwpoisoned_pages);
+                       int isol_flags);
 
 struct page *alloc_migrate_target(struct page *page, unsigned long private);
 
index 929d4209e78ba57cbb5ed4d3bb44851af2b0c1d1..84ab3298cce9bd6c352f91235c5e4c748baef9da 100644 (file)
@@ -1187,7 +1187,8 @@ static bool is_pageblock_removable_nolock(unsigned long pfn)
        if (!zone_spans_pfn(zone, pfn))
                return false;
 
-       return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
+       return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE,
+                                   MEMORY_OFFLINE);
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
@@ -1402,7 +1403,8 @@ static int
 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
 {
-       return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
+       return test_pages_isolated(start_pfn, start_pfn + nr_pages,
+                                  MEMORY_OFFLINE);
 }
 
 static int __init cmdline_parse_movable_node(char *p)
@@ -1513,7 +1515,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE,
-                                      SKIP_HWPOISON | REPORT_FAILURE);
+                                      MEMORY_OFFLINE | REPORT_FAILURE);
        if (ret < 0) {
                reason = "failure to isolate range";
                goto failed_removal;
index 293c8c14541571884ebe07c2183429918c070b71..c289b02aaa3b1f5093ef294c44ca59ede78b3809 100644 (file)
@@ -8261,7 +8261,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                 * The HWPoisoned page may be not in buddy system, and
                 * page_count() is not 0.
                 */
-               if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
+               if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        continue;
 
                if (__PageMovable(page))
@@ -8477,7 +8477,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
        }
 
        /* Make sure the range is really isolated. */
-       if (test_pages_isolated(outer_start, end, false)) {
+       if (test_pages_isolated(outer_start, end, 0)) {
                pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
index 89c19c0feadb95374a9724af6d98f962c1b8768b..04ee1663cdbe83732640d0b2692333c3256b4d2e 100644 (file)
@@ -168,7 +168,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * @migratetype:       Migrate type to set in error recovery.
  * @flags:             The following flags are allowed (they can be combined in
  *                     a bit mask)
- *                     SKIP_HWPOISON - ignore hwpoison pages
+ *                     MEMORY_OFFLINE - isolate to offline (!allocate) memory
+ *                                      e.g., skip over PageHWPoison() pages
  *                     REPORT_FAILURE - report details about the failure to
  *                     isolate the range
  *
@@ -257,7 +258,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  */
 static unsigned long
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
-                                 bool skip_hwpoisoned_pages)
+                                 int flags)
 {
        struct page *page;
 
@@ -274,7 +275,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                         * simple way to verify that as VM_BUG_ON(), though.
                         */
                        pfn += 1 << page_order(page);
-               else if (skip_hwpoisoned_pages && PageHWPoison(page))
+               else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        /* A HWPoisoned page cannot be also PageBuddy */
                        pfn++;
                else
@@ -286,7 +287,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
 
 /* Caller should ensure that requested range is in a single zone */
 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
-                       bool skip_hwpoisoned_pages)
+                       int isol_flags)
 {
        unsigned long pfn, flags;
        struct page *page;
@@ -308,8 +309,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
        /* Check all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
-       pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
-                                               skip_hwpoisoned_pages);
+       pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
        spin_unlock_irqrestore(&zone->lock, flags);
 
        trace_test_pages_isolated(start_pfn, end_pfn, pfn);