mm, page_alloc: move_freepages should not examine struct page of reserved memory
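
Background, reconstructed here as a summary rather than quoted from the original changelog: with SPARSEMEM, the memmap spanned by a pageblock can include reserved or offline pfns whose struct pages were zeroed at boot and never linked to a zone or node. For such a page the zone/node bits in page->flags are all zero, so page_zone() and page_to_nid() return whatever zone 0 and node 0 happen to be, and assertions built on them compare garbage. Roughly, per the long-standing decoder in include/linux/mm.h:

	static inline enum zone_type page_zonenum(const struct page *page)
	{
		/* A zeroed page->flags decodes to zone 0, whatever the truth. */
		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
	}

Hence the change below: zone/node linkage is only examined on pages that PageBuddy() has already vouched for.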
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e515bfcf7f288fbea352cbafc7daf4f0f2320ca3..9c9194959271cfc0d9214bf60bb09b96c5b1a96a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -450,7 +450,7 @@ static inline unsigned long *get_pageblock_bitmap(struct page *page,
                                                        unsigned long pfn)
 {
 #ifdef CONFIG_SPARSEMEM
-       return __pfn_to_section(pfn)->pageblock_flags;
+       return section_to_usemap(__pfn_to_section(pfn));
 #else
        return page_zone(page)->pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
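
This hunk tracks the subsection rework: the SPARSEMEM pageblock bitmap no longer hangs directly off struct mem_section but sits behind struct mem_section_usage, reached through section_to_usemap(). A sketch of those definitions, assuming the 5.3-era include/linux/mmzone.h:

	struct mem_section_usage {
		DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
		/* See declaration of similar field in struct zone */
		unsigned long pageblock_flags[0];
	};

	static inline unsigned long *section_to_usemap(struct mem_section *ms)
	{
		return ms->usage->pageblock_flags;
	}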
@@ -2238,27 +2238,12 @@ static int move_freepages(struct zone *zone,
        unsigned int order;
        int pages_moved = 0;
 
-#ifndef CONFIG_HOLES_IN_ZONE
-       /*
-        * page_zone is not safe to call in this context when
-        * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
-        * anyway as we check zone boundaries in move_freepages_block().
-        * Remove at a later date when no bug reports exist related to
-        * grouping pages by mobility
-        */
-       VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
-                 pfn_valid(page_to_pfn(end_page)) &&
-                 page_zone(start_page) != page_zone(end_page));
-#endif
        for (page = start_page; page <= end_page;) {
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }
 
-               /* Make sure we are not inadvertently changing nodes */
-               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
                if (!PageBuddy(page)) {
                        /*
                         * We assume that pages that could be isolated for
@@ -2273,6 +2258,10 @@ static int move_freepages(struct zone *zone,
                        continue;
                }
 
+               /* Make sure we are not inadvertently changing nodes */
+               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+               VM_BUG_ON_PAGE(page_zone(page) != zone, page);
+
                order = page_order(page);
                move_to_free_area(page, &zone->free_area[order], migratetype);
                page += 1 << order;
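
Taken together, the two move_freepages() hunks relocate the sanity checks rather than drop them: the old CONFIG_HOLES_IN_ZONE-guarded boundary assertion goes away (move_freepages_block() already clamps the range to zone boundaries), and the node check moves, with a zone check added beside it, to after the PageBuddy() test, where the struct page is known to be initialized. Condensed for illustration, the loop now reads:

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;		/* hole in the memmap */
			continue;
		}
		if (!PageBuddy(page)) {
			page++;		/* possibly reserved, never initialized */
			continue;
		}
		/* Safe now: a buddy page carries valid zone/node bits. */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = page_order(page);
		move_to_free_area(page, &zone->free_area[order], migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}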
@@ -5926,7 +5915,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
        unsigned long start = jiffies;
        int nid = pgdat->node_id;
 
-       if (WARN_ON_ONCE(!pgmap || !is_dev_zone(zone)))
+       if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
                return;
 
        /*
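
The open-coded zone_idx() test replaces is_dev_zone(), which was dropped from the shared headers in the same series; the two are equivalent, assuming the helper's historical one-liner:

	static inline bool is_dev_zone(const struct zone *zone)
	{
		return zone_idx(zone) == ZONE_DEVICE;
	}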
@@ -5974,7 +5963,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
                 * pfn out of zone.
                 *
                 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
-                * because this is done early in sparse_add_one_section
+                * because this is done early in section_activate()
                 */
                if (!(pfn & (pageblock_nr_pages - 1))) {
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
@@ -7351,12 +7340,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                               (u64)zone_movable_pfn[i] << PAGE_SHIFT);
        }
 
-       /* Print out the early node map */
+       /*
+        * Print out the early node map, and initialize the
+        * subsection-map relative to active online memory ranges to
+        * enable future "sub-section" extensions of the memory map.
+        */
        pr_info("Early memory node ranges\n");
-       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
+       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
                        (u64)start_pfn << PAGE_SHIFT,
                        ((u64)end_pfn << PAGE_SHIFT) - 1);
+               subsection_map_init(start_pfn, end_pfn - start_pfn);
+       }
 
        /* Initialise every node */
        mminit_verify_pageflags_layout();
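
The last hunk piggybacks on the early node map walk to seed the per-section subsection bitmaps, so that later memory hotplug can add and remove memory at sub-section granularity (2 MiB with 4K pages on x86_64). A simplified sketch of what subsection_map_init() does per range, hedged against the actual mm/sparse.c implementation:

	void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
	{
		unsigned long nr, start_sec = pfn_to_section_nr(pfn);
		unsigned long end_sec = pfn_to_section_nr(pfn + nr_pages - 1);

		if (!nr_pages)
			return;

		for (nr = start_sec; nr <= end_sec; nr++) {
			struct mem_section *ms = __nr_to_section(nr);
			/* pages of this range that fall into section @nr */
			unsigned long pfns = min(nr_pages, PAGES_PER_SECTION
					- (pfn & ~PAGE_SECTION_MASK));

			/* mark the covered subsections active in the bitmap */
			subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

			pfn += pfns;
			nr_pages -= pfns;
		}
	}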