unsigned long start_pfn, end_pfn;
int i, this_nid;
-#ifdef CONFIG_SPARSEMEM_EXTREME
- if (!mem_section) {
- unsigned long size, align;
-
- size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
- align = 1 << (INTERNODE_CACHE_SHIFT);
- mem_section = memblock_virt_alloc(size, align);
- }
-#endif
-
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
memory_present(this_nid, start_pfn, end_pfn);
}
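For reference, the #ifdef CONFIG_SPARSEMEM_EXTREME block removed above is the lazy, first-touch allocation of the mem_section root table. Below is a minimal sketch of that same allocation, assuming it is re-homed in early sparse-init code; the destination is not shown in this hunk, and the sizing/alignment simply mirror the removed lines.

#ifdef CONFIG_SPARSEMEM_EXTREME
	/*
	 * Sketch only: allocate the first-level mem_section table once,
	 * before any section is marked present.  Size and alignment mirror
	 * the block removed above; where this call ends up living is an
	 * assumption, not something this hunk shows.
	 */
	if (!mem_section) {
		unsigned long size  = sizeof(struct mem_section) * NR_SECTION_ROOTS;
		unsigned long align = 1 << (INTERNODE_CACHE_SHIFT);

		mem_section = memblock_virt_alloc(size, align);
	}
#endif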
* race condition. So you can't expect this function to be exact.
*/
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
+ int migratetype,
bool skip_hwpoisoned_pages)
{
unsigned long pfn, iter, found;
- int mt;
/*
* To avoid noisy data, lru_add_drain_all() should be called.
* If ZONE_MOVABLE, the zone never contains unmovable pages.
*/
if (zone_idx(zone) == ZONE_MOVABLE)
return false;
- mt = get_pageblock_migratetype(page);
- if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
+
+ /*
+ * CMA allocations (alloc_contig_range) really need to mark CMA
+ * pageblocks isolated even when they are not in fact movable, so
+ * consider them movable here.
+ */
+ if (is_migrate_cma(migratetype) &&
+ is_migrate_cma(get_pageblock_migratetype(page)))
return false;
pfn = page_to_pfn(page);
page = pfn_to_page(check);
+ if (PageReserved(page))
+ return true;
+
/*
* Hugepages are not in LRU lists, but they're movable.
* We need not scan over tail pages because we don't
* handle each tail page individually in migration.
*/
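The comment above refers to the hugepage skip that follows in the full function body (elided from this hunk). A sketch of that skip, assuming the compound_order()-based rounding used in this area of the kernel at the time:

		if (PageHuge(page)) {
			/*
			 * Sketch (assumed, not shown in this hunk): advance
			 * iter to the last tail page so the whole hugepage is
			 * skipped in one step instead of scanning each tail
			 * page individually.
			 */
			iter = round_up(iter + 1, 1 << compound_order(page)) - 1;
			continue;
		}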
if (!zone_spans_pfn(zone, pfn))
return false;
- return !has_unmovable_pages(zone, page, 0, true);
+ return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
}
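The hunk above shows one caller, the memory-hotplug removable check, passing MIGRATE_MOVABLE explicitly. A hedged sketch of the other expected call site, assuming the page-isolation path forwards the migratetype it is isolating for (the exact surrounding code of set_migratetype_isolate() is not part of this hunk):

	/*
	 * Sketch only: variable names around this call are assumed.  Only
	 * CMA-driven isolation (alloc_contig_range() isolating for
	 * MIGRATE_CMA) gets the relaxed CMA-pageblock check added above;
	 * memory hotplug keeps passing MIGRATE_MOVABLE and so keeps the
	 * strict behaviour.
	 */
	if (!has_unmovable_pages(zone, page, 0, migratetype,
				 skip_hwpoisoned_pages))
		set_pageblock_migratetype(page, MIGRATE_ISOLATE);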
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)