Drain per-cpu lists when high-order allocations fail
author Mel Gorman <mel@csn.ul.ie>
Tue, 16 Oct 2007 08:25:50 +0000 (01:25 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 16 Oct 2007 16:42:59 +0000 (09:42 -0700)
Per-cpu pages can accidentally cause fragmentation because they are free but
pinned pages sitting in an otherwise contiguous block.  With this patch
applied, the per-cpu caches are drained after direct reclaim if the requested
order is greater than 0.  The drain simply reuses the code already used by
suspend and hotplug.
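
For reference, the reused drain path looks roughly like the sketch below.
It is simplified from the 2.6.23-era __drain_pages(); helpers such as
zone_pcp() and free_pages_bulk() are that era's names, and the body is
illustrative rather than the exact source:

	/*
	 * Illustrative sketch only: hand every page on this CPU's per-cpu
	 * lists back to the buddy allocator so that free-but-pinned pages
	 * can coalesce into higher-order blocks.
	 */
	static void __drain_pages(unsigned int cpu)
	{
		unsigned long flags;
		struct zone *zone;
		int i;

		for_each_zone(zone) {
			struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

			for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
				struct per_cpu_pages *pcp = &pset->pcp[i];

				local_irq_save(flags);
				/* free the whole list back as order-0 pages */
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}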

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index fea1e3b56c3d6626c5cf59bccd14f84ea37fbaf4..aa7e5d2f28a52693d3bfa023c306e498bbdbe925 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -876,7 +876,9 @@ void mark_free_pages(struct zone *zone)
        }
        spin_unlock_irqrestore(&zone->lock, flags);
 }
+#endif /* CONFIG_PM */
 
+#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -888,7 +890,28 @@ void drain_local_pages(void)
        __drain_pages(smp_processor_id());
        local_irq_restore(flags);       
 }
-#endif /* CONFIG_HIBERNATION */
+
+void smp_drain_local_pages(void *arg)
+{
+       drain_local_pages();
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_local_pages(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __drain_pages(smp_processor_id());
+       local_irq_restore(flags);
+
+       smp_call_function(smp_drain_local_pages, NULL, 0, 1);
+}
+#else
+void drain_all_local_pages(void) {}
+#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
@@ -1480,6 +1503,9 @@ nofail_alloc:
 
        cond_resched();
 
+       if (order != 0)
+               drain_all_local_pages();
+
        if (likely(did_some_progress)) {
                page = get_page_from_freelist(gfp_mask, order,
                                                zonelist, alloc_flags);
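
A note on the cross-CPU drain above: smp_call_function() in this era ran
the handler on every CPU except the caller, which is why
drain_all_local_pages() drains the local CPU by hand before making the
call.  Its 2.6.23-era prototype, for reference (later kernels dropped the
nonatomic argument):

	int smp_call_function(void (*func)(void *info), void *info,
			      int nonatomic, int wait);

Passing wait == 1 makes the call synchronous, so every remote CPU has
finished spilling its per-cpu lists back to the buddy allocator before
drain_all_local_pages() returns and get_page_from_freelist() retries;
high-order blocks that coalesce as a result are visible to that retry.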