Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6991ccec9c322ffb843110bb69cf2326d64b266c..15c2050c629b1d8aacb2f36aac7ac09c54c95449 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -670,6 +670,7 @@ out:
 
 void free_compound_page(struct page *page)
 {
+       mem_cgroup_uncharge(page);
        __free_pages_ok(page, compound_order(page));
 }
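
For readers following the hunk above: the uncharge is added to the compound-page destructor, so it runs once when the final reference to a compound page is dropped. A minimal sketch of how that destructor is reached, assuming a plain __GFP_COMP allocation; the function name example_compound_alloc_free is hypothetical and not part of the patch.

/*
 * Illustrative only: the last put_page() on a compound page invokes the
 * page's compound destructor, which for ordinary __GFP_COMP pages is
 * free_compound_page() above, i.e. the new mem_cgroup_uncharge() runs
 * just before the pages go back to the buddy allocator.
 */
static void example_compound_alloc_free(void)
{
	/* order-2 compound page; __GFP_COMP sets up the compound metadata */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);

	if (!page)
		return;

	put_page(page);	/* last reference: destructor frees the page */
}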
 
@@ -3954,15 +3955,23 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
        if (compaction_failed(compact_result))
                goto check_priority;
 
+       /*
+        * compaction was skipped because there are not enough order-0 pages
+        * to work with, so we retry only if it looks like reclaim can help.
+        */
+       if (compaction_needs_reclaim(compact_result)) {
+               ret = compaction_zonelist_suitable(ac, order, alloc_flags);
+               goto out;
+       }
+
        /*
         * make sure the compaction wasn't deferred or didn't bail out early
         * due to locks contention before we declare that we should give up.
-        * But do not retry if the given zonelist is not suitable for
-        * compaction.
+        * But the next retry should use a higher priority if allowed, so
+        * we don't just keep bailing out endlessly.
         */
        if (compaction_withdrawn(compact_result)) {
-               ret = compaction_zonelist_suitable(ac, order, alloc_flags);
-               goto out;
+               goto check_priority;
        }
 
        /*
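
The new branch depends on compaction_needs_reclaim(), which is not defined in this file. A plausible shape for it, consistent with the comment above; the real helper lives in the compaction headers and may differ in detail.

/*
 * Assumed definition, for illustration only: compaction "needs reclaim"
 * when it was skipped because there were not enough order-0 pages to work
 * with, which is exactly the case the new branch retries only if reclaim
 * can help.
 */
static inline bool compaction_needs_reclaim(enum compact_result result)
{
	return result == COMPACT_SKIPPED;
}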
@@ -4458,6 +4467,28 @@ retry_cpuset:
                if (page)
                        goto got_pg;
 
+                if (order >= pageblock_order && (gfp_mask & __GFP_IO)) {
+                       /*
+                        * If allocating entire pageblock(s) and compaction
+                        * failed because all zones are below low watermarks
+                        * or is prohibited because it recently failed at this
+                        * order, fail immediately.
+                        *
+                        * Reclaim is
+                        *  - potentially very expensive because zones are far
+                        *    below their low watermarks or this is part of very
+                        *    bursty high order allocations,
+                        *  - not guaranteed to help because isolate_freepages()
+                        *    may not iterate over freed pages as part of its
+                        *    linear scan, and
+                        *  - unlikely to make entire pageblocks free on its
+                        *    own.
+                        */
+                       if (compact_result == COMPACT_SKIPPED ||
+                           compact_result == COMPACT_DEFERRED)
+                               goto nopage;
+               }
+
                /*
                 * Checks for costly allocations with __GFP_NORETRY, which
                 * includes THP page fault allocations
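
A worked example of who hits the new early bail-out, under assumed x86-64 defaults (4 KiB base pages, 2 MiB pageblocks, so pageblock_order == 9, the same order as a PMD-sized THP); the helper name would_fail_fast is hypothetical and only restates the condition added above.

/*
 * Sketch: an order-9 THP fault allocation whose gfp mask allows IO now
 * fails fast when compaction reported SKIPPED or DEFERRED, instead of
 * entering direct reclaim that is unlikely to free whole pageblocks.
 */
static bool would_fail_fast(unsigned int order, gfp_t gfp_mask,
			    enum compact_result compact_result)
{
	if (order < pageblock_order || !(gfp_mask & __GFP_IO))
		return false;

	return compact_result == COMPACT_SKIPPED ||
	       compact_result == COMPACT_DEFERRED;
}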
@@ -5971,7 +6002,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
                }
        }
 
-       pr_info("%s initialised, %lu pages in %ums\n", dev_name(pgmap->dev),
+       pr_info("%s initialised %lu pages in %ums\n", __func__,
                size, jiffies_to_msecs(jiffies - start));
 }
 
@@ -6638,9 +6669,11 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
-       spin_lock_init(&pgdat->split_queue_lock);
-       INIT_LIST_HEAD(&pgdat->split_queue);
-       pgdat->split_queue_len = 0;
+       struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
+
+       spin_lock_init(&ds_queue->split_queue_lock);
+       INIT_LIST_HEAD(&ds_queue->split_queue);
+       ds_queue->split_queue_len = 0;
 }
 #else
 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
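
The rewritten initialiser assumes the three per-node split-queue fields have been gathered into a struct deferred_split embedded in struct pglist_data as deferred_split_queue. A sketch of that structure as implied by the code above; the real definition lives elsewhere in the mm headers and may carry additional fields.

struct deferred_split {
	spinlock_t split_queue_lock;	/* protects the list below */
	struct list_head split_queue;	/* THPs queued for deferred split */
	unsigned long split_queue_len;	/* number of queued pages */
};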
@@ -8196,7 +8229,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                        if (!hugepage_migration_supported(page_hstate(head)))
                                goto unmovable;
 
-                       skip_pages = (1 << compound_order(head)) - (page - head);
+                       skip_pages = compound_nr(head) - (page - head);
                        iter += skip_pages - 1;
                        continue;
                }
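
The replacement relies on compound_nr() returning the number of base pages in a compound page. A minimal equivalent, consistent with the expression it replaces; note the unsigned long result, which avoids overflowing int for very large orders (the in-tree helper sits in the mm headers).

static inline unsigned long compound_nr(struct page *page)
{
	/* same value as (1 << compound_order(page)), but as unsigned long */
	return 1UL << compound_order(page);
}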