diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1f5de1c1283b08075b3bb5177ea5b1214a58c0f..2bf03c76504b0697e8f3f1d7626a6fd879a9abb2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5053,9 +5053,13 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
         * Skip populated array elements to determine if any pages need
         * to be allocated before disabling IRQs.
         */
-       while (page_array && page_array[nr_populated] && nr_populated < nr_pages)
+       while (page_array && nr_populated < nr_pages && page_array[nr_populated])
                nr_populated++;
 
+       /* Already populated array? */
+       if (unlikely(page_array && nr_pages - nr_populated == 0))
+               return nr_populated;
+
        /* Use the single page allocator for one page. */
        if (nr_pages - nr_populated == 1)
                goto failed;
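
The reordering in the hunk above matters because && evaluates left to right and short-circuits: checking nr_populated < nr_pages before page_array[nr_populated] keeps the loop from reading one slot past the end of a fully populated array. The new early return then skips both the single-page path and the main allocation loop when there is nothing left to fill. A minimal userspace sketch of the same pattern (count_populated and bulk_fill are made-up names for illustration, not kernel APIs):

#include <stddef.h>

/* Count leading non-NULL slots; the bounds check runs before the index. */
size_t count_populated(void **slots, size_t nr_slots)
{
	size_t n = 0;

	while (slots && n < nr_slots && slots[n])
		n++;
	return n;
}

/* Return early when every slot is already populated. */
size_t bulk_fill(void **slots, size_t nr_slots)
{
	size_t populated = count_populated(slots, nr_slots);

	if (slots && nr_slots - populated == 0)
		return populated;

	/* ... allocate entries for the remaining slots here ... */
	return populated;
}

Swapping the two operands of && back would reintroduce the out-of-bounds read whenever the caller hands in an array that is already full.
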
@@ -6396,7 +6400,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
                return;
 
        /*
-        * The call to memmap_init_zone should have already taken care
+        * The call to memmap_init should have already taken care
         * of the pages reserved for the memmap, so we can just jump to
         * the end of that region and start processing the device pages.
         */
@@ -6461,7 +6465,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 /*
  * Only struct pages that correspond to ranges defined by memblock.memory
  * are zeroed and initialized by going through __init_single_page() during
- * memmap_init_zone().
+ * memmap_init_zone_range().
  *
  * But, there could be struct pages that correspond to holes in
  * memblock.memory. This can happen because of the following reasons:
@@ -6480,9 +6484,9 @@ static void __meminit zone_init_free_lists(struct zone *zone)
  *   zone/node above the hole except for the trailing pages in the last
  *   section that will be appended to the zone/node below.
  */
-static u64 __meminit init_unavailable_range(unsigned long spfn,
-                                           unsigned long epfn,
-                                           int zone, int node)
+static void __init init_unavailable_range(unsigned long spfn,
+                                         unsigned long epfn,
+                                         int zone, int node)
 {
        unsigned long pfn;
        u64 pgcnt = 0;
@@ -6498,56 +6502,77 @@ static u64 __meminit init_unavailable_range(unsigned long spfn,
                pgcnt++;
        }
 
-       return pgcnt;
+       if (pgcnt)
+               pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
+                       node, zone_names[zone], pgcnt);
 }
 #else
-static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
-                                        int zone, int node)
+static inline void init_unavailable_range(unsigned long spfn,
+                                         unsigned long epfn,
+                                         int zone, int node)
 {
-       return 0;
 }
 #endif
 
-void __meminit __weak memmap_init_zone(struct zone *zone)
+static void __init memmap_init_zone_range(struct zone *zone,
+                                         unsigned long start_pfn,
+                                         unsigned long end_pfn,
+                                         unsigned long *hole_pfn)
 {
        unsigned long zone_start_pfn = zone->zone_start_pfn;
        unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
-       int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
-       static unsigned long hole_pfn;
+       int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+
+       start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
+       end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
+
+       if (start_pfn >= end_pfn)
+               return;
+
+       memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
+                         zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
+
+       if (*hole_pfn < start_pfn)
+               init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
+
+       *hole_pfn = end_pfn;
+}
+
+static void __init memmap_init(void)
+{
        unsigned long start_pfn, end_pfn;
-       u64 pgcnt = 0;
+       unsigned long hole_pfn = 0;
+       int i, j, zone_id, nid;
 
-       for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
-               start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
-               end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
+       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+               struct pglist_data *node = NODE_DATA(nid);
+
+               for (j = 0; j < MAX_NR_ZONES; j++) {
+                       struct zone *zone = node->node_zones + j;
 
-               if (end_pfn > start_pfn)
-                       memmap_init_range(end_pfn - start_pfn, nid,
-                                       zone_id, start_pfn, zone_end_pfn,
-                                       MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
+                       if (!populated_zone(zone))
+                               continue;
 
-               if (hole_pfn < start_pfn)
-                       pgcnt += init_unavailable_range(hole_pfn, start_pfn,
-                                                       zone_id, nid);
-               hole_pfn = end_pfn;
+                       memmap_init_zone_range(zone, start_pfn, end_pfn,
+                                              &hole_pfn);
+                       zone_id = j;
+               }
        }
 
 #ifdef CONFIG_SPARSEMEM
        /*
-        * Initialize the hole in the range [zone_end_pfn, section_end].
-        * If zone boundary falls in the middle of a section, this hole
-        * will be re-initialized during the call to this function for the
-        * higher zone.
+        * Initialize the memory map for the hole in the range [memory_end,
+        * section_end].
+        * Append the pages in this hole to the highest zone in the last
+        * node.
+        * The call to init_unavailable_range() is outside the ifdef to
+        * silence the compiler warning about zone_id set but not used;
+        * for FLATMEM it is a nop anyway.
         */
-       end_pfn = round_up(zone_end_pfn, PAGES_PER_SECTION);
+       end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
        if (hole_pfn < end_pfn)
-               pgcnt += init_unavailable_range(hole_pfn, end_pfn,
-                                               zone_id, nid);
 #endif
-
-       if (pgcnt)
-               pr_info("  %s zone: %llu pages in unavailable ranges\n",
-                       zone->name, pgcnt);
+               init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
 }
 
 static int zone_batchsize(struct zone *zone)
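
In the rewritten hunk above, memmap_init_zone_range() first intersects the memblock range with the zone by clamping both endpoints to [zone_start_pfn, zone_end_pfn) and bails out when the intersection is empty; *hole_pfn then remembers where the previous initialized range ended, so the gap before the next range can be handed to init_unavailable_range(). A standalone sketch of just the clamp-and-skip step, using a local clamp macro in place of the kernel helper and invented names:

#include <stdio.h>

#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

/* Intersect [start, end) with [zone_start, zone_end); 0 means no overlap. */
unsigned long zone_overlap(unsigned long start, unsigned long end,
			   unsigned long zone_start, unsigned long zone_end,
			   unsigned long *out_start)
{
	start = clamp(start, zone_start, zone_end);
	end = clamp(end, zone_start, zone_end);

	if (start >= end)
		return 0;	/* range lies entirely outside the zone */

	*out_start = start;
	return end - start;
}

int main(void)
{
	unsigned long start, nr;

	/* A range 0x80..0x200 against a zone spanning 0x100..0x180. */
	nr = zone_overlap(0x80, 0x200, 0x100, 0x180, &start);
	if (nr)
		printf("init %#lx..%#lx (%lu pages)\n", start, start + nr, nr);
	return 0;
}
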
@@ -7250,7 +7275,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
                set_pageblock_order();
                setup_usemap(zone);
                init_currently_empty_zone(zone, zone->zone_start_pfn, size);
-               memmap_init_zone(zone);
        }
 }
 
@@ -7776,6 +7800,8 @@ void __init free_area_init(unsigned long *max_zone_pfn)
                        node_set_state(nid, N_MEMORY);
                check_for_memory(pgdat, nid);
        }
+
+       memmap_init();
 }
 
 static int __init cmdline_parse_core(char *p, unsigned long *core,
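
The last two hunks move struct page initialization out of free_area_init_core() and into a single memmap_init() call made after free_area_init() has sized every node and zone. The hole accounting inside memmap_init() follows one rule: remember where the previously initialized range ended, and charge any gap before the next range to the zone above it, with a final section-aligned gap handled after the loop. A toy sketch of that bookkeeping over a made-up range list (not kernel code):

#include <stdio.h>

struct range { unsigned long start, end; };	/* page frames, [start, end) */

/* Walk ranges in ascending order and report the holes between them. */
void walk_ranges(const struct range *ranges, int nr)
{
	unsigned long hole_pfn = 0;	/* end of the previously handled range */
	int i;

	for (i = 0; i < nr; i++) {
		if (hole_pfn < ranges[i].start)
			printf("hole %lu..%lu -> unavailable\n",
			       hole_pfn, ranges[i].start);
		printf("range %lu..%lu -> initialized\n",
		       ranges[i].start, ranges[i].end);
		hole_pfn = ranges[i].end;
	}
}

int main(void)
{
	const struct range mem[] = { { 16, 64 }, { 96, 128 } };

	walk_ranges(mem, 2);	/* the 64..96 gap is reported as a hole */
	return 0;
}
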