mm, sparse: do not swamp log with huge vmemmap allocation failures
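
High-order vmemmap allocations can fail under memory pressure, and each failure used to trigger the page allocator's full failure report, which can swamp the kernel log when many sections are being populated. The hunks below add __GFP_NOWARN to the allocation mask and instead print a single warn_alloc() summary the first time an allocation fails. The diff also switches the early path to memblock_virt_alloc_try_nid_raw() and drops __GFP_ZERO from the runtime path; the page-table populate helpers, which do rely on zeroed pages, now go through the new vmemmap_alloc_block_zero() wrapper.
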
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 478ce6d4a2c4e77141967e82bacaa7b3661e4d34..17acf01791fa832e1c8414cecc98034f2d652662 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -42,7 +42,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
                                unsigned long align,
                                unsigned long goal)
 {
-       return memblock_virt_alloc_try_nid(size, align, goal,
+       return memblock_virt_alloc_try_nid_raw(size, align, goal,
                                            BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
@@ -53,13 +53,20 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
        /* If the main allocator is up use that, fallback to bootmem. */
        if (slab_is_available()) {
+               gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+               int order = get_order(size);
+               static bool warned;
                struct page *page;
 
-               page = alloc_pages_node(node,
-                       GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-                       get_order(size));
+               page = alloc_pages_node(node, gfp_mask, order);
                if (page)
                        return page_address(page);
+
+               if (!warned) {
+                       warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
+                                  "vmemmap alloc failure: order:%u", order);
+                       warned = true;
+               }
                return NULL;
        } else
                return __earlyonly_bootmem_alloc(node, size, size,
@@ -180,11 +187,22 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
        return pte;
 }
 
+static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
+{
+       void *p = vmemmap_alloc_block(size, node);
+
+       if (!p)
+               return NULL;
+       memset(p, 0, size);
+
+       return p;
+}
+
 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
        pmd_t *pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
-               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pmd_populate_kernel(&init_mm, pmd, p);
@@ -196,7 +214,7 @@ pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
        pud_t *pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
-               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pud_populate(&init_mm, pud, p);
@@ -208,7 +226,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
 {
        p4d_t *p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d)) {
-               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                p4d_populate(&init_mm, p4d, p);
@@ -220,7 +238,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 {
        pgd_t *pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
-               void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+               void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pgd_populate(&init_mm, pgd, p);
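
For reference, the warn-once idiom the patch relies on, shown in isolation as a minimal userspace C sketch; try_alloc() and the message format are invented for the example and are not kernel APIs:

/*
 * Standalone illustration (not kernel code) of the warn-once pattern
 * used above: silence the allocator's own report and emit one summary
 * message on the first failure only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for alloc_pages_node(); returns NULL on failure. */
static void *try_alloc(size_t size)
{
	return malloc(size);
}

static void *alloc_or_warn_once(size_t size)
{
	static bool warned;	/* persists across calls, like the patch */
	void *p = try_alloc(size);

	if (p)
		return p;

	if (!warned) {
		fprintf(stderr, "alloc failure: size:%zu\n", size);
		warned = true;	/* subsequent failures stay silent */
	}
	return NULL;
}

int main(void)
{
	void *p = alloc_or_warn_once(1UL << 20);

	free(p);
	return 0;
}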