x86: fix memtest print out
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5fe880fc305d03c90131948bab8c4cd16096e9f9..210243e94d841122a82143ff45cc7b8294ac71a7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -45,6 +45,7 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/numa.h>
+#include <asm/cacheflush.h>
 
 const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
@@ -53,6 +54,26 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
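+/*
+ * 1GB pages for the kernel direct mapping: CONFIG_DIRECT_GBPAGES sets
+ * the default, "gbpages"/"nogbpages" on the command line override it,
+ * and init_gbpages() clears it again if the CPU lacks support.
+ */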
+int direct_gbpages __meminitdata
+#ifdef CONFIG_DIRECT_GBPAGES
+                               = 1
+#endif
+;
+
+static int __init parse_direct_gbpages_off(char *arg)
+{
+       direct_gbpages = 0;
+       return 0;
+}
+early_param("nogbpages", parse_direct_gbpages_off);
+
+static int __init parse_direct_gbpages_on(char *arg)
+{
+       direct_gbpages = 1;
+       return 0;
+}
+early_param("gbpages", parse_direct_gbpages_on);
+
 /*
  * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
  * physical space so we can cache the place of the first one and move
@@ -68,9 +89,6 @@ void show_mem(void)
 
        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-       printk(KERN_INFO "Free swap:       %6ldkB\n",
-               nr_swap_pages << (PAGE_SHIFT-10));
-
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        /*
@@ -170,6 +188,34 @@ set_pte_phys(unsigned long vaddr, unsigned long phys, pgprot_t prot)
        __flush_tlb_one(vaddr);
 }
 
+/*
+ * The head.S code sets up the kernel high mapping:
+ *
+ *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
+ *
+ * phys_base holds the negative offset to the kernel, which is added
+ * to the compile time generated pmds. This results in invalid pmds up
+ * to the point where we hit the physaddr 0 mapping.
+ *
+ * We limit the mappings to the region from _text to _end.  _end is
+ * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * well, as they are located before _text:
+ */
+void __init cleanup_highmap(void)
+{
+       unsigned long vaddr = __START_KERNEL_map;
+       unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
+       pmd_t *pmd = level2_kernel_pgt;
+       pmd_t *last_pmd = pmd + PTRS_PER_PMD;
+
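+       /* Zap any pmd that maps something outside the _text.._end region: */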
+       for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+               if (!pmd_present(*pmd))
+                       continue;
+               if (vaddr < (unsigned long) _text || vaddr > end)
+                       set_pmd(pmd, __pmd(0));
+       }
+}
+
 /* NOTE: this is meant to be run only at boot */
 void __init
 __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
@@ -321,7 +367,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
                }
 
                if (pud_val(*pud)) {
-                       phys_pmd_update(pud, addr, end);
+                       if (!pud_large(*pud))
+                               phys_pmd_update(pud, addr, end);
+                       continue;
+               }
+
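+               /* Nothing is mapped here yet: use one 1GB page if enabled. */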
+               if (direct_gbpages) {
+                       set_pte((pte_t *)pud,
+                               pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
                        continue;
                }
 
@@ -342,9 +395,11 @@ static void __init find_early_table_space(unsigned long end)
        unsigned long puds, pmds, tables, start;
 
        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
-                round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+       tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
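+       /* With 1GB pages the direct mapping needs no pmd pages at all. */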
+       if (!direct_gbpages) {
+               pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+               tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+       }
 
        /*
         * RED-PEN putting page tables only on node 0 could
@@ -364,6 +419,124 @@ static void __init find_early_table_space(unsigned long end)
                (table_start << PAGE_SHIFT) + tables);
 }
 
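+/* Use gbpages for the direct mapping only if the CPU supports them. */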
+static void __init init_gbpages(void)
+{
+       if (direct_gbpages && cpu_has_gbpages)
+               printk(KERN_INFO "Using GB pages for direct mapping\n");
+       else
+               direct_gbpages = 0;
+}
+
+#ifdef CONFIG_MEMTEST_BOOTPARAM
+
+static void __init memtest(unsigned long start_phys, unsigned long size,
+                                unsigned pattern)
+{
+       unsigned long i;
+       unsigned long *start;
+       unsigned long start_bad;
+       unsigned long last_bad;
+       unsigned long val;
+       unsigned long start_phys_aligned;
+       unsigned long count;
+       unsigned long incr;
+
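+       /*
+        * Fill the region with the pattern, then read it back word by
+        * word; consecutive failing words are merged into one range and
+        * reserved as "BAD RAM" so they are never handed out later.
+        */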
+       switch (pattern) {
+       case 0:
+               val = 0UL;
+               break;
+       case 1:
+               val = -1UL;
+               break;
+       case 2:
+               val = 0x5555555555555555UL;
+               break;
+       case 3:
+               val = 0xaaaaaaaaaaaaaaaaUL;
+               break;
+       default:
+               return;
+       }
+
+       incr = sizeof(unsigned long);
+       start_phys_aligned = ALIGN(start_phys, incr);
+       count = (size - (start_phys_aligned - start_phys)) / incr;
+       start = __va(start_phys_aligned);
+       start_bad = 0;
+       last_bad = 0;
+
+       for (i = 0; i < count; i++)
+               start[i] = val;
+       for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
+               if (*start != val) {
+                       if (start_phys_aligned == last_bad + incr) {
+                               last_bad += incr;
+                       } else {
+                               if (start_bad) {
+                                       printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
+                                               val, start_bad, last_bad + incr);
+                                       reserve_early(start_bad, last_bad + incr, "BAD RAM");
+                               }
+                               start_bad = last_bad = start_phys_aligned;
+                       }
+               }
+       }
+       if (start_bad) {
+               printk(KERN_CONT "\n  %016lx bad mem addr %016lx - %016lx reserved",
+                       val, start_bad, last_bad + incr);
+               reserve_early(start_bad, last_bad + incr, "BAD RAM");
+       }
+}
+
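+/*
+ * memtest=N on the command line runs up to N test patterns over all
+ * free e820 RAM; the default is CONFIG_MEMTEST_BOOTPARAM_VALUE.
+ */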
+static int memtest_pattern __initdata = CONFIG_MEMTEST_BOOTPARAM_VALUE;
+
+static int __init parse_memtest(char *arg)
+{
+       if (arg)
+               memtest_pattern = simple_strtoul(arg, NULL, 0);
+       return 0;
+}
+
+early_param("memtest", parse_memtest);
+
+static void __init early_memtest(unsigned long start, unsigned long end)
+{
+       unsigned long t_start, t_size;
+       unsigned pattern;
+
+       if (!memtest_pattern)
+               return;
+
+       printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
+       for (pattern = 0; pattern < memtest_pattern; pattern++) {
+               t_start = start;
+               t_size = 0;
+               while (t_start < end) {
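+                       /* next free RAM area from t_start; size in t_size */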
+                       t_start = find_e820_area_size(t_start, &t_size, 1);
+
+                       /* done ? */
+                       if (t_start >= end)
+                               break;
+                       if (t_start + t_size > end)
+                               t_size = end - t_start;
+
+                       printk(KERN_CONT "\n  %016lx - %016lx pattern %d",
+                               t_start, t_start + t_size, pattern);
+
+                       memtest(t_start, t_size, pattern);
+
+                       t_start += t_size;
+               }
+       }
+       printk(KERN_CONT "\n");
+}
+#else
+static void __init early_memtest(unsigned long start, unsigned long end)
+{
+}
+#endif
+
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -372,8 +545,9 @@ static void __init find_early_table_space(unsigned long end)
 void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 {
        unsigned long next;
+       unsigned long start_phys = start, end_phys = end;
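+       /* The physical range is kept for early_memtest() run at the end. */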
 
-       pr_debug("init_memory_mapping\n");
+       printk(KERN_INFO "init_memory_mapping\n");
 
        /*
         * Find space for the kernel direct mapping tables.
@@ -382,8 +556,10 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
-       if (!after_bootmem)
+       if (!after_bootmem) {
+               init_gbpages();
                find_early_table_space(end);
+       }
 
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
@@ -414,6 +590,9 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
        if (!after_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");
+
+       if (!after_bootmem)
+               early_memtest(start_phys, end_phys);
 }
 
 #ifndef CONFIG_NUMA
@@ -487,14 +666,6 @@ void __init mem_init(void)
 
        /* clear_bss() already clear the empty_zero_page */
 
-       /* temporary debugging - double check it's true: */
-       {
-               int i;
-
-               for (i = 0; i < 1024; i++)
-                       WARN_ON_ONCE(empty_zero_page[i]);
-       }
-
        reservedpages = 0;
 
        /* this will put all low memory onto the freelists */
@@ -528,13 +699,15 @@ void __init mem_init(void)
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
+
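+       /* Fill the page pool change_page_attr() uses to split large pages. */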
+       cpa_init();
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-       unsigned long addr;
+       unsigned long addr = begin;
 
-       if (begin >= end)
+       if (addr >= end)
                return;
 
        /*
@@ -549,7 +722,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 #else
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
 
-       for (addr = begin; addr < end; addr += PAGE_SIZE) {
+       for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
@@ -573,24 +746,7 @@ EXPORT_SYMBOL_GPL(rodata_test_data);
 
 void mark_rodata_ro(void)
 {
-       unsigned long start = (unsigned long)_stext, end;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       /* It must still be possible to apply SMP alternatives. */
-       if (num_possible_cpus() > 1)
-               start = (unsigned long)_etext;
-#endif
-
-#ifdef CONFIG_KPROBES
-       start = (unsigned long)__start_rodata;
-#endif
-
-       end = (unsigned long)__end_rodata;
-       start = (start + PAGE_SIZE - 1) & PAGE_MASK;
-       end &= PAGE_MASK;
-       if (end <= start)
-               return;
-
+       unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
 
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
@@ -613,6 +769,7 @@ void mark_rodata_ro(void)
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 }
+
 #endif
 
 #ifdef CONFIG_BLK_DEV_INITRD