x86: convert CPA users to the new set_page_ API
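The core of the conversion is visible in mark_rodata_ro() and free_init_pages() below: the old change_page_attr_addr(addr, numpages, prot) calls are replaced by the typed set_memory_ro()/set_memory_rw() helpers, still followed by global_flush_tlb() at this stage of the series. A minimal sketch of the new call pattern, assuming the x86 declarations live in <asm/cacheflush.h> as in this series (the function name and range below are illustrative, not part of this patch):

	#include <asm/cacheflush.h>	/* set_memory_ro()/set_memory_rw(), global_flush_tlb() */

	/*
	 * Illustrative only: write-protect a page-aligned kernel virtual
	 * range and then undo it, mirroring the CONFIG_CPA_DEBUG test
	 * added to mark_rodata_ro() in this patch.
	 */
	static void example_protect_range(unsigned long start, unsigned long end)
	{
		set_memory_ro(start, (end - start) >> PAGE_SHIFT);
		global_flush_tlb();

		set_memory_rw(start, (end - start) >> PAGE_SHIFT);
		global_flush_tlb();
	}
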
arch/x86/mm/init_64.c
index 0f9c8c890658043773018ad32e8786953dae8c79..05bb12db0b09cb065f5223bc18923c0f8b761249 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -43,6 +43,8 @@
 #include <asm/proto.h>
 #include <asm/smp.h>
 #include <asm/sections.h>
+#include <asm/kdebug.h>
+#include <asm/numa.h>
 
 #ifndef Dprintk
 #define Dprintk(x...)
@@ -174,7 +176,8 @@ __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
        set_pte_phys(address, phys, prot);
 }
 
-unsigned long __meminitdata table_start, table_end;
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
 
 static __meminit void *alloc_low_page(unsigned long *phys)
 { 
@@ -224,8 +227,8 @@ __meminit void *early_ioremap(unsigned long addr, unsigned long size)
                vaddr += addr & ~PMD_MASK;
                addr &= PMD_MASK;
                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
-                       set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
-               __flush_tlb();
+                       set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+               __flush_tlb_all();
                return (void *)vaddr;
        next:
                ;
@@ -246,7 +249,7 @@ __meminit void early_iounmap(void *addr, unsigned long size)
        pmd = level2_kernel_pgt + pmd_index(vaddr);
        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);
-       __flush_tlb();
+       __flush_tlb_all();
 }
 
 static void __meminit
@@ -268,7 +271,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
                if (pmd_val(*pmd))
                        continue;
 
-               entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+               entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
                entry &= __supported_pte_mask;
                set_pmd(pmd, __pmd(entry));
        }
@@ -314,7 +317,7 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(pmd);
        }
-       __flush_tlb();
+       __flush_tlb_all();
 } 
 
 static void __init find_early_table_space(unsigned long end)
@@ -385,6 +388,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();
+
+       reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
 }
 
 #ifndef CONFIG_NUMA
@@ -484,34 +489,6 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
- */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-{
-       int err = -EIO;
-       unsigned long pfn;
-       unsigned long total = 0, mem = 0;
-       for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-               if (pfn_valid(pfn)) {
-                       online_page(pfn_to_page(pfn));
-                       err = 0;
-                       mem++;
-               }
-               total++;
-       }
-       if (!err) {
-               z->spanned_pages += total;
-               z->present_pages += mem;
-               z->zone_pgdat->node_spanned_pages += total;
-               z->zone_pgdat->node_present_pages += mem;
-       }
-       return err;
-}
-#endif
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;
 
@@ -521,8 +498,15 @@ void __init mem_init(void)
 
        pci_iommu_alloc();
 
-       /* clear the zero-page */
-       memset(empty_zero_page, 0, PAGE_SIZE);
+       /* clear_bss() has already cleared the empty_zero_page */
+
+       /* temporary debugging - double check it's true: */
+       {
+               int i;
+
+               for (i = 0; i < 1024; i++)
+                       WARN_ON_ONCE(empty_zero_page[i]);
+       }
 
        reservedpages = 0;
 
@@ -572,8 +556,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
-               if (addr >= __START_KERNEL_map)
-                       change_page_attr_addr(addr, 1, __pgprot(0));
                free_page(addr);
                totalram_pages++;
        }
@@ -610,18 +592,28 @@ void mark_rodata_ro(void)
        if (end <= start)
                return;
 
-       change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
+       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
 
        /*
-        * change_page_attr_addr() requires a global_flush_tlb() call after it.
+        * set_memory_*() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
+
+#ifdef CONFIG_CPA_DEBUG
+       printk("Testing CPA: undo %lx-%lx\n", start, end);
+       set_memory_rw(start, (end-start) >> PAGE_SHIFT);
+       global_flush_tlb();
+
+       printk("Testing CPA: again\n");
+       set_memory_ro(start, (end-start) >> PAGE_SHIFT);
+       global_flush_tlb();
+#endif
 }
 #endif
 
@@ -768,8 +760,7 @@ int __meminit vmemmap_populate(struct page *start_page,
                        if (!p)
                                return -ENOMEM;
 
-                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-                       mk_pte_huge(entry);
+                       entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));
 
                        printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",