x86: convert CPA users to the new set_page_ API
[sfrench/cifs-2.6.git] / arch / x86 / mm / init_64.c
index 15e05a004fcfd67f17cfcc7e78cec3416b92b8e5..05bb12db0b09cb065f5223bc18923c0f8b761249 100644 (file)
@@ -176,7 +176,8 @@ __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
        set_pte_phys(address, phys, prot);
 }
 
-unsigned long __meminitdata table_start, table_end;
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
 
 static __meminit void *alloc_low_page(unsigned long *phys)
 { 
@@ -227,7 +228,7 @@ __meminit void *early_ioremap(unsigned long addr, unsigned long size)
                addr &= PMD_MASK;
                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-               __flush_tlb();
+               __flush_tlb_all();
                return (void *)vaddr;
        next:
                ;
@@ -248,7 +249,7 @@ __meminit void early_iounmap(void *addr, unsigned long size)
        pmd = level2_kernel_pgt + pmd_index(vaddr);
        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);
-       __flush_tlb();
+       __flush_tlb_all();
 }
 
 static void __meminit
@@ -316,7 +317,7 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(pmd);
        }
-       __flush_tlb();
+       __flush_tlb_all();
 } 
 
 static void __init find_early_table_space(unsigned long end)
@@ -387,6 +388,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();
+
+       reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
 }
 
 #ifndef CONFIG_NUMA
@@ -495,8 +498,15 @@ void __init mem_init(void)
 
        pci_iommu_alloc();
 
-       /* clear the zero-page */
-       memset(empty_zero_page, 0, PAGE_SIZE);
+       /* clear_bss() already cleared the empty_zero_page */
+
+       /* temporary debugging - double check it's true: */
+       {
+               int i;
+
+               for (i = 0; i < 1024; i++)
+                       WARN_ON_ONCE(empty_zero_page[i]);
+       }
 
        reservedpages = 0;
 
@@ -546,8 +556,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
                init_page_count(virt_to_page(addr));
                memset((void *)(addr & ~(PAGE_SIZE-1)),
                        POISON_FREE_INITMEM, PAGE_SIZE);
-               if (addr >= __START_KERNEL_map)
-                       change_page_attr_addr(addr, 1, __pgprot(0));
                free_page(addr);
                totalram_pages++;
        }
@@ -584,18 +592,28 @@ void mark_rodata_ro(void)
        if (end <= start)
                return;
 
-       change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
+       set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
 
        /*
-        * change_page_attr_addr() requires a global_flush_tlb() call after it.
+        * set_memory_*() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
+
+#ifdef CONFIG_CPA_DEBUG
+       printk("Testing CPA: undo %lx-%lx\n", start, end);
+       set_memory_rw(start, (end-start) >> PAGE_SHIFT);
+       global_flush_tlb();
+
+       printk("Testing CPA: again\n");
+       set_memory_ro(start, (end-start) >> PAGE_SHIFT);
+       global_flush_tlb();
+#endif
 }
 #endif