x86: add kernel_map_pages() to 64-bit
author Ingo Molnar <mingo@elte.hu>
Wed, 30 Jan 2008 12:34:03 +0000 (13:34 +0100)
committer Ingo Molnar <mingo@elte.hu>
Wed, 30 Jan 2008 12:34:03 +0000 (13:34 +0100)
Needed for DEBUG_PAGEALLOC support and for unification of the 32-bit and 64-bit code.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/mm/pageattr_64.c

index 4053832d4108419ac50effd4c7d8b780eb54eaf7..e1c860800ff15963566962f5e7f0448e0755b9dd 100644
@@ -260,3 +260,33 @@ void global_flush_tlb(void)
        on_each_cpu(flush_kernel_map, NULL, 1, 1);
 }
 EXPORT_SYMBOL(global_flush_tlb);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       if (PageHighMem(page))
+               return;
+       if (!enable) {
+               debug_check_no_locks_freed(page_address(page),
+                                          numpages * PAGE_SIZE);
+       }
+
+       /*
+        * If page allocator is not up yet then do not call c_p_a():
+        */
+       if (!debug_pagealloc_enabled)
+               return;
+
+       /*
+        * the return value is ignored - the calls cannot fail,
+        * large pages are disabled at boot time.
+        */
+       change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+
+       /*
+        * we should perform an IPI and flush all tlbs,
+        * but that can deadlock->flush only current cpu.
+        */
+       __flush_tlb_all();
+}
+#endif
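
For context, a minimal, illustrative sketch (not part of this patch) of how the generic page allocator is expected to drive this hook once CONFIG_DEBUG_PAGEALLOC is enabled: pages are unmapped from the kernel linear mapping when they are freed and remapped when they are allocated again, so a stray access to freed memory faults immediately. The helper names below (debug_pagealloc_map/unmap) are hypothetical; only the kernel_map_pages() calls reflect the usual call pattern in mm/page_alloc.c of this era.

    /*
     * Illustrative sketch only -- loosely modeled on the
     * mm/page_alloc.c call sites, not part of this patch.
     */
    #include <linux/mm.h>

    /* Free path: drop the kernel mapping so use-after-free faults at once. */
    static void debug_pagealloc_unmap(struct page *page, unsigned int order)
    {
            kernel_map_pages(page, 1 << order, 0);  /* enable == 0 -> __pgprot(0) */
    }

    /* Allocation path: restore the mapping before handing the pages out. */
    static void debug_pagealloc_map(struct page *page, unsigned int order)
    {
            kernel_map_pages(page, 1 << order, 1);  /* enable == 1 -> PAGE_KERNEL */
    }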