x86/cpu_entry_area: Move it to a separate unit
author	Thomas Gleixner <tglx@linutronix.de>
	Wed, 20 Dec 2017 17:28:54 +0000 (18:28 +0100)
committer	Ingo Molnar <mingo@kernel.org>
	Fri, 22 Dec 2017 19:13:04 +0000 (20:13 +0100)
Separate the cpu_entry_area code out of cpu/common.c and the fixmap.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/cpu_entry_area.h [new file with mode: 0644]
arch/x86/include/asm/fixmap.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/traps.c
arch/x86/mm/Makefile
arch/x86/mm/cpu_entry_area.c [new file with mode: 0644]

diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
new file mode 100644 (file)
index 0000000..5471826
--- /dev/null
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _ASM_X86_CPU_ENTRY_AREA_H
+#define _ASM_X86_CPU_ENTRY_AREA_H
+
+#include <linux/percpu-defs.h>
+#include <asm/processor.h>
+
+/*
+ * cpu_entry_area is a percpu region that contains things needed by the CPU
+ * and early entry/exit code.  Real types aren't used for all fields here
+ * to avoid circular header dependencies.
+ *
+ * Every field is a virtual alias of some other allocated backing store.
+ * There is no direct allocation of a struct cpu_entry_area.
+ */
+struct cpu_entry_area {
+       char gdt[PAGE_SIZE];
+
+       /*
+        * The GDT is just below entry_stack and thus serves (on x86_64) as
+        * a read-only guard page.
+        */
+       struct entry_stack_page entry_stack_page;
+
+       /*
+        * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+        * we need task switches to work, and task switches write to the TSS.
+        */
+       struct tss_struct tss;
+
+       char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+       /*
+        * Exception stacks used for IST entries.
+        *
+        * In the future, this should have a separate slot for each stack
+        * with guard pages between them.
+        */
+       char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
+};
+
+#define CPU_ENTRY_AREA_SIZE    (sizeof(struct cpu_entry_area))
+#define CPU_ENTRY_AREA_PAGES   (CPU_ENTRY_AREA_SIZE / PAGE_SIZE)
+
+DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+
+extern void setup_cpu_entry_areas(void);
+
+#endif
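
A quick illustration, not part of the patch: every member of struct cpu_entry_area is page-sized or page-aligned, so CPU_ENTRY_AREA_SIZE divides evenly into CPU_ENTRY_AREA_PAGES. A minimal sketch of the kind of compile-time check a consumer of the new header could add, assuming <linux/bug.h> for BUILD_BUG_ON; the helper name is illustrative only:

#include <linux/bug.h>
#include <asm/cpu_entry_area.h>

static inline void cpu_entry_area_layout_check(void)
{
	/* Sketch only: the struct must be an exact multiple of PAGE_SIZE ... */
	BUILD_BUG_ON(CPU_ENTRY_AREA_SIZE % PAGE_SIZE != 0);
	/* ... so the per-CPU page count derived from it is exact as well. */
	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE != CPU_ENTRY_AREA_SIZE);
}
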
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8153b8d86a3c394984ba55e7b7b3688838be2a3b..fb801662a23055fcde49331e1c6116762d8c4272 100644 (file)
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -25,6 +25,7 @@
 #else
 #include <uapi/asm/vsyscall.h>
 #endif
+#include <asm/cpu_entry_area.h>
 
 /*
  * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
@@ -44,46 +45,6 @@ extern unsigned long __FIXADDR_TOP;
                         PAGE_SIZE)
 #endif
 
-/*
- * cpu_entry_area is a percpu region in the fixmap that contains things
- * needed by the CPU and early entry/exit code.  Real types aren't used
- * for all fields here to avoid circular header dependencies.
- *
- * Every field is a virtual alias of some other allocated backing store.
- * There is no direct allocation of a struct cpu_entry_area.
- */
-struct cpu_entry_area {
-       char gdt[PAGE_SIZE];
-
-       /*
-        * The GDT is just below entry_stack and thus serves (on x86_64) as
-        * a a read-only guard page.
-        */
-       struct entry_stack_page entry_stack_page;
-
-       /*
-        * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
-        * we need task switches to work, and task switches write to the TSS.
-        */
-       struct tss_struct tss;
-
-       char entry_trampoline[PAGE_SIZE];
-
-#ifdef CONFIG_X86_64
-       /*
-        * Exception stacks used for IST entries.
-        *
-        * In the future, this should have a separate slot for each stack
-        * with guard pages between them.
-        */
-       char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
-#endif
-};
-
-#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
-
-extern void setup_cpu_entry_areas(void);
-
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ed4acbce37a8b4cf53da9d5aa724570501209b0f..8ddcfa4d4165bb92717137da51f174a50365456e 100644 (file)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -482,102 +482,8 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
          [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
          [DEBUG_STACK - 1]                     = DEBUG_STKSZ
 };
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-#endif
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page,
-                                  entry_stack_storage);
-
-static void __init
-set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
-{
-       for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
-               __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
-}
-
-/* Setup the fixmap mappings only once per-processor */
-static void __init setup_cpu_entry_area(int cpu)
-{
-#ifdef CONFIG_X86_64
-       extern char _entry_trampoline[];
-
-       /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
-       pgprot_t gdt_prot = PAGE_KERNEL_RO;
-       pgprot_t tss_prot = PAGE_KERNEL_RO;
-#else
-       /*
-        * On native 32-bit systems, the GDT cannot be read-only because
-        * our double fault handler uses a task gate, and entering through
-        * a task gate needs to change an available TSS to busy.  If the
-        * GDT is read-only, that will triple fault.  The TSS cannot be
-        * read-only because the CPU writes to it on task switches.
-        *
-        * On Xen PV, the GDT must be read-only because the hypervisor
-        * requires it.
-        */
-       pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
-               PAGE_KERNEL_RO : PAGE_KERNEL;
-       pgprot_t tss_prot = PAGE_KERNEL;
-#endif
-
-       __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
-       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page),
-                               per_cpu_ptr(&entry_stack_storage, cpu), 1,
-                               PAGE_KERNEL);
-
-       /*
-        * The Intel SDM says (Volume 3, 7.2.1):
-        *
-        *  Avoid placing a page boundary in the part of the TSS that the
-        *  processor reads during a task switch (the first 104 bytes). The
-        *  processor may not correctly perform address translations if a
-        *  boundary occurs in this area. During a task switch, the processor
-        *  reads and writes into the first 104 bytes of each TSS (using
-        *  contiguous physical addresses beginning with the physical address
-        *  of the first byte of the TSS). So, after TSS access begins, if
-        *  part of the 104 bytes is not physically contiguous, the processor
-        *  will access incorrect information without generating a page-fault
-        *  exception.
-        *
-        * There are also a lot of errata involving the TSS spanning a page
-        * boundary.  Assert that we're not doing that.
-        */
-       BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
-                     offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
-       BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
-       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
-                               &per_cpu(cpu_tss_rw, cpu),
-                               sizeof(struct tss_struct) / PAGE_SIZE,
-                               tss_prot);
-
-#ifdef CONFIG_X86_32
-       per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
 #endif
 
-#ifdef CONFIG_X86_64
-       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
-       BUILD_BUG_ON(sizeof(exception_stacks) !=
-                    sizeof(((struct cpu_entry_area *)0)->exception_stacks));
-       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
-                               &per_cpu(exception_stacks, cpu),
-                               sizeof(exception_stacks) / PAGE_SIZE,
-                               PAGE_KERNEL);
-
-       __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
-                    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
-#endif
-}
-
-void __init setup_cpu_entry_areas(void)
-{
-       unsigned int cpu;
-
-       for_each_possible_cpu(cpu)
-               setup_cpu_entry_area(cpu);
-}
-
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 74136fd16f491f0443ef3be4e65604182bef32d1..464daed6894fe34927623ad943bececc850c9173 100644 (file)
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -52,6 +52,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/mce.h>
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>
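
traps.c only gains the explicit include; the declaration of setup_cpu_entry_areas() previously arrived via <asm/fixmap.h>. The caller there is presumably the early trap/IDT setup path; a reduced, hypothetical sketch of such a call site (example_trap_init() is illustrative, not code from this patch):

#include <asm/cpu_entry_area.h>

/* Hypothetical, reduced init path: only the part that needs the new header. */
void __init example_trap_init(void)
{
	/*
	 * The per-CPU entry areas must be mapped before any IST stack or
	 * the entry trampoline is referenced.
	 */
	setup_cpu_entry_areas();
}
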
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 7ba7f3d7f477582516a9375f9a7d189e7249493d..2e0017af8f9b068e4a9f6d49103af859b02064fd 100644 (file)
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o   = -pg
 endif
 
 obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-           pat.o pgtable.o physaddr.o setup_nx.o tlb.o
+           pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
new file mode 100644 (file)
index 0000000..235ff9c
--- /dev/null
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <asm/desc.h>
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+static void __init
+set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
+{
+       for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
+               __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
+}
+
+/* Setup the fixmap mappings only once per-processor */
+static void __init setup_cpu_entry_area(int cpu)
+{
+#ifdef CONFIG_X86_64
+       extern char _entry_trampoline[];
+
+       /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
+       pgprot_t gdt_prot = PAGE_KERNEL_RO;
+       pgprot_t tss_prot = PAGE_KERNEL_RO;
+#else
+       /*
+        * On native 32-bit systems, the GDT cannot be read-only because
+        * our double fault handler uses a task gate, and entering through
+        * a task gate needs to change an available TSS to busy.  If the
+        * GDT is read-only, that will triple fault.  The TSS cannot be
+        * read-only because the CPU writes to it on task switches.
+        *
+        * On Xen PV, the GDT must be read-only because the hypervisor
+        * requires it.
+        */
+       pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
+               PAGE_KERNEL_RO : PAGE_KERNEL;
+       pgprot_t tss_prot = PAGE_KERNEL;
+#endif
+
+       __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page),
+                               per_cpu_ptr(&entry_stack_storage, cpu), 1,
+                               PAGE_KERNEL);
+
+       /*
+        * The Intel SDM says (Volume 3, 7.2.1):
+        *
+        *  Avoid placing a page boundary in the part of the TSS that the
+        *  processor reads during a task switch (the first 104 bytes). The
+        *  processor may not correctly perform address translations if a
+        *  boundary occurs in this area. During a task switch, the processor
+        *  reads and writes into the first 104 bytes of each TSS (using
+        *  contiguous physical addresses beginning with the physical address
+        *  of the first byte of the TSS). So, after TSS access begins, if
+        *  part of the 104 bytes is not physically contiguous, the processor
+        *  will access incorrect information without generating a page-fault
+        *  exception.
+        *
+        * There are also a lot of errata involving the TSS spanning a page
+        * boundary.  Assert that we're not doing that.
+        */
+       BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+                     offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+       BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
+                               &per_cpu(cpu_tss_rw, cpu),
+                               sizeof(struct tss_struct) / PAGE_SIZE,
+                               tss_prot);
+
+#ifdef CONFIG_X86_32
+       per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+       BUILD_BUG_ON(sizeof(exception_stacks) !=
+                    sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
+                               &per_cpu(exception_stacks, cpu),
+                               sizeof(exception_stacks) / PAGE_SIZE,
+                               PAGE_KERNEL);
+
+       __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
+                    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+#endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu)
+               setup_cpu_entry_area(cpu);
+}
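
For completeness, a hedged usage sketch (not in this patch) of how code might locate a CPU's entry area once setup_cpu_entry_areas() has run. It assumes the get_cpu_entry_area() helper referenced above, which lives in <asm/fixmap.h> at this point, and uses a pr_info() purely for illustration; dump_cpu0_entry_area() is a hypothetical debug helper:

#include <linux/printk.h>
#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>

/* Hypothetical debug helper: report where CPU 0's entry trampoline is mapped. */
static void __init dump_cpu0_entry_area(void)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(0);

	pr_info("cpu0 entry trampoline at %p (%d pages per CPU)\n",
		cea->entry_trampoline, (int)CPU_ENTRY_AREA_PAGES);
}
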