x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
author     Andy Lutomirski <luto@amacapital.net>
           Mon, 5 May 2014 19:19:33 +0000 (12:19 -0700)
committer  H. Peter Anvin <hpa@linux.intel.com>
           Mon, 5 May 2014 20:18:47 +0000 (13:18 -0700)
This code is used during CPU setup, and it isn't, strictly speaking,
related to the 32-bit vdso.  It's easier to understand how this code
works when it sits closer to its callers.

This also lets syscall32_cpu_init be static, which might save a
trivial amount of kernel text.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/4e466987204e232d7b55a53ff6b9739f12237461.1399317206.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
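
For orientation before the diff: after this patch the compat-mode entry-MSR
setup lives in arch/x86/kernel/cpu/common.c, next to the code that calls it,
which is why the extern declaration in proto.h can be dropped.  The sketch
below shows the intended shape of common.c; it is illustrative only, and the
syscall_init() call site (with its CONFIG_IA32_EMULATION guard) is an
assumption based on the changelog, not something this patch touches.

/*
 * Illustrative sketch, not part of the patch: rough layout of
 * arch/x86/kernel/cpu/common.c after the move.  The syscall_init()
 * call site is assumed for illustration; this patch only moves the
 * two helpers.
 */
#ifdef CONFIG_X86_64
/* Now static: its only caller lives in this file. */
static void syscall32_cpu_init(void)
{
	/* Program the compat SYSENTER/SYSCALL MSRs (full body in the hunk below). */
}

void syscall_init(void)
{
	/* ... native 64-bit SYSCALL MSR setup ... */
#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();	/* now resolves to the static helper above */
#endif
}
#endif

#ifdef CONFIG_X86_32
/* Kept non-static by this patch. */
void enable_sep_cpu(void)
{
	/* Program the native SYSENTER MSRs for this CPU (full body in the hunk below). */
}
#endif

Only syscall32_cpu_init() loses external linkage here; enable_sep_cpu() keeps
its global definition, which is why proto.h only drops the declaration of the
former.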
arch/x86/include/asm/proto.h
arch/x86/kernel/cpu/common.c
arch/x86/vdso/vdso32-setup.c

diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 6fd3fd76979687dffc83defc1e5f4c69f468d967..a90f8972dad507240ae946b61fd8a5f217d4be52 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -12,8 +12,6 @@ void ia32_syscall(void);
 void ia32_cstar_target(void);
 void ia32_sysenter_target(void);
 
-void syscall32_cpu_init(void);
-
 void x86_configure_nx(void);
 void x86_report_nx(void);
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a135239badb7fd4762ebf939ae755183660641b2..7c65b4666c243300309130b6be9ef5da1f9f0b7b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -953,6 +953,38 @@ static void vgetcpu_set_mode(void)
        else
                vgetcpu_mode = VGETCPU_LSL;
 }
+
+/* May not be __init: called during resume */
+static void syscall32_cpu_init(void)
+{
+       /* Load these always in case some future AMD CPU supports
+          SYSENTER from compat mode too. */
+       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+
+       wrmsrl(MSR_CSTAR, ia32_cstar_target);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+void enable_sep_cpu(void)
+{
+       int cpu = get_cpu();
+       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+       if (!boot_cpu_has(X86_FEATURE_SEP)) {
+               put_cpu();
+               return;
+       }
+
+       tss->x86_tss.ss1 = __KERNEL_CS;
+       tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+       wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+       wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
+       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+       put_cpu();
+}
 #endif
 
 void __init identify_boot_cpu(void)
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 5a657d93c6e0c700115fa0c954867e1f6c11391e..9c78d5b24874d8f873962ebb10d61f7e4c45c263 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -75,41 +75,11 @@ static unsigned vdso32_size;
 #define        vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
 #define        vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))
 
-/* May not be __init: called during resume */
-void syscall32_cpu_init(void)
-{
-       /* Load these always in case some future AMD CPU supports
-          SYSENTER from compat mode too. */
-       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
-       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
-
-       wrmsrl(MSR_CSTAR, ia32_cstar_target);
-}
-
 #else  /* CONFIG_X86_32 */
 
 #define vdso32_sysenter()      (boot_cpu_has(X86_FEATURE_SEP))
 #define vdso32_syscall()       (0)
 
-void enable_sep_cpu(void)
-{
-       int cpu = get_cpu();
-       struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
-       if (!boot_cpu_has(X86_FEATURE_SEP)) {
-               put_cpu();
-               return;
-       }
-
-       tss->x86_tss.ss1 = __KERNEL_CS;
-       tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
-       wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-       wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
-       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
-       put_cpu();      
-}
-
 #endif /* CONFIG_X86_64 */
 
 int __init sysenter_setup(void)