x86: provide 64-bit with a load_sp0 function.
author     Glauber de Oliveira Costa <gcosta@redhat.com>
           Wed, 30 Jan 2008 12:31:31 +0000 (13:31 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 30 Jan 2008 12:31:31 +0000 (13:31 +0100)
Paravirt guests need to inform the underlying hypervisor whenever the sp0
tss field changes. i386 already has such a function, and it is now used
for x86_64 as well. The msr handling part of the original version, which
is unnecessary for 64-bit, is wrapped in an ifdef. Since the function no
longer makes sense in processor_32.h, it is moved to the common header.
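
For context, a paravirt backend does not call native_load_sp0 directly;
it installs its own implementation in the paravirt ops table, and the
load_sp0 wrapper in asm/paravirt.h dispatches to it. A minimal sketch of
such a backend hook, assuming the pv_cpu_ops structure of this series;
example_load_sp0 and the hypercall helper example_set_kernel_sp are
hypothetical:

    static void example_load_sp0(struct tss_struct *tss,
                                 struct thread_struct *thread)
    {
            /* Hypothetical hypercall: tell the hypervisor which stack
             * to switch to on the next kernel entry. */
            example_set_kernel_sp(thread->sp0);
            /* Keep the shadow TSS consistent with native state. */
            native_load_sp0(tss, thread);
    }

    /* Installed once at init time, e.g.:
     *         pv_cpu_ops.load_sp0 = example_load_sp0;
     */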

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/kernel/process_64.c
arch/x86/kernel/smpboot_64.c
include/asm-x86/processor.h
include/asm-x86/processor_32.h

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index af56104b73ff57913511fb0e88d1ba8e43cc0b07..e3a3610ade104a522e8378758032ed884ec9b7c2 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -639,7 +639,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        /*
         * Reload esp0, LDT and the page table pointer:
         */
-       tss->x86_tss.sp0 = next->sp0;
+       load_sp0(tss, next);
 
        /* 
         * Switch DS and ES.
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 2ea02a71b6448c97202ad8ea19a16cb8fe78d7e7..5bd42ce144da541e071809edba87280a282124e9 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -614,7 +614,7 @@ do_rest:
        start_rip = setup_trampoline();
 
        init_rsp = c_idle.idle->thread.sp;
-       per_cpu(init_tss, cpu).x86_tss.sp0 = init_rsp;
+       load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
        initial_code = start_secondary;
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index cede9ad3dc6efa7ebba2c2f6839a75d0a8027d01..b1ea521563626b289e9037d3e024109e4fa2d4d7 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -193,8 +193,22 @@ static inline void native_set_iopl_mask(unsigned mask)
 #endif
 }
 
+static inline void native_load_sp0(struct tss_struct *tss,
+                                  struct thread_struct *thread)
+{
+       tss->x86_tss.sp0 = thread->sp0;
+#ifdef CONFIG_X86_32
+       /* Only happens when SEP is enabled, no need to test "SEP"arately */
+       if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+               tss->x86_tss.ss1 = thread->sysenter_cs;
+               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+       }
+#endif
+}
 
-#ifndef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define __cpuid native_cpuid
 #define paravirt_enabled() 0
 
@@ -206,6 +220,12 @@ static inline void native_set_iopl_mask(unsigned mask)
 #define set_debugreg(value, register)                          \
        native_set_debugreg(register, value)
 
+static inline void load_sp0(struct tss_struct *tss,
+                           struct thread_struct *thread)
+{
+       native_load_sp0(tss, thread);
+}
+
 #define set_iopl_mask native_set_iopl_mask
 #endif /* CONFIG_PARAVIRT */
 
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 57b345bc3c7434a9a534dfc0a523209a3fe58d66..53037d1a6ae6d70a64f7f2113cfd026a5740cc8a 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -278,26 +278,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
-static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-       tss->x86_tss.sp0 = thread->sp0;
-       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
-       if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
-               tss->x86_tss.ss1 = thread->sysenter_cs;
-               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-       }
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-
-static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-       native_load_sp0(tss, thread);
-}
-#endif /* CONFIG_PARAVIRT */
-
 /* generic versions from gas */
 #define GENERIC_NOP1   ".byte 0x90\n"
 #define GENERIC_NOP2           ".byte 0x89,0xf6\n"