KVM: SVM: restore host save area from assembly
author: Paolo Bonzini <pbonzini@redhat.com>
Mon, 7 Nov 2022 08:49:59 +0000 (03:49 -0500)
committer: Paolo Bonzini <pbonzini@redhat.com>
Wed, 9 Nov 2022 17:25:33 +0000 (12:25 -0500)
Allow access to the percpu area via the GS segment base, which is
needed in order to access the saved host spec_ctrl value.  In linux-next
FILL_RETURN_BUFFER also needs to access percpu data.

For simplicity, the physical address of the save area is added to struct
svm_cpu_data.

Cc: stable@vger.kernel.org
Fixes: a149180fbcf3 ("x86: Add magic AMD return-thunk")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Analyzed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/kvm-asm-offsets.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_ops.h
arch/x86/kvm/svm/vmenter.S

index f83e88b85bf214e707a052511452f6eedab6845d..1b805cd24d669ef59543745aac10bf40d1686a7b 100644 (file)
@@ -18,6 +18,7 @@ static void __used common(void)
                OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
                OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
                OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
+               OFFSET(SD_save_area_pa, svm_cpu_data, save_area_pa);
        }
 
        if (IS_ENABLED(CONFIG_KVM_INTEL)) {
index 4e3a47eb500258d243645fb1d2e22d9597a90cb1..469c1b5617af70fac4bb69df4e1db8061942e774 100644 (file)
@@ -592,7 +592,7 @@ static int svm_hardware_enable(void)
 
        wrmsrl(MSR_EFER, efer | EFER_SVME);
 
-       wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));
+       wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
 
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                /*
@@ -648,6 +648,7 @@ static void svm_cpu_uninit(int cpu)
 
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
+       sd->save_area_pa = 0;
        sd->save_area = NULL;
 }
 
@@ -665,6 +666,7 @@ static int svm_cpu_init(int cpu)
        if (ret)
                goto free_save_area;
 
+       sd->save_area_pa = __sme_page_pa(sd->save_area);
        return 0;
 
 free_save_area:
@@ -1450,7 +1452,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
         * Save additional host state that will be restored on VMEXIT (sev-es)
         * or subsequent vmload of host save area.
         */
-       vmsave(__sme_page_pa(sd->save_area));
+       vmsave(sd->save_area_pa);
        if (sev_es_guest(vcpu->kvm)) {
                struct sev_es_save_area *hostsa;
                hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
@@ -3905,14 +3907,10 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 
        guest_state_enter_irqoff();
 
-       if (sev_es_guest(vcpu->kvm)) {
+       if (sev_es_guest(vcpu->kvm))
                __svm_sev_es_vcpu_run(svm);
-       } else {
-               struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
-
+       else
                __svm_vcpu_run(svm);
-               vmload(__sme_page_pa(sd->save_area));
-       }
 
        guest_state_exit_irqoff();
 }
index 2af6a71126c1ef6b9a2e2e894be6e998bd1c1b8c..83955a4e520eb885c96635abddea564172aa2101 100644 (file)
@@ -287,6 +287,8 @@ struct svm_cpu_data {
        struct kvm_ldttss_desc *tss_desc;
 
        struct page *save_area;
+       unsigned long save_area_pa;
+
        struct vmcb *current_vmcb;
 
        /* index = sev_asid, value = vmcb pointer */
index 9430d6437c9f650ef87a51ce45d37825eeecdd84..36c8af87a707ac0556fb1e50157e70c6305df798 100644 (file)
@@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
        svm_asm1(vmsave, "a" (pa), "memory");
 }
 
-static __always_inline void vmload(unsigned long pa)
-{
-       svm_asm1(vmload, "a" (pa), "memory");
-}
-
 #endif /* __KVM_X86_SVM_OPS_H */
index 5bc2ed7d79c077f0628c260fd1376fb9000e80e8..57440acfc73efa49ea65f8c9838e779d57c4d113 100644 (file)
@@ -49,6 +49,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
        push %_ASM_BX
 
+       /*
+        * Save variables needed after vmexit on the stack, in inverse
+        * order compared to when they are needed.
+        */
+
+       /* Needed to restore access to percpu variables.  */
+       __ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
+
        /* Save @svm. */
        push %_ASM_ARG1
 
@@ -124,6 +132,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 5:     vmsave %_ASM_AX
 6:
 
+       /* Restores GSBASE among other things, allowing access to percpu data.  */
+       pop %_ASM_AX
+7:     vmload %_ASM_AX
+8:
+
 #ifdef CONFIG_RETPOLINE
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -187,10 +200,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 50:    cmpb $0, kvm_rebooting
        jne 6b
        ud2
+70:    cmpb $0, kvm_rebooting
+       jne 8b
+       ud2
 
        _ASM_EXTABLE(1b, 10b)
        _ASM_EXTABLE(3b, 30b)
        _ASM_EXTABLE(5b, 50b)
+       _ASM_EXTABLE(7b, 70b)
 
 SYM_FUNC_END(__svm_vcpu_run)