/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

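/*
 * Byte offsets of each guest GPR within struct vcpu_svm, built from the
 * offsets generated into kvm-asm-offsets.h at build time.
 */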
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

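/*
 * Everything below runs in the non-instrumentable text section: tracing,
 * kprobes and other instrumentation must stay out of the window around
 * VMRUN where host state is not fully restored.
 */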
.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 */
SYM_FUNC_START(__svm_vcpu_run)

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
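	/*
	 * The pushed value is popped after VM-Exit and handed to VMLOAD,
	 * which restores host segment state (GS base included) so that
	 * percpu accesses work again.
	 */
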
.ifnc _ASM_ARG1, _ASM_DI
	/* Move @svm to RDI. */
	mov %_ASM_ARG1, %_ASM_DI
.endif
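	/*
	 * On 64-bit builds the first argument already arrives in RDI, so
	 * the .ifnc above drops the move; it is only assembled when
	 * _ASM_ARG1 is a different register (i.e. 32-bit builds).
	 */
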
	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
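	/* VMLOAD/VMSAVE take the VMCB physical address implicitly in RAX. */
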
	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
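	/*
	 * VMRUN likewise expects the VMCB physical address in RAX.
	 * current_vmcb points at vmcb02 while running a nested (L2) guest,
	 * at vmcb01 otherwise.
	 */
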
	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI

	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15

	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
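	/*
	 * RDI is loaded last: it still holds @svm and is the base register
	 * for every load above.
	 */
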
	/* Enter guest mode */
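	/*
	 * VMRUN enters the guest using the VMCB whose physical address is
	 * in RAX and continues at the following instruction on #VMEXIT.
	 */
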
	/* Pop @svm to RAX while it's the only available register. */

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)

	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
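	/*
	 * Guest RAX and RSP aren't saved here; hardware context switches
	 * them via the VMCB on VMRUN/#VMEXIT.
	 */
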
	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX

	/* Restores GSBASE among other things, allowing access to percpu data. */
#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
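	/*
	 * A guest can poison the return stack buffer with arbitrary branch
	 * targets; refilling it with benign entries ensures no host RET
	 * consumes a guest-planted prediction.
	 */
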
	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
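	/* The untraining itself is done via UNTRAIN_RET from <asm/nospec-branch.h>. */
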
	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack. In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free. RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
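	/*
	 * A minimal sketch of that clearing, assuming a 64-bit build (the
	 * 32-bit XOR forms zero-extend and thus clear the full 64-bit
	 * registers):
	 *
	 *	xor %ecx, %ecx
	 *	xor %edx, %edx
	 *	xor %ebx, %ebx
	 *	xor %ebp, %ebp
	 *	xor %esi, %esi
	 *	xor %edi, %edi
	 *	xor %r8d,  %r8d
	 *	xor %r9d,  %r9d
	 *	xor %r10d, %r10d
	 *	xor %r11d, %r11d
	 *	xor %r12d, %r12d
	 *	xor %r13d, %r13d
	 *	xor %r14d, %r14d
	 *	xor %r15d, %r15d
	 */
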
10:	cmpb $0, kvm_rebooting
30:	cmpb $0, kvm_rebooting
50:	cmpb $0, kvm_rebooting
70:	cmpb $0, kvm_rebooting
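	/*
	 * The _ASM_EXTABLE entries below route a fault on the
	 * VMLOAD/VMRUN/VMSAVE instructions (labels 1, 3, 5 and 7) to the
	 * matching handler above: if KVM is shutting down (kvm_rebooting
	 * set) the fault is swallowed and execution resumes past the
	 * faulting instruction, otherwise ud2 raises a BUG.
	 */
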
	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
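	/*
	 * For SEV-ES guests the GPRs live in the encrypted VMSA and are
	 * saved/restored by hardware on VMRUN/#VMEXIT, so unlike
	 * __svm_vcpu_run() this path never touches guest GPRs.
	 */
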
	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
3:	cmpb $0, kvm_rebooting

SYM_FUNC_END(__svm_sev_es_vcpu_run)