arch/x86/kvm/svm/svm_ops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"

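/*
 * Wrappers for SVM instructions that can fault, e.g. #UD/#GP if SVM is
 * not enabled or an operand is invalid.  Each wrapper uses asm goto plus
 * an exception table entry (_ASM_EXTABLE) so that a fault on the
 * instruction at label "1:" is redirected to the local "fault" label,
 * where kvm_spurious_fault() reports the unexpected fault instead of
 * letting the kernel oops.
 */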
#define svm_asm(insn, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) "\n\t"			\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 ::: clobber : fault);				\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)
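/*
 * For illustration only: svm_asm(clgi) expands, roughly, to
 *
 *	asm goto("1: clgi\n\t"
 *		 _ASM_EXTABLE(1b, %l[fault])
 *		 ::: : fault);
 *	return;
 * fault:
 *	kvm_spurious_fault();
 *
 * i.e. a faulting CLGI jumps through the exception table to "fault"
 * rather than generating an unhandled exception.
 */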

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) " %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 :: op1 : clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)				\
do {									\
	asm goto("1: " __stringify(insn) " %1, %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 :: op1, op2 : clobber : fault);			\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

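/*
 * CLGI clears and STGI sets the global interrupt flag (GIF); while GIF
 * is clear, delivery of interrupts and certain other events to the host
 * is blocked.
 */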
static inline void clgi(void)
{
	svm_asm(clgi);
}

static inline void stgi(void)
{
	svm_asm(stgi);
}

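/*
 * INVLPGA invalidates the TLB mapping for the virtual address in rAX,
 * tagged with the ASID in ECX, hence the "a" and "c" register
 * constraints below.
 */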
static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}

/*
 * Despite holding a physical address, the portion of rAX that is consumed
 * by VMSAVE, VMLOAD, etc. is still controlled by the effective address
 * size, hence 'unsigned long' instead of 'hpa_t'.
 */
static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}

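/*
 * Illustrative usage only; the names below are assumptions for the sake
 * of the example, not part of this header.  A hypothetical VMRUN path
 * might save host state and mask host events around guest entry like so:
 *
 *	vmsave(host_save_area_pa);	// stash host state for later VMLOAD
 *	clgi();				// GIF=0: block host interrupts
 *	... VMRUN / guest execution ...
 *	stgi();				// GIF=1: unblock host interrupts
 */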
#endif /* __KVM_X86_SVM_OPS_H */