arch/x86/include/asm/nospec-branch.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative.  It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
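/*
 * How RETPOLINE_JMP works (a description of the sequence below, not new
 * code): the "call" pushes the address of the pause/lfence loop onto the
 * stack and into the return stack buffer, then jumps over it.  Any
 * speculation driven by the return predictor is therefore captured in
 * that loop.  The architectural return address on the stack is then
 * overwritten with \reg, so the "ret" transfers control to the intended
 * indirect target.
 */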
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP \reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(jmp *\reg),				\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(call *\reg),				\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm
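/*
 * A usage sketch (illustrative, not taken from this file): in .S code an
 * indirect call through %rax would be written
 *
 *	CALL_NOSPEC %rax
 *
 * instead of "call *%rax", and likewise JMP_NOSPEC %rax instead of
 * "jmp *%rax".  Without CONFIG_RETPOLINE both macros degrade to the
 * plain indirect instruction.
 */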

/*
 * Overwrite the return stack buffer with safe entries.  This clobbers
 * the BX register (via __clear_rsb).  The \nr argument is accepted for
 * interface compatibility but is not used by this implementation, since
 * __clear_rsb always performs a full clear.
 */
.macro FILL_RETURN_BUFFER nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "", "call __clear_rsb", \ftr
#endif
.endm
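/*
 * A hypothetical invocation (the count and feature bit are illustrative
 * only; the count is ignored by this implementation, as noted above):
 *
 *	FILL_RETURN_BUFFER 16, X86_FEATURE_RETPOLINE
 */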

#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"						\
	".pushsection .discard.nospec\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

#if defined(CONFIG_X86_64) && defined(RETPOLINE)

/*
 * Since the inline asm uses the %V modifier, which is only available in
 * newer GCC versions, the 64-bit variant is gated on RETPOLINE (defined
 * by the Makefile only when the compiler has retpoline support) rather
 * than CONFIG_RETPOLINE.  %V emits the bare register name (e.g. "rax"),
 * so the call is redirected to the matching __x86_indirect_thunk_<reg>.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE(						\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:   call   903f;\n"					\
	"902:   pause;\n"					\
	"       lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:   addl   $4, %%esp;\n"				\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:   call   901b;\n",				\
	X86_FEATURE_RETPOLINE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
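/*
 * A usage sketch (illustrative only; "fn" is a hypothetical function
 * pointer, not something defined in this header).  The retpoline thunk
 * is patched in by the alternative when X86_FEATURE_RETPOLINE is set:
 *
 *	void (*fn)(void);
 *
 *	asm volatile(CALL_NOSPEC
 *		     : : THUNK_TARGET(fn)
 *		     : "memory");
 */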

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_MINIMAL,		/* asm-only retpolines (no compiler support) */
	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,	/* as above, AMD lfence-based flavour */
	SPECTRE_V2_RETPOLINE_GENERIC,		/* full compiler-assisted retpolines */
	SPECTRE_V2_RETPOLINE_AMD,		/* full retpolines, AMD lfence-based flavour */
	SPECTRE_V2_IBRS,
};

/* Bounds of the linker-provided region holding the indirect thunks */
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	alternative_input("",
			  "call __fill_rsb",
			  X86_FEATURE_RETPOLINE,
			  ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
#endif
}

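/*
 * Issue an Indirect Branch Prediction Barrier (IBPB) by writing
 * PRED_CMD_IBPB to MSR_IA32_PRED_CMD.  The WRMSR is patched in only on
 * CPUs with X86_FEATURE_USE_IBPB set; elsewhere this is a no-op.
 */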
static inline void indirect_branch_prediction_barrier(void)
{
	asm volatile(ALTERNATIVE("",
				 "movl %[msr], %%ecx\n\t"
				 "movl %[val], %%eax\n\t"
				 "movl $0, %%edx\n\t"
				 "wrmsr",
				 X86_FEATURE_USE_IBPB)
		     : : [msr] "i" (MSR_IA32_PRED_CMD),
			 [val] "i" (PRED_CMD_IBPB)
		     : "eax", "ecx", "edx", "memory");
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */