1 /* SPDX-License-Identifier: GPL-2.0 */
3 #ifndef _ASM_X86_NOSPEC_BRANCH_H_
4 #define _ASM_X86_NOSPEC_BRANCH_H_
6 #include <linux/static_key.h>
7 #include <linux/objtool.h>
8 #include <linux/linkage.h>
10 #include <asm/alternative.h>
11 #include <asm/cpufeatures.h>
12 #include <asm/msr-index.h>
13 #include <asm/unwind_hints.h>
14 #include <asm/percpu.h>
15 #include <asm/current.h>
18 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
19 * issue in software.
21 * The tracking does not use a counter. It uses arithmetic shift
22 * right on call entry and logical shift left on return.
24 * The depth tracking variable is initialized to 0x8000.... when the call
25 * depth is zero. The arithmetic shift right sign extends the MSB and
26 * saturates after the 12th call. The shift count is 5 for both directions
27 * so the tracking covers 12 nested calls.
30 * 0: 0x8000000000000000 0x0000000000000000
31 * 1: 0xfc00000000000000 0xf000000000000000
33 * 11: 0xfffffffffffffff8 0xfffffffffffffc00
34 * 12: 0xffffffffffffffff 0xffffffffffffffe0
36 * After a return buffer fill, the depth is credited with 12 calls before
37 * the next stuffing has to take place.
39 * There is an inaccuracy when calls and returns are interleaved rather
40 * than strictly nested.
48 * The shift count might cause this to be off by one in either direction,
49 * but there is still a cushion vs. the RSB depth. The algorithm does not
50 * claim to be perfect and it can be speculated around by the CPU, but it
51 * is considered to obfuscate the problem enough to make exploitation
52 * extremely difficult.
54 #define RET_DEPTH_SHIFT 5
55 #define RSB_RET_STUFF_LOOPS 16
56 #define RET_DEPTH_INIT 0x8000000000000000ULL
57 #define RET_DEPTH_INIT_FROM_CALL 0xfc00000000000000ULL
58 #define RET_DEPTH_CREDIT 0xffffffffffffffffULL
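/*
 * Illustrative sketch only (not used by the build): the same tracking
 * arithmetic expressed in C, assuming the per-CPU depth value is held in
 * a plain u64 named "depth":
 *
 *	u64 depth = RET_DEPTH_INIT;
 *
 *	depth = (u64)((s64)depth >> RET_DEPTH_SHIFT);	// account a call
 *	depth <<= RET_DEPTH_SHIFT;			// account a return
 *
 * Each call extends the sign bit by another RET_DEPTH_SHIFT bits; after
 * about 12 nested calls (see the table above) the value saturates at all
 * ones, which is also the value that CREDIT_CALL_DEPTH below writes after
 * an RSB refill.
 */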
60 #ifdef CONFIG_CALL_THUNKS_DEBUG
61 # define CALL_THUNKS_DEBUG_INC_CALLS \
62 incq PER_CPU_VAR(__x86_call_count);
63 # define CALL_THUNKS_DEBUG_INC_RETS \
64 incq PER_CPU_VAR(__x86_ret_count);
65 # define CALL_THUNKS_DEBUG_INC_STUFFS \
66 incq PER_CPU_VAR(__x86_stuffs_count);
67 # define CALL_THUNKS_DEBUG_INC_CTXSW \
68 incq PER_CPU_VAR(__x86_ctxsw_count);
70 # define CALL_THUNKS_DEBUG_INC_CALLS
71 # define CALL_THUNKS_DEBUG_INC_RETS
72 # define CALL_THUNKS_DEBUG_INC_STUFFS
73 # define CALL_THUNKS_DEBUG_INC_CTXSW
76 #if defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)
78 #include <asm/asm-offsets.h>
80 #define CREDIT_CALL_DEPTH \
81 movq $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);
83 #define RESET_CALL_DEPTH \
86 movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);
88 #define RESET_CALL_DEPTH_FROM_CALL \
91 movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth); \
92 CALL_THUNKS_DEBUG_INC_CALLS
94 #define INCREMENT_CALL_DEPTH \
95 sarq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth); \
96 CALL_THUNKS_DEBUG_INC_CALLS
99 #define CREDIT_CALL_DEPTH
100 #define RESET_CALL_DEPTH
101 #define RESET_CALL_DEPTH_FROM_CALL
102 #define INCREMENT_CALL_DEPTH
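/*
 * How the pieces are meant to compose, as a rough sketch only (the real
 * call-side accounting is patched in by the call thunk machinery and the
 * return thunk lives in retpoline.S):
 *
 *	INCREMENT_CALL_DEPTH	// on (roughly) every call: sarq $5 of the
 *				// per-CPU depth value
 *	...
 *	CREDIT_CALL_DEPTH	// after the return thunk has refilled the
 *				// RSB: depth := -1, i.e. full credit
 */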
106 * Fill the CPU return stack buffer.
108 * Each entry in the RSB, if used for a speculative 'ret', contains an
109 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
111 * This is required in various cases for retpoline and IBRS-based
112 * mitigations for the Spectre variant 2 vulnerability: sometimes to
113 * eliminate potentially bogus entries from the RSB, and sometimes
114 * purely to ensure that it doesn't become empty, which on some CPUs would
115 * allow predictions from other (unwanted!) sources to be used.
117 * We define a CPP macro such that it can be used from both .S files and
118 * inline assembly. It's possible to do a .macro and then include that
119 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
122 #define RETPOLINE_THUNK_SIZE 32
123 #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
126 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
128 #define __FILL_RETURN_SLOT \
129 ANNOTATE_INTRA_FUNCTION_CALL; \
135 * Stuff the entire RSB.
137 * Google experimented with loop-unrolling and this turned out to be
138 * the optimal version - two calls, each with their own speculation
139 * trap should their return address end up getting used, in a loop.
142 #define __FILL_RETURN_BUFFER(reg, nr) \
147 add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
150 /* barrier for jnz misprediction */ \
153 CALL_THUNKS_DEBUG_INC_CTXSW
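/*
 * Conceptually, each slot filled by __FILL_RETURN_SLOT above looks like
 * this sketch (local label names are illustrative):
 *
 *	call	1f		// pushes a return address into the RSB
 * 2:	pause; lfence; jmp 2b	// speculation trap if that entry gets used
 * 1:
 *
 * The "add ..., %_ASM_SP" afterwards drops the architectural return
 * addresses again while the RSB entries stay in place.
 */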
156 * i386 doesn't unconditionally have LFENCE, as such it can't use the
157 * LFENCE-based fill loop above and falls back to a simple unrolled fill.
159 #define __FILL_RETURN_BUFFER(reg, nr) \
161 __FILL_RETURN_SLOT; \
163 add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
167 * Stuff a single RSB slot.
169 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
170 * forced to retire before letting a RET instruction execute.
172 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
173 * before at least one CALL has been forced to retire.
175 #define __FILL_ONE_RETURN \
177 add $(BITS_PER_LONG/8), %_ASM_SP; \
183 * This should be used immediately before an indirect jump/call. It tells
184 * objtool the subsequent indirect jump/call is vouched safe for retpoline
187 .macro ANNOTATE_RETPOLINE_SAFE
189 .pushsection .discard.retpoline_safe
195 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
196 * vs RETBleed validation.
198 #define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
201 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END; this should
202 * eventually turn into its own annotation.
204 .macro VALIDATE_UNRET_END
205 #if defined(CONFIG_NOINSTR_VALIDATION) && \
206 (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
207 ANNOTATE_RETPOLINE_SAFE
213 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
214 * to the retpoline thunk with a CS prefix when the register requires
215 * a REX prefix byte to encode. Also see apply_retpolines().
217 .macro __CS_PREFIX reg:req
218 .irp rs,r8,r9,r10,r11,r12,r13,r14,r15
226 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
227 * indirect jmp/call which may be susceptible to the Spectre variant 2
228 * attack.
230 * NOTE: these do not take kCFI into account and are thus not comparable to C
231 * indirect calls, take care when using. The target of these should be an ENDBR
232 * instruction irrespective of kCFI.
234 .macro JMP_NOSPEC reg:req
235 #ifdef CONFIG_MITIGATION_RETPOLINE
237 jmp __x86_indirect_thunk_\reg
244 .macro CALL_NOSPEC reg:req
245 #ifdef CONFIG_MITIGATION_RETPOLINE
247 call __x86_indirect_thunk_\reg
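/*
 * Example use from assembly (sketch; the pointer load and register choice
 * are illustrative only):
 *
 *	movq	func_ptr(%rip), %r11
 *	CALL_NOSPEC	r11
 *
 * With retpolines enabled this emits "call __x86_indirect_thunk_r11",
 * preceded by a CS prefix via __CS_PREFIX because r11 needs a REX prefix
 * to encode; without retpolines it is a plain indirect call.
 */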
254 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
255 * monstrosity above manually.
257 .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
258 ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
259 __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
260 __stringify(nop;nop;__FILL_ONE_RETURN), \ftr2
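/*
 * Typical invocations (illustrative; the real call sites live in the entry
 * and KVM code): full stuffing on context switch, or the lighter
 * single-slot variant selected via the second feature flag:
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT, \
 *			   X86_FEATURE_RSB_VMEXIT_LITE
 */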
266 * The CALL to srso_alias_untrain_ret() must be patched in directly at
267 * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
268 * must be the target of a CALL instruction instead of indirectly
269 * jumping to a wrapper which then calls it. Therefore, this macro is
270 * called outside of __UNTRAIN_RET below, for the time being, until the
271 * kernel can support nested alternatives with arbitrary nesting.
273 .macro CALL_UNTRAIN_RET
274 #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
275 ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
276 "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
281 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
282 * return thunk isn't mapped into the userspace tables (then again, AMD
283 * typically has NO_MELTDOWN).
285 * While retbleed_untrain_ret() doesn't clobber anything but requires a stack,
286 * entry_ibpb() will clobber AX, CX, DX.
288 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
289 * where we have a stack but before any RET instruction.
291 .macro __UNTRAIN_RET ibpb_feature, call_depth_insns
292 #if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)
296 "call entry_ibpb", \ibpb_feature, \
297 __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
301 #define UNTRAIN_RET \
302 __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)
304 #define UNTRAIN_RET_VM \
305 __UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)
307 #define UNTRAIN_RET_FROM_CALL \
308 __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)
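/*
 * Illustrative placement in an entry path (sketch only; see the comment
 * above for the actual constraints):
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	// kernel mappings available
 *	...					// a stack is available here
 *	UNTRAIN_RET				// before any RET executes
 */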
311 .macro CALL_DEPTH_ACCOUNT
312 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
314 __stringify(INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
319 * Macro to execute the VERW instruction that mitigates transient data sampling
320 * attacks such as MDS. On affected systems a microcode update overloaded the
321 * VERW instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
323 * Note: Only the memory operand variant of VERW clears the CPU buffers.
325 .macro CLEAR_CPU_BUFFERS
326 ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
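/*
 * Illustrative use (sketch): issued as late as possible before returning
 * to user space or entering a guest, after the last kernel memory access:
 *
 *	...
 *	CLEAR_CPU_BUFFERS
 *	iretq
 */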
330 .macro CLEAR_BRANCH_HISTORY
331 ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
334 .macro CLEAR_BRANCH_HISTORY_VMEXIT
335 ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT
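/*
 * Illustrative placement (sketch): CLEAR_BRANCH_HISTORY runs early on the
 * 64-bit syscall entry paths and CLEAR_BRANCH_HISTORY_VMEXIT on the
 * VM-exit path, before user/guest-controlled branch history can steer
 * kernel indirect branches:
 *
 *	SYM_CODE_START(entry_SYSCALL_64)
 *		...
 *		CLEAR_BRANCH_HISTORY
 */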
338 #define CLEAR_BRANCH_HISTORY
339 #define CLEAR_BRANCH_HISTORY_VMEXIT
342 #else /* __ASSEMBLY__ */
344 #define ANNOTATE_RETPOLINE_SAFE \
346 ".pushsection .discard.retpoline_safe\n\t" \
350 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
351 extern retpoline_thunk_t __x86_indirect_thunk_array[];
352 extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
353 extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
355 #ifdef CONFIG_MITIGATION_RETHUNK
356 extern void __x86_return_thunk(void);
358 static inline void __x86_return_thunk(void) {}
361 #ifdef CONFIG_MITIGATION_UNRET_ENTRY
362 extern void retbleed_return_thunk(void);
364 static inline void retbleed_return_thunk(void) {}
367 extern void srso_alias_untrain_ret(void);
369 #ifdef CONFIG_MITIGATION_SRSO
370 extern void srso_return_thunk(void);
371 extern void srso_alias_return_thunk(void);
373 static inline void srso_return_thunk(void) {}
374 static inline void srso_alias_return_thunk(void) {}
377 extern void retbleed_return_thunk(void);
378 extern void srso_return_thunk(void);
379 extern void srso_alias_return_thunk(void);
381 extern void entry_untrain_ret(void);
382 extern void entry_ibpb(void);
385 extern void clear_bhb_loop(void);
388 extern void (*x86_return_thunk)(void);
390 extern void __warn_thunk(void);
392 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
393 extern void call_depth_return_thunk(void);
395 #define CALL_DEPTH_ACCOUNT \
397 __stringify(INCREMENT_CALL_DEPTH), \
398 X86_FEATURE_CALL_DEPTH)
400 #ifdef CONFIG_CALL_THUNKS_DEBUG
401 DECLARE_PER_CPU(u64, __x86_call_count);
402 DECLARE_PER_CPU(u64, __x86_ret_count);
403 DECLARE_PER_CPU(u64, __x86_stuffs_count);
404 DECLARE_PER_CPU(u64, __x86_ctxsw_count);
406 #else /* !CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
408 static inline void call_depth_return_thunk(void) {}
409 #define CALL_DEPTH_ACCOUNT ""
411 #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
413 #ifdef CONFIG_MITIGATION_RETPOLINE
416 extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
417 #include <asm/GEN-for-each-reg.h>
421 extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
422 #include <asm/GEN-for-each-reg.h>
426 extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
427 #include <asm/GEN-for-each-reg.h>
433 * Inline asm uses the %V modifier which is only available in newer GCC;
434 * that is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
436 # define CALL_NOSPEC \
438 ANNOTATE_RETPOLINE_SAFE \
439 "call *%[thunk_target]\n", \
440 "call __x86_indirect_thunk_%V[thunk_target]\n", \
441 X86_FEATURE_RETPOLINE, \
443 ANNOTATE_RETPOLINE_SAFE \
444 "call *%[thunk_target]\n", \
445 X86_FEATURE_RETPOLINE_LFENCE)
447 # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
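/*
 * Example use from C (sketch; variable and symbol names are hypothetical,
 * the pattern mirrors e.g. the Hyper-V hypercall wrappers):
 *
 *	u64 status;
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (status)
 *		     : THUNK_TARGET(hypercall_page), "c" (control), "d" (input)
 *		     : "cc", "memory");
 */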
449 #else /* CONFIG_X86_32 */
451 * For i386 we use the original ret-equivalent retpoline, because
452 * otherwise we'll run out of registers. We don't care about CET
453 * compatibility here.
455 # define CALL_NOSPEC \
457 ANNOTATE_RETPOLINE_SAFE \
458 "call *%[thunk_target]\n", \
461 "901: call 903f;\n" \
466 "903: lea 4(%%esp), %%esp;\n" \
467 " pushl %[thunk_target];\n" \
470 "904: call 901b;\n", \
471 X86_FEATURE_RETPOLINE, \
473 ANNOTATE_RETPOLINE_SAFE \
474 "call *%[thunk_target]\n", \
475 X86_FEATURE_RETPOLINE_LFENCE)
477 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
479 #else /* No retpoline for C / inline asm */
480 # define CALL_NOSPEC "call *%[thunk_target]\n"
481 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
484 /* The Spectre V2 mitigation variants */
485 enum spectre_v2_mitigation {
487 SPECTRE_V2_RETPOLINE,
490 SPECTRE_V2_EIBRS_RETPOLINE,
491 SPECTRE_V2_EIBRS_LFENCE,
495 /* The indirect branch speculation control variants */
496 enum spectre_v2_user_mitigation {
497 SPECTRE_V2_USER_NONE,
498 SPECTRE_V2_USER_STRICT,
499 SPECTRE_V2_USER_STRICT_PREFERRED,
500 SPECTRE_V2_USER_PRCTL,
501 SPECTRE_V2_USER_SECCOMP,
504 /* The Speculative Store Bypass disable variants */
505 enum ssb_mitigation {
506 SPEC_STORE_BYPASS_NONE,
507 SPEC_STORE_BYPASS_DISABLE,
508 SPEC_STORE_BYPASS_PRCTL,
509 SPEC_STORE_BYPASS_SECCOMP,
512 static __always_inline
513 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
515 asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
518 "d" ((u32)(val >> 32)),
519 [feature] "i" (feature)
523 extern u64 x86_pred_cmd;
525 static inline void indirect_branch_prediction_barrier(void)
527 alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
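/*
 * Example (sketch): the mm switching code issues the barrier before
 * running a task that must not inherit the previous task's branch
 * predictions, roughly:
 *
 *	if (static_branch_unlikely(&switch_mm_always_ibpb))
 *		indirect_branch_prediction_barrier();
 */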
530 /* The Intel SPEC CTRL MSR base value cache */
531 extern u64 x86_spec_ctrl_base;
532 DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
533 extern void update_spec_ctrl_cond(u64 val);
534 extern u64 spec_ctrl_current(void);
537 * With retpoline, we must use IBRS to restrict branch prediction
538 * before calling into firmware.
540 * (Implemented as CPP macros due to header hell.)
542 #define firmware_restrict_branch_speculation_start() \
545 alternative_msr_write(MSR_IA32_SPEC_CTRL, \
546 spec_ctrl_current() | SPEC_CTRL_IBRS, \
547 X86_FEATURE_USE_IBRS_FW); \
548 alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \
549 X86_FEATURE_USE_IBPB_FW); \
552 #define firmware_restrict_branch_speculation_end() \
554 alternative_msr_write(MSR_IA32_SPEC_CTRL, \
555 spec_ctrl_current(), \
556 X86_FEATURE_USE_IBRS_FW); \
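/*
 * Example (sketch): EFI runtime service calls are bracketed with these
 * helpers so that firmware runs with branch prediction restricted:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call(...);		// hypothetical firmware call
 *	firmware_restrict_branch_speculation_end();
 */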
560 DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
561 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
562 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
564 DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
566 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
568 DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
570 extern u16 mds_verw_sel;
572 #include <asm/segment.h>
575 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
577 * This uses the otherwise unused and obsolete VERW instruction in
578 * combination with microcode which triggers a CPU buffer flush when the
579 * instruction is executed.
581 static __always_inline void mds_clear_cpu_buffers(void)
583 static const u16 ds = __KERNEL_DS;
586 * Has to be the memory-operand variant because only that
587 * guarantees the CPU buffer flush functionality according to
588 * documentation. The register-operand variant does not.
589 * Works with any segment selector, but a valid writable
590 * data segment is the fastest variant.
592 * "cc" clobber is required because VERW modifies ZF.
594 asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
598 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
600 * Clear CPU buffers if the corresponding static key is enabled
602 static __always_inline void mds_idle_clear_cpu_buffers(void)
604 if (static_branch_likely(&mds_idle_clear))
605 mds_clear_cpu_buffers();
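/*
 * Example (sketch): the idle entry code clears the buffers right before
 * putting the CPU to sleep, so stale data cannot leak to the sibling
 * thread while this CPU idles:
 *
 *	mds_idle_clear_cpu_buffers();
 *	__mwait(eax, ecx);		// or HLT; illustrative idle entry
 */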
608 #endif /* __ASSEMBLY__ */
610 #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */