// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

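/*
 * Boot-time entry point for mitigation selection: identify the boot CPU,
 * snapshot MSR_IA32_SPEC_CTRL, and pick the Spectre v2 and Speculative
 * Store Bypass mitigations before alternatives are patched.
 */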
void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed-size MTRRs in there, and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems to be
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

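/*
 * x86_virt_spec_ctrl - switch SPEC_CTRL/SSBD state between host and guest
 * @guest_spec_ctrl:      the guest's view of MSR_IA32_SPEC_CTRL
 * @guest_virt_spec_ctrl: the guest's view of MSR_VIRT_SPEC_CTRL (SSBD only)
 * @setguest:             true when switching to the guest values, false when
 *                        restoring the host values
 */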
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and OR in the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculative_store_bypass_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

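/*
 * Engage Speculative Store Bypass Disable on AMD CPUs which do not expose it
 * through MSR_IA32_SPEC_CTRL, using either MSR_AMD64_VIRT_SPEC_CTRL or the
 * family specific SSBD bit in MSR_AMD64_LS_CFG.
 */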
static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

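/*
 * Module loader hook: flag the system as potentially vulnerable when a
 * module that was built without retpolines is loaded into a
 * retpoline-mitigated kernel.
 */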
bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

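/*
 * Log the command line selection, depending on whether the CPU is actually
 * affected by Spectre v2.
 */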
static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

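/* Compare a (not NUL-terminated) command line argument against an option string. */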
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

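/* The spectre_v2= command line arguments and the mitigation commands they map to. */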
static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

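/*
 * Parse "nospectre_v2" and "spectre_v2=" from the kernel command line and
 * translate them into a mitigation command, falling back to AUTO on unknown
 * or unusable selections.
 */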
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
        else {
                ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
                if (ret < 0)
                        return SPECTRE_V2_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                        if (!match_option(arg, ret, mitigation_options[i].option))
                                continue;
                        cmd = mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

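/*
 * Select and enable the Spectre v2 mitigation: pick a retpoline mode based on
 * the command line and CPU vendor, then enable RSB filling, IBPB and firmware
 * IBRS where supported.
 */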
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO,
         * there is nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor PTI is available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this, fill
         * the entire RSB, even when using IBRS.
         *
         * Skylake-era CPUs have a separate issue with *underflow* of the
         * RSB, where they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or is deactivated in favour of retpolines, the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_PTI) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
        }

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. But firmware isn't, so use IBRS to protect that.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

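/*
 * Parse "nospec_store_bypass_disable" and "spec_store_bypass_disable=" from
 * the kernel command line, falling back to AUTO on unknown options.
 */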
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

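/*
 * Map the command line selection to an SSB mitigation mode and, for the
 * unconditional "on" mode, engage SSBD right away via MSR_IA32_SPEC_CTRL or
 * the AMD specific mechanism.
 */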
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

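/*
 * Per-task control of the SSB mitigation in the prctl and seccomp modes.
 * For reference, userspace typically reaches this through the speculation
 * control prctl, e.g. (illustrative only):
 *
 *      prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *            PR_SPEC_DISABLE, 0, 0);
 */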
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        bool update;

        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        default:
                return -ERANGE;
        }

        /*
         * If being set on a non-current task, delay setting the CPU
         * mitigation until it is next scheduled.
         */
        if (task == current && update)
                speculative_store_bypass_update_current();

        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

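/* Report the task's SSB mitigation state, as queried via PR_GET_SPECULATION_CTRL. */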
static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

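/*
 * Propagate the boot CPU's speculation control setup to a freshly brought up
 * secondary CPU (AP).
 */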
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#ifdef CONFIG_SYSFS

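/*
 * Common handler for the sysfs vulnerability files in
 * /sys/devices/system/cpu/vulnerabilities/.
 */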
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif