// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
#include <asm/sev.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
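
/*
 * For reference, a sketch of the register-image convention the two helpers
 * above rely on (inferred from the gprs[] indices used here, not quoted from
 * a spec): rdmsr_safe_regs()/wrmsr_safe_regs() take a u32[8] snapshot of
 * eax..edi, so a 64-bit MSR read looks like:
 *
 *	u32 gprs[8] = { 0 };
 *	gprs[1] = msr;				// %ecx: MSR number
 *	gprs[7] = 0x9c5a203a;			// %edi: K8 access key
 *	err = rdmsr_safe_regs(gprs);
 *	val = gprs[0] | ((u64)gprs[2] << 32);	// %eax/%edx: low/high half
 */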

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266  Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}
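
/*
 * A worked example of the two WHCR encodings programmed above (the field
 * names follow AMD's K6 documentation; treat them as an assumption, the code
 * only depends on the arithmetic):
 *
 *   old style: l = (1<<0) | ((mbytes/4)<<1)
 *	bit 0 enables write allocation and the limit sits above it in
 *	4 MB units, hence the 508 MB clamp (127 * 4 MB).
 *   new style: l = ((mbytes>>2)<<22) | (1<<16)
 *	bit 16 enables write allocation and bits 31:22 hold the limit in
 *	4 MB units, hence the 4092 MB clamp (1023 * 4 MB).
 */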

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		  " processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu_llc_id(cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}
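
	/*
	 * Worked example for the calculation above (the sizes are
	 * illustrative, not a claim about a specific part): CPUID 0x80000005
	 * EDX reporting a 64 KB, 2-way L1I gives upperbit = (64 << 10) / 2 =
	 * 0x8000, so va_align.mask = 0x7000 and bits [14:12] of mmap
	 * addresses receive the same per-boot random value, avoiding L1I
	 * way-aliasing conflicts between processes.
	 */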

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
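
	/*
	 * The bit numbers above (54/33/10) are the per-family positions of
	 * the non-architectural Speculative Store Bypass Disable control in
	 * LS_CFG, as described in AMD's SSB guidance; x86_amd_ls_cfg_ssbd_mask
	 * is what the mitigation code in bugs.c later ORs into the cached
	 * base value. Treat the exact bit meanings as documentation-derived,
	 * not something this file verifies.
	 */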

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;

	case 0x19:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;

	case 0x1a:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x40 ... 0x4f:
		case 0x60 ... 0x6f:
		case 0x70 ... 0x7f:
			setup_force_cpu_cap(X86_FEATURE_ZEN5);
			break;
		default:
			goto warn;
		}
		break;

	default:
		break;
	}

	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
		/*
		 * The RMP table entry format is not architectural: it can
		 * vary by processor and is defined by the per-processor PPR.
		 * Restrict SNP support to the known CPU models and families
		 * for which the RMP table entry format is currently defined.
		 */
		if (!boot_cpu_has(X86_FEATURE_ZEN3) &&
		    !boot_cpu_has(X86_FEATURE_ZEN4) &&
		    !boot_cpu_has(X86_FEATURE_ZEN5))
			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
		else if (!snp_probe_rmptable_info())
			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}

	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
	 *	      any additional functionality based on it.
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}
}
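
/*
 * Worked example of the physical-address adjustment above (values are
 * illustrative): CPUID 0x8000001f EBX bits 11:6 report how many address bits
 * the memory controller steals for encryption metadata, so a part with 48
 * physical bits and a reported reduction of 5 ends up with
 * c->x86_phys_bits = 43. Only the reduction field is consumed here; the
 * C-bit position in EBX bits 5:0 is used by the mem_encrypt code instead.
 */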

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif
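
	/*
	 * Note on the config-space probe above: bus 0, device 24, function 0
	 * is the first K8/Fam10h northbridge, and the check requires both
	 * bit 17 and bit 18 of register 0x68 to be set before trusting
	 * extended APIC IDs. Per the BKDG these are the extended-APIC-ID and
	 * extended-broadcast enables, but take the bit names as an
	 * assumption; the code only relies on the BIOS having set both.
	 */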

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
		u64 value;

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}
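
/*
 * The SBPB detection above is a write-probe: wrmsrl_safe() returns nonzero
 * if the WRMSR to MSR_IA32_PRED_CMD with PRED_CMD_SBPB faults, so a
 * non-faulting write is taken as evidence the selective branch predictor
 * barrier exists even when the CPUID bit is missing. The probe is skipped
 * under hypervisors, which may not emulate the fault behavior faithfully.
 */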

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);

	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}
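
/*
 * Decoding aid for the REP_GOOD check above: cpuid_eax(1) is the raw CPUID
 * signature, so on K8 (family 0xf) 0x0f48 means model 4 stepping 8 and
 * 0x0f58 model 5 stepping 8. The two ranges therefore select revision C and
 * later parts, matching the "C+ stepping" comment (the reading of the
 * signature is ours; the boundary values come from the code).
 */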

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}
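
/*
 * Sketch of why bit 62 is the right one above (an inference from the code,
 * not a spec quote): MSR_AMD64_CPUID_FN_1 appears to hold an override image
 * of CPUID Fn0000_0001 with ECX in the upper dword, so bit 62 = 32 + 30
 * corresponds to ECX bit 30, the RDRAND feature flag - which is exactly what
 * the cpuid_ecx(1) & BIT(30) re-check verifies.
 */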

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zen_common(void)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);
}

static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

	default:
		return false;
	}

	if (boot_cpu_data.microcode < good_rev)
		return false;

	return true;
}

static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}
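
/*
 * Design note: when fixed microcode is absent, the code above falls back to
 * setting the DE_CFG "FP backup fix" chicken bit, which disables the
 * floating-point register-file optimization that Zenbleed (CVE-2023-20593)
 * leaks through - functional but slower, hence the nudge to update
 * microcode. amd_check_microcode() below re-runs this check on every CPU
 * after a late microcode load so the bit can be cleared again.
 */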

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}

static void init_amd_zen5(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	}

	/*
	 * Save up on some future enablement work and do common Zen
	 * settings.
	 */
	if (c->x86 >= 0x17)
		init_amd_zen_common();

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN5))
		init_amd_zen5(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}
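
	/*
	 * Design note: making LFENCE dispatch-serializing gives the kernel a
	 * cheap speculation barrier (cheaper than the MFENCE-based fallback),
	 * and it is the same property the Spectre mitigations rely on when
	 * they emit LFENCE sequences on AMD. The feature flag set above is
	 * what the rest of the kernel keys that choice off.
	 */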

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are
	 * brought up using the trampoline code and as part of it, MSR_EFER
	 * gets prepared there in order to be replicated onto them. Regardless,
	 * set it here again, if not set, to protect against any future
	 * refactoring/code reorganization which might miss setting this
	 * important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
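
/*
 * Worked example for the 4K parsing above (the register value is
 * hypothetical): with CPUID 0x80000006 EBX = 0x10201040, the L2 dTLB 4K
 * entry count is (0x1020 & 0xfff) = 0x020 = 32 and the iTLB count is
 * (0x1040 & 0xfff) = 0x040 = 64; the top four bits of each halfword encode
 * associativity and are deliberately masked off here.
 */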

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
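
/*
 * Usage note (our reading, not a comment carried over from this file): the
 * returned value feeds cpufreq's boost-ratio math, where 255 is the
 * conventional "highest perf" ceiling and 166 is what the listed Zen2/Zen3
 * parts actually report as their maximum, so callers scale frequencies
 * correctly on those models.
 */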

static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
 */
void noinstr amd_clear_divider(void)
{
	/* 0 / 1 with a register divisor; patched in only on X86_BUG_DIV0 parts */
	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
		     :: "a" (0), "d" (0), "r" (1));
}
EXPORT_SYMBOL_GPL(amd_clear_divider);