// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/smp_plat.h>

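/*
 * Generic matcher for MIDR-based errata: the CPU is affected if its MIDR
 * falls within the entry's range, unless REVIDR flags this revision as
 * already fixed.
 */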
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_midr_revidr *fix;
        u32 midr = read_cpuid_id(), revidr;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        if (!is_midr_in_range(midr, &entry->midr_range))
                return false;

        midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
        revidr = read_cpuid(REVIDR_EL1);
        for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                        return false;

        return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

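/*
 * Qualcomm Kryo parts are matched on implementer, architecture and the
 * top nibble of the part number only; the remaining MIDR fields vary
 * across Kryo variants.
 */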
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
        u64 ctr_raw, ctr_real;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /*
         * We want to make sure that all the CPUs in the system expose
         * a consistent CTR_EL0 so that applications behave correctly with
         * migration.
         *
         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
         *
         * 1) It is safe if the system doesn't support IDC, as the CPU anyway
         *    reports IDC = 0, consistent with the rest.
         *
         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
         *
         * So, we need to make sure either the raw CTR_EL0 or the effective
         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
         */
        ctr_raw = read_cpuid_cachetype() & mask;
        ctr_real = read_cpuid_effective_cachetype() & mask;

        return (ctr_real != sys) && (ctr_raw != sys);
}

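/*
 * Clear SCTLR_EL1.UCT on CPUs whose raw CTR_EL0 differs from the
 * system-wide value, so that EL0 reads of CTR_EL0 trap and get emulated
 * with the sanitised copy.
 */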
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
        if ((read_cpuid_cachetype() & mask) !=
            (arm64_ftr_reg_ctrel0.sys_val & mask))
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

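/*
 * Install the hardening sequence at every 0x80-byte vector entry of the
 * given 2K EL2 vector slot, then make it visible to instruction fetch.
 */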
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

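/*
 * Vector slots are shared between CPUs that use the same callback: reuse
 * an existing slot when possible, otherwise claim a fresh one.
 */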
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        static DEFINE_RAW_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        /*
         * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
         * we're a guest. Skip the hyp-vectors work.
         */
        if (!hyp_vecs_start) {
                __this_cpu_write(bp_hardening_data.fn, fn);
                return;
        }

        raw_spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                slot = atomic_inc_return(&arm64_el2_vector_last_slot);
                BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

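/*
 * Stuff the 16-entry return stack with benign branch-and-link targets,
 * then restore the original link register.
 */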
static void qcom_link_stack_sanitization(void)
{
        u64 tmp;

        asm volatile("mov	%0, x30		\n"
                     ".rept	16		\n"
                     "bl	. + 4		\n"
                     ".endr			\n"
                     "mov	x30, %0		\n"
                     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
        __nospectre_v2 = true;
        return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround available
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        switch ((int)res.a0) {
        case 1:
                /* Firmware says we're just fine */
                return 0;
        case 0:
                break;
        default:
                return -1;
        }

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                cb = call_hvc_arch_workaround_1;
                /* This is a guest, no need to patch KVM vectors */
                smccc_start = NULL;
                smccc_end = NULL;
                break;

        case SMCCC_CONDUIT_SMC:
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;

        default:
                return -1;
        }

        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;

        if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
                install_bp_hardening_cb(cb, smccc_start, smccc_end);

        return 1;
}

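/*
 * Non-zero when this CPU must issue the firmware ARCH_WORKAROUND_2 call
 * on kernel entry/exit; read by the entry code.
 */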
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
        const char	*str;
        int		state;
} ssbd_options[] = {
        { "force-on",	ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",	ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

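/*
 * Alternatives callback: rewrite the single placeholder instruction with
 * an HVC or SMC to match the SMCCC conduit in use.
 */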
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case SMCCC_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be modified.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

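/*
 * Toggle the SSBD mitigation: prefer flipping the SSBS PSTATE bit when
 * the CPU implements it, otherwise fall back to the firmware
 * ARCH_WORKAROUND_2 call.
 */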
void arm64_set_ssbd_mitigation(bool state)
{
        int conduit;

        if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
                pr_info_once("SSBD disabled by kernel configuration\n");
                return;
        }

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
                else
                        asm volatile(SET_PSTATE_SSBS(1));
                return;
        }

        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
                                       NULL);

        WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}

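/*
 * Per-CPU capability check: query firmware for ARCH_WORKAROUND_2 support,
 * record whether this core is vulnerable, and apply the ssbd= policy.
 */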
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;
        bool this_cpu_safe = false;
        int conduit;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (cpu_mitigations_off())
                ssbd_state = ARM64_SSBD_FORCE_DISABLE;

        /* delay setting __ssb_safe until we get a firmware response */
        if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
                this_cpu_safe = true;

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (!this_cpu_safe)
                        __ssb_safe = false;
                required = false;
                goto out_printmsg;
        }

        conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                       ARM_SMCCC_ARCH_WORKAROUND_2, &res);

        if (conduit == SMCCC_CONDUIT_NONE) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;

        /* machines with mixed mitigation requirements must not return this */
        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                __ssb_safe = false;
                required = true;
                break;

        case 1:	/* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

out_printmsg:
        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                break;
        }

        return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        {},
};

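/*
 * Cortex-A76 erratum 1463225: the per-CPU flag below is set while the
 * single-step workaround window is active and is consulted by the debug
 * exception handling code.
 */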
#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        u32 midr = read_cpuid_id();
        /* Cortex-A76 r0p0 - r3p1 */
        struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
        if (__spectrev2_safe)
                return ARM64_BP_HARDEN_NOT_REQUIRED;

        if (!__hardenbp_enab)
                return ARM64_BP_HARDEN_UNKNOWN;

        return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        { /* sentinel */ },
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
        int need_wa;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /* If the CPU has CSV2 set, we're safe */
        if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
                                                 ID_AA64PFR0_CSV2_SHIFT))
                return false;

        /* Alternatively, we have a list of unaffected CPUs */
        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
                return false;

        /* Fallback to firmware detection */
        need_wa = detect_harden_bp_fw();
        if (!need_wa)
                return false;

        __spectrev2_safe = false;

        if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
                pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
                __hardenbp_enab = false;
                return false;
        }

        /* forced off */
        if (__nospectre_v2 || cpu_mitigations_off()) {
                pr_info_once("spectrev2 mitigation disabled by command line option\n");
                __hardenbp_enab = false;
                return false;
        }

        if (need_wa < 0) {
                pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
                __hardenbp_enab = false;
        }

        return (need_wa > 0);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};

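/*
 * The TVM trap is only needed when the kernel can run guests (hyp mode
 * available) and the system is multi-threaded; a non-zero MPIDR
 * affinity-0 field on any CPU indicates SMT.
 */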
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
                         int scope)
{
        int i;

        if (!is_affected_midr_range_list(entry, scope) ||
            !is_hyp_mode_available())
                return false;

        for_each_possible_cpu(i) {
                if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
                        return true;
        }

        return false;
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
        },
#endif
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
        /* Cavium ThunderX, T81 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
        MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        /* Cavium ThunderX, T83 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
        /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
        /* Cortex-A53 r0p[01]: ARM erratum 819472 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
        /* Cortex-A76 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
        /* Cortex-A53 r0p[01234] */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        /* Brahma-B53 r0p[0] */
        MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
        {
                /* Cortex-A53 r0p[01234] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
                MIDR_FIXED(0x4, BIT(8)),
        },
        {
                /* Brahma-B53 r0p[0] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        },
        {},
};
#endif

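/*
 * Master list of erratum workarounds; each entry is matched against every
 * CPU as it comes online.
 */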
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
                .desc = "ARM errata 826319, 827319, 824069, 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
        {
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = erratum_843419_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
                /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
        },
#endif
        {
                .desc = "Mismatched cache type (CTR_EL0)",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
                .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = arm64_repeat_tlbi_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = check_branch_predictor,
        },
#ifdef CONFIG_HARDEN_EL2_VECTORS
        {
                .desc = "EL2 vector hardening",
                .capability = ARM64_HARDEN_EL2_VECTORS,
                ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
        },
#endif
        {
                .desc = "Speculative Store Bypass Disable",
                .capability = ARM64_SSBD,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_ssbd_mitigation,
                .midr_range_list = arm64_ssb_cpus,
        },
#ifdef CONFIG_ARM64_ERRATUM_1418040
        {
                .desc = "ARM erratum 1418040",
                .capability = ARM64_WORKAROUND_1418040,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
        {
                /* Cortex-A76 r0p0 to r2p0 */
                .desc = "ARM erratum 1165522",
                .capability = ARM64_WORKAROUND_1165522,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
        },
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
        {
                .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                .matches = needs_tx2_tvm_workaround,
        },
        {
                .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
        },
#endif
        {
        }
};

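/*
 * Hooks backing the /sys/devices/system/cpu/vulnerabilities/* files.
 */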
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        switch (get_spectre_v2_workaround_state()) {
        case ARM64_BP_HARDEN_NOT_REQUIRED:
                return sprintf(buf, "Not affected\n");
        case ARM64_BP_HARDEN_WA_NEEDED:
                return sprintf(buf, "Mitigation: Branch predictor hardening\n");
        case ARM64_BP_HARDEN_UNKNOWN:
        default:
                return sprintf(buf, "Vulnerable\n");
        }
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        if (__ssb_safe)
                return sprintf(buf, "Not affected\n");

        switch (ssbd_state) {
        case ARM64_SSBD_KERNEL:
        case ARM64_SSBD_FORCE_ENABLE:
                if (IS_ENABLED(CONFIG_ARM64_SSBD))
                        return sprintf(buf,
                            "Mitigation: Speculative Store Bypass disabled via prctl\n");
        }

        return sprintf(buf, "Vulnerable\n");
}