arch/arm64/kernel/cpu_errata.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Contains CPU specific errata definitions
4  *
5  * Copyright (C) 2014 ARM Ltd.
6  */
7
8 #include <linux/arm-smccc.h>
9 #include <linux/types.h>
10 #include <linux/cpu.h>
11 #include <asm/cpu.h>
12 #include <asm/cputype.h>
13 #include <asm/cpufeature.h>
14 #include <asm/smp_plat.h>
15
16 static bool __maybe_unused
17 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
18 {
19         const struct arm64_midr_revidr *fix;
20         u32 midr = read_cpuid_id(), revidr;
21
22         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
23         if (!is_midr_in_range(midr, &entry->midr_range))
24                 return false;
25
26         midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
27         revidr = read_cpuid(REVIDR_EL1);
28         for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
29                 if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
30                         return false;
31
32         return true;
33 }
34
35 static bool __maybe_unused
36 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
37                             int scope)
38 {
39         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
40         return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
41 }
42
43 static bool __maybe_unused
44 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
45 {
46         u32 model;
47
48         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
49
50         model = read_cpuid_id();
51         model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
52                  MIDR_ARCHITECTURE_MASK;
53
54         return model == entry->midr_range.model;
55 }
56
57 static bool
58 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
59                           int scope)
60 {
61         u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
62         u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
63         u64 ctr_raw, ctr_real;
64
65         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
66
67         /*
68          * We want to make sure that all the CPUs in the system expose
69          * a consistent CTR_EL0 to make sure that applications behave
70          * correctly when migrating between CPUs.
71          *
72          * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
73          *
74          * 1) It is safe if the system doesn't support IDC, as the CPU
75          *    reports IDC = 0 anyway, consistent with the rest.
76          *
77          * 2) If the system has IDC, it is still safe as we trap CTR_EL0
78          *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
79          *
80          * So, we need to make sure either the raw CTR_EL0 or the effective
81          * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
82          */
83         ctr_raw = read_cpuid_cachetype() & mask;
84         ctr_real = read_cpuid_effective_cachetype() & mask;
85
86         return (ctr_real != sys) && (ctr_raw != sys);
87 }
88
89 static void
90 cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
91 {
92         u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
93
94         /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
95         if ((read_cpuid_cachetype() & mask) !=
96             (arm64_ftr_reg_ctrel0.sys_val & mask))
97                 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
98 }
99
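/*
 * Index of the most recently allocated 2K slot in the hardened EL2
 * vector page (see install_bp_hardening_cb() below).
 */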
100 atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
101
102 #include <asm/mmu_context.h>
103 #include <asm/cacheflush.h>
104
105 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
106
107 #ifdef CONFIG_KVM_INDIRECT_VECTORS
108 extern char __smccc_workaround_1_smc_start[];
109 extern char __smccc_workaround_1_smc_end[];
110
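/*
 * Copy the workaround sequence into each of the 16 128-byte vector
 * entries of the given 2K hyp-vector slot, then flush the range so
 * the new vectors are visible to instruction fetch.
 */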
111 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
112                                 const char *hyp_vecs_end)
113 {
114         void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
115         int i;
116
117         for (i = 0; i < SZ_2K; i += 0x80)
118                 memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
119
120         __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
121 }
122
123 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
124                                     const char *hyp_vecs_start,
125                                     const char *hyp_vecs_end)
126 {
127         static DEFINE_RAW_SPINLOCK(bp_lock);
128         int cpu, slot = -1;
129
130         /*
131          * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
132          * we're a guest. Skip the hyp-vectors work.
133          */
134         if (!hyp_vecs_start) {
135                 __this_cpu_write(bp_hardening_data.fn, fn);
136                 return;
137         }
138
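        /*
         * Reuse the slot of any CPU that has already installed this
         * callback; otherwise allocate a fresh slot and copy the
         * workaround vectors into it.
         */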
139         raw_spin_lock(&bp_lock);
140         for_each_possible_cpu(cpu) {
141                 if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
142                         slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
143                         break;
144                 }
145         }
146
147         if (slot == -1) {
148                 slot = atomic_inc_return(&arm64_el2_vector_last_slot);
149                 BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
150                 __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
151         }
152
153         __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
154         __this_cpu_write(bp_hardening_data.fn, fn);
155         raw_spin_unlock(&bp_lock);
156 }
157 #else
158 #define __smccc_workaround_1_smc_start          NULL
159 #define __smccc_workaround_1_smc_end            NULL
160
161 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
162                                       const char *hyp_vecs_start,
163                                       const char *hyp_vecs_end)
164 {
165         __this_cpu_write(bp_hardening_data.fn, fn);
166 }
167 #endif  /* CONFIG_KVM_INDIRECT_VECTORS */
168
171 static void call_smc_arch_workaround_1(void)
172 {
173         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
174 }
175
176 static void call_hvc_arch_workaround_1(void)
177 {
178         arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
179 }
180
181 static void qcom_link_stack_sanitization(void)
182 {
183         u64 tmp;
184
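        /*
         * Stuff the return-address (link) stack with 16 harmless
         * branch-and-link instructions, then restore the original x30.
         */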
185         asm volatile("mov       %0, x30         \n"
186                      ".rept     16              \n"
187                      "bl        . + 4           \n"
188                      ".endr                     \n"
189                      "mov       x30, %0         \n"
190                      : "=&r" (tmp));
191 }
192
193 static bool __nospectre_v2;
194 static int __init parse_nospectre_v2(char *str)
195 {
196         __nospectre_v2 = true;
197         return 0;
198 }
199 early_param("nospectre_v2", parse_nospectre_v2);
200
201 /*
202  * -1: No workaround available
203  *  0: No workaround required
204  *  1: Workaround installed
205  */
206 static int detect_harden_bp_fw(void)
207 {
208         bp_hardening_cb_t cb;
209         void *smccc_start, *smccc_end;
210         struct arm_smccc_res res;
211         u32 midr = read_cpuid_id();
212
213         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
214                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
215
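        /*
         * SMCCC_ARCH_FEATURES result for ARCH_WORKAROUND_1: 1 means this
         * CPU is not affected, 0 means the firmware implements the
         * workaround, negative values mean it is not supported.
         */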
216         switch ((int)res.a0) {
217         case 1:
218                 /* Firmware says we're just fine */
219                 return 0;
220         case 0:
221                 break;
222         default:
223                 return -1;
224         }
225
226         switch (arm_smccc_1_1_get_conduit()) {
227         case SMCCC_CONDUIT_HVC:
228                 cb = call_hvc_arch_workaround_1;
229                 /* This is a guest, no need to patch KVM vectors */
230                 smccc_start = NULL;
231                 smccc_end = NULL;
232                 break;
233
234         case SMCCC_CONDUIT_SMC:
235                 cb = call_smc_arch_workaround_1;
236                 smccc_start = __smccc_workaround_1_smc_start;
237                 smccc_end = __smccc_workaround_1_smc_end;
238                 break;
239
240         default:
241                 return -1;
242         }
243
244         if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
245             ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
246                 cb = qcom_link_stack_sanitization;
247
248         if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
249                 install_bp_hardening_cb(cb, smccc_start, smccc_end);
250
251         return 1;
252 }
253
254 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
255
256 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
257 static bool __ssb_safe = true;
258
259 static const struct ssbd_options {
260         const char      *str;
261         int             state;
262 } ssbd_options[] = {
263         { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
264         { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
265         { "kernel",     ARM64_SSBD_KERNEL, },
266 };
267
268 static int __init ssbd_cfg(char *buf)
269 {
270         int i;
271
272         if (!buf || !buf[0])
273                 return -EINVAL;
274
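        /* Accept any argument that begins with a known option name. */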
275         for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
276                 int len = strlen(ssbd_options[i].str);
277
278                 if (strncmp(buf, ssbd_options[i].str, len))
279                         continue;
280
281                 ssbd_state = ssbd_options[i].state;
282                 return 0;
283         }
284
285         return -EINVAL;
286 }
287 early_param("ssbd", ssbd_cfg);
288
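/*
 * Alternative-instruction callback: patch the single SMCCC call at the
 * call site to use HVC or SMC, matching the firmware conduit. With no
 * conduit available, the original instruction is left untouched.
 */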
289 void __init arm64_update_smccc_conduit(struct alt_instr *alt,
290                                        __le32 *origptr, __le32 *updptr,
291                                        int nr_inst)
292 {
293         u32 insn;
294
295         BUG_ON(nr_inst != 1);
296
297         switch (arm_smccc_1_1_get_conduit()) {
298         case SMCCC_CONDUIT_HVC:
299                 insn = aarch64_insn_get_hvc_value();
300                 break;
301         case SMCCC_CONDUIT_SMC:
302                 insn = aarch64_insn_get_smc_value();
303                 break;
304         default:
305                 return;
306         }
307
308         *updptr = cpu_to_le32(insn);
309 }
310
311 void __init arm64_enable_wa2_handling(struct alt_instr *alt,
312                                       __le32 *origptr, __le32 *updptr,
313                                       int nr_inst)
314 {
315         BUG_ON(nr_inst != 1);
316         /*
317          * Only allow mitigation on EL1 entry/exit and guest
318          * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
319          * be flipped.
320          */
321         if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
322                 *updptr = cpu_to_le32(aarch64_insn_gen_nop());
323 }
324
325 void arm64_set_ssbd_mitigation(bool state)
326 {
327         int conduit;
328
329         if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
330                 pr_info_once("SSBD disabled by kernel configuration\n");
331                 return;
332         }
333
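        /*
         * With the SSBS extension, toggle PSTATE.SSBS directly: clearing
         * the bit disallows speculative store bypass, setting it allows
         * it again.
         */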
334         if (this_cpu_has_cap(ARM64_SSBS)) {
335                 if (state)
336                         asm volatile(SET_PSTATE_SSBS(0));
337                 else
338                         asm volatile(SET_PSTATE_SSBS(1));
339                 return;
340         }
341
342         conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
343                                        NULL);
344
345         WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
346 }
347
348 static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
349                                     int scope)
350 {
351         struct arm_smccc_res res;
352         bool required = true;
353         s32 val;
354         bool this_cpu_safe = false;
355         int conduit;
356
357         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
358
359         if (cpu_mitigations_off())
360                 ssbd_state = ARM64_SSBD_FORCE_DISABLE;
361
362         /* delay setting __ssb_safe until we get a firmware response */
363         if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
364                 this_cpu_safe = true;
365
366         if (this_cpu_has_cap(ARM64_SSBS)) {
367                 if (!this_cpu_safe)
368                         __ssb_safe = false;
369                 required = false;
370                 goto out_printmsg;
371         }
372
373         conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
374                                        ARM_SMCCC_ARCH_WORKAROUND_2, &res);
375
376         if (conduit == SMCCC_CONDUIT_NONE) {
377                 ssbd_state = ARM64_SSBD_UNKNOWN;
378                 if (!this_cpu_safe)
379                         __ssb_safe = false;
380                 return false;
381         }
382
383         val = (s32)res.a0;
384
385         switch (val) {
386         case SMCCC_RET_NOT_SUPPORTED:
387                 ssbd_state = ARM64_SSBD_UNKNOWN;
388                 if (!this_cpu_safe)
389                         __ssb_safe = false;
390                 return false;
391
392         /* machines with mixed mitigation requirements must not return this */
393         case SMCCC_RET_NOT_REQUIRED:
394                 pr_info_once("%s mitigation not required\n", entry->desc);
395                 ssbd_state = ARM64_SSBD_MITIGATED;
396                 return false;
397
398         case SMCCC_RET_SUCCESS:
399                 __ssb_safe = false;
400                 required = true;
401                 break;
402
403         case 1: /* Mitigation not required on this CPU */
404                 required = false;
405                 break;
406
407         default:
408                 WARN_ON(1);
409                 if (!this_cpu_safe)
410                         __ssb_safe = false;
411                 return false;
412         }
413
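        /* Apply the ssbd= command-line policy (default: ARM64_SSBD_KERNEL). */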
414         switch (ssbd_state) {
415         case ARM64_SSBD_FORCE_DISABLE:
416                 arm64_set_ssbd_mitigation(false);
417                 required = false;
418                 break;
419
420         case ARM64_SSBD_KERNEL:
421                 if (required) {
422                         __this_cpu_write(arm64_ssbd_callback_required, 1);
423                         arm64_set_ssbd_mitigation(true);
424                 }
425                 break;
426
427         case ARM64_SSBD_FORCE_ENABLE:
428                 arm64_set_ssbd_mitigation(true);
429                 required = true;
430                 break;
431
432         default:
433                 WARN_ON(1);
434                 break;
435         }
436
437 out_printmsg:
438         switch (ssbd_state) {
439         case ARM64_SSBD_FORCE_DISABLE:
440                 pr_info_once("%s disabled from command-line\n", entry->desc);
441                 break;
442
443         case ARM64_SSBD_FORCE_ENABLE:
444                 pr_info_once("%s forced from command-line\n", entry->desc);
445                 break;
446         }
447
448         return required;
449 }
450
451 /* known invulnerable cores */
452 static const struct midr_range arm64_ssb_cpus[] = {
453         MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
454         MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
455         MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
456         MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
457         {},
458 };
459
460 #ifdef CONFIG_ARM64_ERRATUM_1463225
461 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
462
463 static bool
464 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
465                                int scope)
466 {
467         u32 midr = read_cpuid_id();
468         /* Cortex-A76 r0p0 - r3p1 */
469         struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
470
471         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
472         return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
473 }
474 #endif
475
476 static void __maybe_unused
477 cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
478 {
479         sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
480 }
481
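/*
 * Helpers for building arm64_cpu_capabilities entries that match on
 * MIDR (CPU model/revision) ranges, lists of ranges, or fixed revisions.
 */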
482 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
483         .matches = is_affected_midr_range,                      \
484         .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
485
486 #define CAP_MIDR_ALL_VERSIONS(model)                                    \
487         .matches = is_affected_midr_range,                              \
488         .midr_range = MIDR_ALL_VERSIONS(model)
489
490 #define MIDR_FIXED(rev, revidr_mask) \
491         .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
492
493 #define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
494         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
495         CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
496
497 #define CAP_MIDR_RANGE_LIST(list)                               \
498         .matches = is_affected_midr_range_list,                 \
499         .midr_range_list = list
500
501 /* Errata affecting a range of revisions of a given model variant */
502 #define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
503         ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
504
505 /* Errata affecting a single variant/revision of a model */
506 #define ERRATA_MIDR_REV(model, var, rev)        \
507         ERRATA_MIDR_RANGE(model, var, rev, var, rev)
508
509 /* Errata affecting all variants/revisions of a given model */
510 #define ERRATA_MIDR_ALL_VERSIONS(model)                         \
511         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
512         CAP_MIDR_ALL_VERSIONS(model)
513
514 /* Errata affecting a list of MIDR ranges, all sharing the same workaround */
515 #define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
516         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
517         CAP_MIDR_RANGE_LIST(midr_list)
518
519 /* Track overall mitigation state. The system is mitigated only if all booted cores are. */
520 static bool __hardenbp_enab = true;
521 static bool __spectrev2_safe = true;
522
523 int get_spectre_v2_workaround_state(void)
524 {
525         if (__spectrev2_safe)
526                 return ARM64_BP_HARDEN_NOT_REQUIRED;
527
528         if (!__hardenbp_enab)
529                 return ARM64_BP_HARDEN_UNKNOWN;
530
531         return ARM64_BP_HARDEN_WA_NEEDED;
532 }
533
534 /*
535  * List of CPUs that do not need any Spectre-v2 mitigation at all.
536  */
537 static const struct midr_range spectre_v2_safe_list[] = {
538         MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
539         MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
540         MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
541         MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
542         { /* sentinel */ }
543 };
544
545 /*
546  * Track overall bp hardening across all cores in a heterogeneous machine.
547  * We are only considered "safe" if all booted cores are known safe.
548  */
549 static bool __maybe_unused
550 check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
551 {
552         int need_wa;
553
554         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
555
556         /* If the CPU has CSV2 set, we're safe */
557         if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
558                                                  ID_AA64PFR0_CSV2_SHIFT))
559                 return false;
560
561         /* Alternatively, we have a list of unaffected CPUs */
562         if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
563                 return false;
564
565         /* Fall back to firmware detection */
566         need_wa = detect_harden_bp_fw();
567         if (!need_wa)
568                 return false;
569
570         __spectrev2_safe = false;
571
572         if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
573                 pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
574                 __hardenbp_enab = false;
575                 return false;
576         }
577
578         /* forced off */
579         if (__nospectre_v2 || cpu_mitigations_off()) {
580                 pr_info_once("spectrev2 mitigation disabled by command line option\n");
581                 __hardenbp_enab = false;
582                 return false;
583         }
584
585         if (need_wa < 0) {
586                 pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
587                 __hardenbp_enab = false;
588         }
589
590         return (need_wa > 0);
591 }
592
593 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
594         MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
595         MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
596         {},
597 };
598
599 static bool __maybe_unused
600 needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
601                          int scope)
602 {
603         int i;
604
605         if (!is_affected_midr_range_list(entry, scope) ||
606             !is_hyp_mode_available())
607                 return false;
608
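        /*
         * The trap workaround is only needed if SMT appears to be
         * enabled, i.e. some CPU reports a non-zero thread ID in
         * MPIDR affinity level 0.
         */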
609         for_each_possible_cpu(i) {
610                 if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
611                         return true;
612         }
613
614         return false;
615 }
616
617 #ifdef CONFIG_HARDEN_EL2_VECTORS
618
619 static const struct midr_range arm64_harden_el2_vectors[] = {
620         MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
621         MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
622         {},
623 };
624
625 #endif
626
627 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
628 static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
629 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
630         {
631                 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
632         },
633         {
634                 .midr_range.model = MIDR_QCOM_KRYO,
635                 .matches = is_kryo_midr,
636         },
637 #endif
638 #ifdef CONFIG_ARM64_ERRATUM_1286807
639         {
640                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
641         },
642 #endif
643         {},
644 };
645 #endif
646
647 #ifdef CONFIG_CAVIUM_ERRATUM_27456
648 const struct midr_range cavium_erratum_27456_cpus[] = {
649         /* Cavium ThunderX, T88 pass 1.x - 2.1 */
650         MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
651         /* Cavium ThunderX, T81 pass 1.0 */
652         MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
653         {},
654 };
655 #endif
656
657 #ifdef CONFIG_CAVIUM_ERRATUM_30115
658 static const struct midr_range cavium_erratum_30115_cpus[] = {
659         /* Cavium ThunderX, T88 pass 1.x - 2.2 */
660         MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
661         /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
662         MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
663         /* Cavium ThunderX, T83 pass 1.0 */
664         MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
665         {},
666 };
667 #endif
668
669 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
670 static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
671         {
672                 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
673         },
674         {
675                 .midr_range.model = MIDR_QCOM_KRYO,
676                 .matches = is_kryo_midr,
677         },
678         {},
679 };
680 #endif
681
682 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
683 static const struct midr_range workaround_clean_cache[] = {
684 #if     defined(CONFIG_ARM64_ERRATUM_826319) || \
685         defined(CONFIG_ARM64_ERRATUM_827319) || \
686         defined(CONFIG_ARM64_ERRATUM_824069)
687         /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
688         MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
689 #endif
690 #ifdef  CONFIG_ARM64_ERRATUM_819472
691         /* Cortex-A53 r0p[01]: ARM erratum 819472 */
692         MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
693 #endif
694         {},
695 };
696 #endif
697
698 #ifdef CONFIG_ARM64_ERRATUM_1418040
699 /*
700  * - 1188873 affects r0p0 to r2p0
701  * - 1418040 affects r0p0 to r3p1
702  */
703 static const struct midr_range erratum_1418040_list[] = {
704         /* Cortex-A76 r0p0 to r3p1 */
705         MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
706         /* Neoverse-N1 r0p0 to r3p1 */
707         MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
708         {},
709 };
710 #endif
711
712 #ifdef CONFIG_ARM64_ERRATUM_845719
713 static const struct midr_range erratum_845719_list[] = {
714         /* Cortex-A53 r0p[01234] */
715         MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
716         /* Brahma-B53 r0p[0] */
717         MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
718         {},
719 };
720 #endif
721
722 #ifdef CONFIG_ARM64_ERRATUM_843419
723 static const struct arm64_cpu_capabilities erratum_843419_list[] = {
724         {
725                 /* Cortex-A53 r0p[01234] */
726                 .matches = is_affected_midr_range,
727                 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
728                 MIDR_FIXED(0x4, BIT(8)),
729         },
730         {
731                 /* Brahma-B53 r0p[0] */
732                 .matches = is_affected_midr_range,
733                 ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
734         },
735         {},
736 };
737 #endif
738
739 const struct arm64_cpu_capabilities arm64_errata[] = {
740 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
741         {
742                 .desc = "ARM errata 826319, 827319, 824069, 819472",
743                 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
744                 ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
745                 .cpu_enable = cpu_enable_cache_maint_trap,
746         },
747 #endif
748 #ifdef CONFIG_ARM64_ERRATUM_832075
749         {
750         /* Cortex-A57 r0p0 - r1p2 */
751                 .desc = "ARM erratum 832075",
752                 .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
753                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
754                                   0, 0,
755                                   1, 2),
756         },
757 #endif
758 #ifdef CONFIG_ARM64_ERRATUM_834220
759         {
760         /* Cortex-A57 r0p0 - r1p2 */
761                 .desc = "ARM erratum 834220",
762                 .capability = ARM64_WORKAROUND_834220,
763                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
764                                   0, 0,
765                                   1, 2),
766         },
767 #endif
768 #ifdef CONFIG_ARM64_ERRATUM_843419
769         {
770                 .desc = "ARM erratum 843419",
771                 .capability = ARM64_WORKAROUND_843419,
772                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
773                 .matches = cpucap_multi_entry_cap_matches,
774                 .match_list = erratum_843419_list,
775         },
776 #endif
777 #ifdef CONFIG_ARM64_ERRATUM_845719
778         {
779                 .desc = "ARM erratum 845719",
780                 .capability = ARM64_WORKAROUND_845719,
781                 ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
782         },
783 #endif
784 #ifdef CONFIG_CAVIUM_ERRATUM_23154
785         {
786         /* Cavium ThunderX, pass 1.x */
787                 .desc = "Cavium erratum 23154",
788                 .capability = ARM64_WORKAROUND_CAVIUM_23154,
789                 ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
790         },
791 #endif
792 #ifdef CONFIG_CAVIUM_ERRATUM_27456
793         {
794                 .desc = "Cavium erratum 27456",
795                 .capability = ARM64_WORKAROUND_CAVIUM_27456,
796                 ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
797         },
798 #endif
799 #ifdef CONFIG_CAVIUM_ERRATUM_30115
800         {
801                 .desc = "Cavium erratum 30115",
802                 .capability = ARM64_WORKAROUND_CAVIUM_30115,
803                 ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
804         },
805 #endif
806         {
807                 .desc = "Mismatched cache type (CTR_EL0)",
808                 .capability = ARM64_MISMATCHED_CACHE_TYPE,
809                 .matches = has_mismatched_cache_type,
810                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
811                 .cpu_enable = cpu_enable_trap_ctr_access,
812         },
813 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
814         {
815                 .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
816                 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
817                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
818                 .matches = cpucap_multi_entry_cap_matches,
819                 .match_list = qcom_erratum_1003_list,
820         },
821 #endif
822 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
823         {
824                 .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
825                 .capability = ARM64_WORKAROUND_REPEAT_TLBI,
826                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
827                 .matches = cpucap_multi_entry_cap_matches,
828                 .match_list = arm64_repeat_tlbi_list,
829         },
830 #endif
831 #ifdef CONFIG_ARM64_ERRATUM_858921
832         {
833         /* Cortex-A73 all versions */
834                 .desc = "ARM erratum 858921",
835                 .capability = ARM64_WORKAROUND_858921,
836                 ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
837         },
838 #endif
839         {
840                 .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
841                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
842                 .matches = check_branch_predictor,
843         },
844 #ifdef CONFIG_HARDEN_EL2_VECTORS
845         {
846                 .desc = "EL2 vector hardening",
847                 .capability = ARM64_HARDEN_EL2_VECTORS,
848                 ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
849         },
850 #endif
851         {
852                 .desc = "Speculative Store Bypass Disable",
853                 .capability = ARM64_SSBD,
854                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
855                 .matches = has_ssbd_mitigation,
856                 .midr_range_list = arm64_ssb_cpus,
857         },
858 #ifdef CONFIG_ARM64_ERRATUM_1418040
859         {
860                 .desc = "ARM erratum 1418040",
861                 .capability = ARM64_WORKAROUND_1418040,
862                 ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
863         },
864 #endif
865 #ifdef CONFIG_ARM64_ERRATUM_1165522
866         {
867                 /* Cortex-A76 r0p0 to r2p0 */
868                 .desc = "ARM erratum 1165522",
869                 .capability = ARM64_WORKAROUND_1165522,
870                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
871         },
872 #endif
873 #ifdef CONFIG_ARM64_ERRATUM_1463225
874         {
875                 .desc = "ARM erratum 1463225",
876                 .capability = ARM64_WORKAROUND_1463225,
877                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
878                 .matches = has_cortex_a76_erratum_1463225,
879         },
880 #endif
881 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
882         {
883                 .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
884                 .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
885                 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
886                 .matches = needs_tx2_tvm_workaround,
887         },
888         {
889                 .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
890                 .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
891                 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
892         },
893 #endif
894         {
895         }
896 };
897
898 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
899                             char *buf)
900 {
901         return sprintf(buf, "Mitigation: __user pointer sanitization\n");
902 }
903
904 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
905                 char *buf)
906 {
907         switch (get_spectre_v2_workaround_state()) {
908         case ARM64_BP_HARDEN_NOT_REQUIRED:
909                 return sprintf(buf, "Not affected\n");
910         case ARM64_BP_HARDEN_WA_NEEDED:
911                 return sprintf(buf, "Mitigation: Branch predictor hardening\n");
912         case ARM64_BP_HARDEN_UNKNOWN:
913         default:
914                 return sprintf(buf, "Vulnerable\n");
915         }
916 }
917
918 ssize_t cpu_show_spec_store_bypass(struct device *dev,
919                 struct device_attribute *attr, char *buf)
920 {
921         if (__ssb_safe)
922                 return sprintf(buf, "Not affected\n");
923
924         switch (ssbd_state) {
925         case ARM64_SSBD_KERNEL:
926         case ARM64_SSBD_FORCE_ENABLE:
927                 if (IS_ENABLED(CONFIG_ARM64_SSBD))
928                         return sprintf(buf,
929                             "Mitigation: Speculative Store Bypass disabled via prctl\n");
930         }
931
932         return sprintf(buf, "Vulnerable\n");
933 }