/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_midr_revidr *fix;
        u32 midr = read_cpuid_id(), revidr;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        if (!is_midr_in_range(midr, &entry->midr_range))
                return false;

        midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
        revidr = read_cpuid(REVIDR_EL1);
        for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                        return false;

        return true;
}
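
/*
 * Illustration (based on the erratum 843419 entry below): an entry
 * carrying MIDR_FIXED(0x4, BIT(8)) declares that a part reporting
 * variant/revision r0p4 with REVIDR_EL1[8] set already has the fix
 * in hardware, so the loop above returns false for it even though
 * r0p4 lies inside the affected MIDR range.
 */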

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_range.model;
}
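
/*
 * Note on the mask above: it keeps only the implementer and
 * architecture fields plus the top nibble of the part number, so a
 * single entry matches the whole Kryo part-number family rather than
 * one exact MIDR value.
 */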

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
        u64 ctr_raw, ctr_real;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /*
         * We want to make sure that all the CPUs in the system expose
         * a consistent CTR_EL0 to make sure that applications behave
         * correctly with migration.
         *
         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
         *
         * 1) It is safe if the system doesn't support IDC, as the CPU
         *    anyway reports IDC = 0, consistent with the rest.
         *
         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
         *
         * So, we need to make sure either the raw CTR_EL0 or the effective
         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
         */
        ctr_raw = read_cpuid_cachetype() & mask;
        ctr_real = read_cpuid_effective_cachetype() & mask;

        return (ctr_real != sys) && (ctr_raw != sys);
}
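
/*
 * Put differently (a sketch of the decision): a secondary CPU is
 * allowed to boot if either its raw or its effective CTR_EL0, masked
 * down to the strict fields, equals the system-wide copy. Only when
 * both differ is the capability set and CTR_EL0 access trapped.
 */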

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
        if ((read_cpuid_cachetype() & mask) !=
            (arm64_ftr_reg_ctrel0.sys_val & mask))
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
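
/*
 * With SCTLR_EL1.UCT clear, an EL0 read of the cache type register,
 * e.g.
 *
 *      mrs     x0, ctr_el0
 *
 * traps to EL1, letting the kernel hand userspace the system-wide
 * safe value instead of this CPU's mismatched one (a sketch of the
 * intent; the emulation itself lives in the trap handling code).
 */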

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
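
/*
 * Layout note: each slot is 2K and an AArch64 exception vector entry
 * is 0x80 bytes, so the copy loop above stamps the workaround
 * sequence at the head of each of the slot's 16 vector entries.
 */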

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        static DEFINE_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        /*
         * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
         * start/end if we're a guest. Skip the hyp-vectors work.
         */
        if (!hyp_vecs_start) {
                __this_cpu_write(bp_hardening_data.fn, fn);
                return;
        }

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                slot = atomic_inc_return(&arm64_el2_vector_last_slot);
                BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start          NULL
#define __smccc_workaround_1_smc_end            NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif  /* CONFIG_KVM_INDIRECT_VECTORS */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
                                    bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        u64 pfr0;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return;

        __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitization(void)
{
        u64 tmp;

        asm volatile("mov       %0, x30         \n"
                     ".rept     16              \n"
                     "bl        . + 4           \n"
                     ".endr                     \n"
                     "mov       x30, %0         \n"
                     : "=&r" (tmp));
}
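
/*
 * How the asm above works: each of the 16 "bl . + 4" instructions
 * pushes a return address onto the CPU's return-stack predictor and
 * falls through to the next instruction, displacing any
 * attacker-controlled entries; x30 is saved and restored around the
 * sequence so the real return address survives.
 */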

static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return;
                cb = call_hvc_arch_workaround_1;
                /* This is a guest, no need to patch KVM vectors */
                smccc_start = NULL;
                smccc_end = NULL;
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;

        default:
                return;
        }

        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;

        install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
}
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
        const char      *str;
        int             state;
} ssbd_options[] = {
        { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",     ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
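
/*
 * Example command-line usage (matching the ssbd_options table above):
 *
 *      ssbd=force-on   mitigation always on, for kernel and userspace
 *      ssbd=force-off  mitigation always off
 *      ssbd=kernel     mitigation on in the kernel; userspace can opt
 *                      in via prctl() (the default state)
 */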

void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case PSCI_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}
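
/*
 * A sketch of the call-site pattern this serves: the alternative
 * covers a single placeholder instruction in the entry code, which
 * this callback rewrites into "hvc #0" or "smc #0" so that
 * ARCH_WORKAROUND_2 calls use whichever conduit PSCI discovered.
 */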

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be flipped.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

void arm64_set_ssbd_mitigation(bool state)
{
        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
                else
                        asm volatile(SET_PSTATE_SSBS(1));
                return;
        }

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }
}
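
/*
 * PSTATE.SSBS polarity, for the branch above: SSBS == 1 permits
 * speculative store bypass and SSBS == 0 forbids it, so enabling the
 * mitigation (state == true) clears the bit and disabling it sets
 * the bit.
 */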

static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (this_cpu_has_cap(ARM64_SSBS)) {
                required = false;
                goto out_printmsg;
        }

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        default:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;

        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                required = true;
                break;

        case 1: /* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

out_printmsg:
        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                break;
        }

        return required;
}
#endif  /* CONFIG_ARM64_SSBD */

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
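
/*
 * Clearing SCTLR_EL1.UCI makes EL0 cache maintenance instructions
 * (e.g. "dc cvau, x0") trap to EL1, where the kernel can carry out
 * the maintenance on the CPU's behalf with the erratum worked
 * around.
 */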

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                                    \
        .matches = is_affected_midr_range,                              \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)
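
/*
 * Expansion example (purely illustrative): an entry written as
 *
 *      ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2)
 *
 * is equivalent to
 *
 *      .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *      .matches = is_affected_midr_range,
 *      .midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 2)
 */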

/*
 * Generic helper for handling capabilities with multiple (match, enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_cpu_capabilities *caps;

        for (caps = entry->match_list; caps->matches; caps++)
                if (caps->matches(caps, scope))
                        return true;

        return false;
}

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
        const struct arm64_cpu_capabilities *caps;

        for (caps = entry->match_list; caps->matches; caps++)
                if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
                    caps->cpu_enable)
                        caps->cpu_enable(caps);
}
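
/*
 * A hypothetical example (not taken from this file) of wiring several
 * (match, enable) pairs to one capability bit via match_list:
 *
 *      static const struct arm64_cpu_capabilities example_list[] = {
 *              {
 *                      ERRATA_MIDR_REV(MIDR_CORTEX_A53, 0, 0),
 *                      .cpu_enable = cpu_enable_cache_maint_trap,
 *              },
 *              {},
 *      };
 *
 *      {
 *              .capability = <some capability>,
 *              .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *              .matches = multi_entry_cap_matches,
 *              .cpu_enable = multi_entry_cap_cpu_enable,
 *              .match_list = example_list,
 *      },
 */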

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
        MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
        MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
        {},
};

#endif

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};

#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
                /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
                /* Cortex-A53 r0p[01] */
                .desc = "ARM erratum 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
        {
                /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
                MIDR_FIXED(0x4, BIT(8)),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
                /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
                /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
                /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
                                  0, 0,
                                  1, 1),
        },
        {
                /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
                /* Cavium ThunderX, T88 pass 1.x - 2.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
                                  0, 0,
                                  1, 2),
        },
        {
                /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        },
        {
                /* Cavium ThunderX, T83 pass 1.0 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        },
#endif
        {
                .desc = "Mismatched cache type (CTR_EL0)",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .desc = "Qualcomm Technologies Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                .desc = "Qualcomm Technologies Falkor erratum 1009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                .cpu_enable = enable_smccc_arch_workaround_1,
                ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
        },
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
        {
                .desc = "EL2 vector hardening",
                .capability = ARM64_HARDEN_EL2_VECTORS,
                ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
        },
#endif
#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypass Disable",
                .capability = ARM64_SSBD,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_ssbd_mitigation,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
        {
                /* Cortex-A76 r0p0 to r2p0 */
                .desc = "ARM erratum 1188873",
                .capability = ARM64_WORKAROUND_1188873,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
        },
#endif
        {
        }
};