arch/arm64/kernel/cpu_errata.c
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

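/*
 * Check whether the local CPU's MIDR falls within the given model's
 * [min, max] variant/revision range. Must run on the CPU being tested,
 * with preemption disabled (SCOPE_LOCAL_CPU).
 */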
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
                                       entry->midr_range_min,
                                       entry->midr_range_max);
}

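/*
 * Qualcomm Kryo parts span several part numbers; compare only the
 * implementer, architecture and top nibble of the part number so that
 * every affected Kryo variant matches.
 */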
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_model;
}

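/*
 * Report a mismatch when any strict field of the local CPU's CTR_EL0
 * (e.g. the cache line size) differs from the system-wide safe value
 * established at boot.
 */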
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
                (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

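/*
 * Make EL0 reads of CTR_EL0 trap to EL1, so that the kernel can
 * emulate them with the system-wide safe value instead of the
 * mismatched hardware one.
 */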
static int cpu_enable_trap_ctr_access(void *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
        return 0;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];

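/*
 * Copy the hardened vector sequence into every 0x80-byte vector entry
 * of the chosen 2K slot in the hyp vectors page, then invalidate the
 * icache over the slot so the new code takes effect.
 */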
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

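/*
 * Install @fn as this CPU's branch predictor hardening callback. CPUs
 * that share a callback also share a hyp vectors slot, so a slot is
 * populated only the first time a given callback is installed.
 */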
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        static int last_slot = -1;
        static DEFINE_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        spin_unlock(&bp_lock);
}
#else
#define __qcom_hyp_sanitize_link_stack_start    NULL
#define __qcom_hyp_sanitize_link_stack_end      NULL
#define __smccc_workaround_1_smc_start          NULL
#define __smccc_workaround_1_smc_end            NULL
#define __smccc_workaround_1_hvc_start          NULL
#define __smccc_workaround_1_hvc_end            NULL

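/* Without KVM there are no hyp vectors to patch; just record the callback. */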
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif  /* CONFIG_KVM */

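/*
 * Install the callback only on affected CPUs, and even then only when
 * the CPU does not advertise CSV2 in ID_AA64PFR0_EL1 (CSV2 CPUs are
 * not vulnerable to branch predictor aliasing).
 */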
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
                                    bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        u64 pfr0;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return;

        __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

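/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_1 through the PSCI
 * conduit (HVC or SMC). SMCCC v1.1 is required: v1.0 offers no safe
 * way to discover whether the workaround is implemented.
 */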
static int enable_smccc_arch_workaround_1(void *data)
{
        const struct arm64_cpu_capabilities *entry = data;
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return 0;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return 0;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return 0;
                cb = call_hvc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_hvc_start;
                smccc_end = __smccc_workaround_1_hvc_end;
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return 0;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;

        default:
                return 0;
        }

        install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

        return 0;
}

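/*
 * Overwrite the branch predictor's return stack with benign entries by
 * executing sixteen dummy branch-and-link instructions, then restore
 * the original link register.
 */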
static void qcom_link_stack_sanitization(void)
{
        u64 tmp;

        asm volatile("mov       %0, x30         \n"
                     ".rept     16              \n"
                     "bl        . + 4           \n"
                     ".endr                     \n"
                     "mov       x30, %0         \n"
                     : "=&r" (tmp));
}

static int qcom_enable_link_stack_sanitization(void *data)
{
        const struct arm64_cpu_capabilities *entry = data;

        install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
                                __qcom_hyp_sanitize_link_stack_start,
                                __qcom_hyp_sanitize_link_stack_end);

        return 0;
}
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

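/*
 * Helpers for table entries that match a CPU model over an inclusive
 * variant/revision range, or over all variants and revisions.
 */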
#define MIDR_RANGE(model, min, max) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
        .midr_model = model, \
        .midr_range_min = min, \
        .midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
        .midr_model = model, \
        .midr_range_min = 0, \
        .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

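/*
 * The table of known errata workarounds, terminated by an empty entry.
 * Each entry's matches() hook is run against the local CPU.
 */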
const struct arm64_cpu_capabilities arm64_errata[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM erratum 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                MIDR_RANGE(MIDR_CORTEX_A57,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(1, 2)),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                MIDR_RANGE(MIDR_CORTEX_A57,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(1, 2)),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(1, 1)),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                MIDR_RANGE(MIDR_THUNDERX, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 2),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
        },
        {
        /* Cavium ThunderX, T83 pass 1.0 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
        },
#endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_line_size,
                .def_scope = SCOPE_LOCAL_CPU,
                .enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(0, 0)),
        },
        {
                .desc = "Qualcomm Technologies Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .def_scope = SCOPE_LOCAL_CPU,
                .midr_model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                .desc = "Qualcomm Technologies Falkor erratum 1009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
                           MIDR_CPU_VAR_REV(0, 0),
                           MIDR_CPU_VAR_REV(0, 0)),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
        /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
                .enable = qcom_enable_link_stack_sanitization,
        },
        {
                .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
                MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
                .enable = qcom_enable_link_stack_sanitization,
        },
        {
                .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
                MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                .enable = enable_smccc_arch_workaround_1,
        },
#endif
        {
        }
};

/*
 * The CPU errata workarounds are detected and applied at boot time and
 * the related information is freed soon after. If a new CPU requires a
 * workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
        const struct arm64_cpu_capabilities *caps = arm64_errata;

        for (; caps->matches; caps++) {
                if (cpus_have_cap(caps->capability)) {
                        if (caps->enable)
                                caps->enable((void *)caps);
                } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
                        pr_crit("CPU%d: Requires workaround for %s, not detected at boot time\n",
                                smp_processor_id(),
                                caps->desc ? : "an erratum");
                        cpu_die_early();
                }
        }
}

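/* Detect and record the workarounds that apply to the local CPU. */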
void update_cpu_errata_workarounds(void)
{
        update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

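/* Run the enable() hook for every workaround detected during boot. */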
void __init enable_errata_workarounds(void)
{
        enable_cpu_capabilities(arm64_errata);
}