Merge branches 'x86/early-printk', 'x86/microcode' and 'core/objtool' into x86/urgent...
[sfrench/cifs-2.6.git] / arch / arm64 / kernel / cpufeature.c
1 /*
2  * Contains CPU feature definitions
3  *
4  * Copyright (C) 2015 ARM Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #define pr_fmt(fmt) "CPU features: " fmt
20
21 #include <linux/bsearch.h>
22 #include <linux/cpumask.h>
23 #include <linux/crash_dump.h>
24 #include <linux/sort.h>
25 #include <linux/stop_machine.h>
26 #include <linux/types.h>
27 #include <linux/mm.h>
28 #include <asm/cpu.h>
29 #include <asm/cpufeature.h>
30 #include <asm/cpu_ops.h>
31 #include <asm/fpsimd.h>
32 #include <asm/mmu_context.h>
33 #include <asm/processor.h>
34 #include <asm/sysreg.h>
35 #include <asm/traps.h>
36 #include <asm/virt.h>
37
38 unsigned long elf_hwcap __read_mostly;
39 EXPORT_SYMBOL_GPL(elf_hwcap);
40
41 #ifdef CONFIG_COMPAT
42 #define COMPAT_ELF_HWCAP_DEFAULT        \
43                                 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
44                                  COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
45                                  COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
46                                  COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
47                                  COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
48                                  COMPAT_HWCAP_LPAE)
49 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
50 unsigned int compat_elf_hwcap2 __read_mostly;
51 #endif
52
53 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
54 EXPORT_SYMBOL(cpu_hwcaps);
55
56 /*
57  * Flag to indicate if we have computed the system wide
58  * capabilities based on the boot time active CPUs. This
59  * will be used to determine if a new booting CPU should
60  * go through the verification process to make sure that it
61  * supports the system capabilities, without using a hotplug
62  * notifier.
63  */
64 static bool sys_caps_initialised;
65
66 static inline void set_sys_caps_initialised(void)
67 {
68         sys_caps_initialised = true;
69 }
70
71 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
72 {
73         /* file-wide pr_fmt adds "CPU features: " prefix */
74         pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
75         return 0;
76 }
77
78 static struct notifier_block cpu_hwcaps_notifier = {
79         .notifier_call = dump_cpu_hwcaps
80 };
81
82 static int __init register_cpu_hwcaps_dumper(void)
83 {
84         atomic_notifier_chain_register(&panic_notifier_list,
85                                        &cpu_hwcaps_notifier);
86         return 0;
87 }
88 __initcall(register_cpu_hwcaps_dumper);
89
90 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
91 EXPORT_SYMBOL(cpu_hwcap_keys);
92
93 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
94         {                                               \
95                 .sign = SIGNED,                         \
96                 .visible = VISIBLE,                     \
97                 .strict = STRICT,                       \
98                 .type = TYPE,                           \
99                 .shift = SHIFT,                         \
100                 .width = WIDTH,                         \
101                 .safe_val = SAFE_VAL,                   \
102         }
103
104 /* Define a feature with unsigned values */
105 #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
106         __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
107
108 /* Define a feature with a signed value */
109 #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
110         __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
111
112 #define ARM64_FTR_END                                   \
113         {                                               \
114                 .width = 0,                             \
115         }
116
117 /* meta feature for alternatives */
118 static bool __maybe_unused
119 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
120
121 static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
122
123 /*
124  * NOTE: Any changes to the visibility of features should be kept in
125  * sync with the documentation of the CPU feature register ABI.
126  */
127 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
128         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
129         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
130         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
131         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
132         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
133         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
134         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
135         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
136         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
137         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
138         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
139         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
140         ARM64_FTR_END,
141 };
142
143 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
144         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
145         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
146         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
147         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
148         ARM64_FTR_END,
149 };
150
151 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
152         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
153         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
154         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
155         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
156                                    FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
157         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
158         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
159         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
160         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
161         /* Linux doesn't care about the EL3 */
162         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
163         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
164         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
165         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
166         ARM64_FTR_END,
167 };
168
169 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
170         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
171         ARM64_FTR_END,
172 };
173
174 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
175         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
176         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
177         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
178         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
179         /* Linux shouldn't care about secure memory */
180         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
181         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
182         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
183         /*
184          * Differing PARange is fine as long as all peripherals and memory are mapped
185          * within the minimum PARange of all CPUs
186          */
187         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
188         ARM64_FTR_END,
189 };
190
191 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
192         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
193         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
194         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
195         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
196         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
197         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
198         ARM64_FTR_END,
199 };
200
201 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
202         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
203         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
204         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
205         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
206         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
207         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
208         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
209         ARM64_FTR_END,
210 };
211
212 static const struct arm64_ftr_bits ftr_ctr[] = {
213         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
214         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
215         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
216         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
217         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
218         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
219         /*
220          * Linux can handle differing I-cache policies. Userspace JITs will
221          * make use of *minLine.
222          * If we have differing I-cache policies, report it as the weakest - VIPT.
223          */
224         ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),       /* L1Ip */
225         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
226         ARM64_FTR_END,
227 };
228
229 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
230         .name           = "SYS_CTR_EL0",
231         .ftr_bits       = ftr_ctr
232 };
233
234 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
235         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),   /* InnerShr */
236         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),       /* FCSE */
237         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),    /* AuxReg */
238         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),       /* TCM */
239         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),       /* ShareLvl */
240         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),    /* OuterShr */
241         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* PMSA */
242         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),        /* VMSA */
243         ARM64_FTR_END,
244 };
245
246 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
247         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
248         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
249         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
250         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
251         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
252         /*
253          * We can instantiate multiple PMU instances with different levels
254          * of support.
255          */
256         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
257         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
258         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
259         ARM64_FTR_END,
260 };
261
262 static const struct arm64_ftr_bits ftr_mvfr2[] = {
263         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* FPMisc */
264         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* SIMDMisc */
265         ARM64_FTR_END,
266 };
267
268 static const struct arm64_ftr_bits ftr_dczid[] = {
269         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),            /* DZP */
270         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),       /* BS */
271         ARM64_FTR_END,
272 };
273
274
275 static const struct arm64_ftr_bits ftr_id_isar5[] = {
276         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
277         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
278         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
279         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
280         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
281         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
282         ARM64_FTR_END,
283 };
284
285 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
286         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* ac2 */
287         ARM64_FTR_END,
288 };
289
290 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
291         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
292         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
293         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* State1 */
294         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* State0 */
295         ARM64_FTR_END,
296 };
297
298 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
299         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
300         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),   /* PerfMon */
301         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
302         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
303         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
304         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
305         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
306         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
307         ARM64_FTR_END,
308 };
309
310 static const struct arm64_ftr_bits ftr_zcr[] = {
311         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
312                 ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),        /* LEN */
313         ARM64_FTR_END,
314 };
315
316 /*
317  * Common ftr bits for a 32bit register with all hidden, strict
318  * attributes, with 4bit feature fields and a default safe value of
319  * 0. Covers the following 32bit registers:
320  * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
321  */
322 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
323         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
324         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
325         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
326         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
327         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
328         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
329         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
330         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
331         ARM64_FTR_END,
332 };
333
334 /* Table for a single 32bit feature value */
335 static const struct arm64_ftr_bits ftr_single32[] = {
336         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
337         ARM64_FTR_END,
338 };
339
340 static const struct arm64_ftr_bits ftr_raz[] = {
341         ARM64_FTR_END,
342 };
343
344 #define ARM64_FTR_REG(id, table) {              \
345         .sys_id = id,                           \
346         .reg =  &(struct arm64_ftr_reg){        \
347                 .name = #id,                    \
348                 .ftr_bits = &((table)[0]),      \
349         }}
350
351 static const struct __ftr_reg_entry {
352         u32                     sys_id;
353         struct arm64_ftr_reg    *reg;
354 } arm64_ftr_regs[] = {
355
356         /* Op1 = 0, CRn = 0, CRm = 1 */
357         ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
358         ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
359         ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
360         ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
361         ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
362         ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
363         ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
364
365         /* Op1 = 0, CRn = 0, CRm = 2 */
366         ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
367         ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
368         ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
369         ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
370         ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
371         ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
372         ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
373
374         /* Op1 = 0, CRn = 0, CRm = 3 */
375         ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
376         ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
377         ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
378
379         /* Op1 = 0, CRn = 0, CRm = 4 */
380         ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
381         ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
382         ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
383
384         /* Op1 = 0, CRn = 0, CRm = 5 */
385         ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
386         ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
387
388         /* Op1 = 0, CRn = 0, CRm = 6 */
389         ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
390         ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
391
392         /* Op1 = 0, CRn = 0, CRm = 7 */
393         ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
394         ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
395         ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
396
397         /* Op1 = 0, CRn = 1, CRm = 2 */
398         ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
399
400         /* Op1 = 3, CRn = 0, CRm = 0 */
401         { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
402         ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
403
404         /* Op1 = 3, CRn = 14, CRm = 0 */
405         ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
406 };
407
408 static int search_cmp_ftr_reg(const void *id, const void *regp)
409 {
410         return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
411 }
412
413 /*
414  * get_arm64_ftr_reg - Lookup a feature register entry using its
415  * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
416  * ascending order of sys_id , we use binary search to find a matching
417  * entry.
418  *
419  * returns - Upon success,  matching ftr_reg entry for id.
420  *         - NULL on failure. It is upto the caller to decide
421  *           the impact of a failure.
422  */
423 static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
424 {
425         const struct __ftr_reg_entry *ret;
426
427         ret = bsearch((const void *)(unsigned long)sys_id,
428                         arm64_ftr_regs,
429                         ARRAY_SIZE(arm64_ftr_regs),
430                         sizeof(arm64_ftr_regs[0]),
431                         search_cmp_ftr_reg);
432         if (ret)
433                 return ret->reg;
434         return NULL;
435 }
436
437 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
438                                s64 ftr_val)
439 {
440         u64 mask = arm64_ftr_mask(ftrp);
441
442         reg &= ~mask;
443         reg |= (ftr_val << ftrp->shift) & mask;
444         return reg;
445 }
446
447 static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
448                                 s64 cur)
449 {
450         s64 ret = 0;
451
452         switch (ftrp->type) {
453         case FTR_EXACT:
454                 ret = ftrp->safe_val;
455                 break;
456         case FTR_LOWER_SAFE:
457                 ret = new < cur ? new : cur;
458                 break;
459         case FTR_HIGHER_SAFE:
460                 ret = new > cur ? new : cur;
461                 break;
462         default:
463                 BUG();
464         }
465
466         return ret;
467 }
468
469 static void __init sort_ftr_regs(void)
470 {
471         int i;
472
473         /* Check that the array is sorted so that we can do the binary search */
474         for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
475                 BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
476 }
477
478 /*
479  * Initialise the CPU feature register from Boot CPU values.
480  * Also initiliases the strict_mask for the register.
481  * Any bits that are not covered by an arm64_ftr_bits entry are considered
482  * RES0 for the system-wide value, and must strictly match.
483  */
484 static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
485 {
486         u64 val = 0;
487         u64 strict_mask = ~0x0ULL;
488         u64 user_mask = 0;
489         u64 valid_mask = 0;
490
491         const struct arm64_ftr_bits *ftrp;
492         struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
493
494         BUG_ON(!reg);
495
496         for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
497                 u64 ftr_mask = arm64_ftr_mask(ftrp);
498                 s64 ftr_new = arm64_ftr_value(ftrp, new);
499
500                 val = arm64_ftr_set_value(ftrp, val, ftr_new);
501
502                 valid_mask |= ftr_mask;
503                 if (!ftrp->strict)
504                         strict_mask &= ~ftr_mask;
505                 if (ftrp->visible)
506                         user_mask |= ftr_mask;
507                 else
508                         reg->user_val = arm64_ftr_set_value(ftrp,
509                                                             reg->user_val,
510                                                             ftrp->safe_val);
511         }
512
513         val &= valid_mask;
514
515         reg->sys_val = val;
516         reg->strict_mask = strict_mask;
517         reg->user_mask = user_mask;
518 }
519
520 extern const struct arm64_cpu_capabilities arm64_errata[];
521 static void __init setup_boot_cpu_capabilities(void);
522
523 void __init init_cpu_features(struct cpuinfo_arm64 *info)
524 {
525         /* Before we start using the tables, make sure it is sorted */
526         sort_ftr_regs();
527
528         init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
529         init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
530         init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
531         init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
532         init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
533         init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
534         init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
535         init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
536         init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
537         init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
538         init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
539         init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
540         init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
541
542         if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
543                 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
544                 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
545                 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
546                 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
547                 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
548                 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
549                 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
550                 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
551                 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
552                 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
553                 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
554                 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
555                 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
556                 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
557                 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
558                 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
559         }
560
561         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
562                 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
563                 sve_init_vq_map();
564         }
565
566         /*
567          * Detect and enable early CPU capabilities based on the boot CPU,
568          * after we have initialised the CPU feature infrastructure.
569          */
570         setup_boot_cpu_capabilities();
571 }
572
573 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
574 {
575         const struct arm64_ftr_bits *ftrp;
576
577         for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
578                 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
579                 s64 ftr_new = arm64_ftr_value(ftrp, new);
580
581                 if (ftr_cur == ftr_new)
582                         continue;
583                 /* Find a safe value */
584                 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
585                 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
586         }
587
588 }
589
590 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
591 {
592         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
593
594         BUG_ON(!regp);
595         update_cpu_ftr_reg(regp, val);
596         if ((boot & regp->strict_mask) == (val & regp->strict_mask))
597                 return 0;
598         pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
599                         regp->name, boot, cpu, val);
600         return 1;
601 }
602
603 /*
604  * Update system wide CPU feature registers with the values from a
605  * non-boot CPU. Also performs SANITY checks to make sure that there
606  * aren't any insane variations from that of the boot CPU.
607  */
608 void update_cpu_features(int cpu,
609                          struct cpuinfo_arm64 *info,
610                          struct cpuinfo_arm64 *boot)
611 {
612         int taint = 0;
613
614         /*
615          * The kernel can handle differing I-cache policies, but otherwise
616          * caches should look identical. Userspace JITs will make use of
617          * *minLine.
618          */
619         taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
620                                       info->reg_ctr, boot->reg_ctr);
621
622         /*
623          * Userspace may perform DC ZVA instructions. Mismatched block sizes
624          * could result in too much or too little memory being zeroed if a
625          * process is preempted and migrated between CPUs.
626          */
627         taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
628                                       info->reg_dczid, boot->reg_dczid);
629
630         /* If different, timekeeping will be broken (especially with KVM) */
631         taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
632                                       info->reg_cntfrq, boot->reg_cntfrq);
633
634         /*
635          * The kernel uses self-hosted debug features and expects CPUs to
636          * support identical debug features. We presently need CTX_CMPs, WRPs,
637          * and BRPs to be identical.
638          * ID_AA64DFR1 is currently RES0.
639          */
640         taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
641                                       info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
642         taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
643                                       info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
644         /*
645          * Even in big.LITTLE, processors should be identical instruction-set
646          * wise.
647          */
648         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
649                                       info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
650         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
651                                       info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
652
653         /*
654          * Differing PARange support is fine as long as all peripherals and
655          * memory are mapped within the minimum PARange of all CPUs.
656          * Linux should not care about secure memory.
657          */
658         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
659                                       info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
660         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
661                                       info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
662         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
663                                       info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
664
665         /*
666          * EL3 is not our concern.
667          */
668         taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
669                                       info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
670         taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
671                                       info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
672
673         taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
674                                       info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
675
676         /*
677          * If we have AArch32, we care about 32-bit features for compat.
678          * If the system doesn't support AArch32, don't update them.
679          */
680         if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
681                 id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
682
683                 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
684                                         info->reg_id_dfr0, boot->reg_id_dfr0);
685                 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
686                                         info->reg_id_isar0, boot->reg_id_isar0);
687                 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
688                                         info->reg_id_isar1, boot->reg_id_isar1);
689                 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
690                                         info->reg_id_isar2, boot->reg_id_isar2);
691                 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
692                                         info->reg_id_isar3, boot->reg_id_isar3);
693                 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
694                                         info->reg_id_isar4, boot->reg_id_isar4);
695                 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
696                                         info->reg_id_isar5, boot->reg_id_isar5);
697
698                 /*
699                  * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
700                  * ACTLR formats could differ across CPUs and therefore would have to
701                  * be trapped for virtualization anyway.
702                  */
703                 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
704                                         info->reg_id_mmfr0, boot->reg_id_mmfr0);
705                 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
706                                         info->reg_id_mmfr1, boot->reg_id_mmfr1);
707                 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
708                                         info->reg_id_mmfr2, boot->reg_id_mmfr2);
709                 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
710                                         info->reg_id_mmfr3, boot->reg_id_mmfr3);
711                 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
712                                         info->reg_id_pfr0, boot->reg_id_pfr0);
713                 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
714                                         info->reg_id_pfr1, boot->reg_id_pfr1);
715                 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
716                                         info->reg_mvfr0, boot->reg_mvfr0);
717                 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
718                                         info->reg_mvfr1, boot->reg_mvfr1);
719                 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
720                                         info->reg_mvfr2, boot->reg_mvfr2);
721         }
722
723         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
724                 taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
725                                         info->reg_zcr, boot->reg_zcr);
726
727                 /* Probe vector lengths, unless we already gave up on SVE */
728                 if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
729                     !sys_caps_initialised)
730                         sve_update_vq_map();
731         }
732
733         /*
734          * Mismatched CPU features are a recipe for disaster. Don't even
735          * pretend to support them.
736          */
737         if (taint) {
738                 pr_warn_once("Unsupported CPU feature variation detected.\n");
739                 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
740         }
741 }
742
743 u64 read_sanitised_ftr_reg(u32 id)
744 {
745         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
746
747         /* We shouldn't get a request for an unsupported register */
748         BUG_ON(!regp);
749         return regp->sys_val;
750 }
751
752 #define read_sysreg_case(r)     \
753         case r:         return read_sysreg_s(r)
754
755 /*
756  * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
757  * Read the system register on the current CPU
758  */
759 static u64 __read_sysreg_by_encoding(u32 sys_id)
760 {
761         switch (sys_id) {
762         read_sysreg_case(SYS_ID_PFR0_EL1);
763         read_sysreg_case(SYS_ID_PFR1_EL1);
764         read_sysreg_case(SYS_ID_DFR0_EL1);
765         read_sysreg_case(SYS_ID_MMFR0_EL1);
766         read_sysreg_case(SYS_ID_MMFR1_EL1);
767         read_sysreg_case(SYS_ID_MMFR2_EL1);
768         read_sysreg_case(SYS_ID_MMFR3_EL1);
769         read_sysreg_case(SYS_ID_ISAR0_EL1);
770         read_sysreg_case(SYS_ID_ISAR1_EL1);
771         read_sysreg_case(SYS_ID_ISAR2_EL1);
772         read_sysreg_case(SYS_ID_ISAR3_EL1);
773         read_sysreg_case(SYS_ID_ISAR4_EL1);
774         read_sysreg_case(SYS_ID_ISAR5_EL1);
775         read_sysreg_case(SYS_MVFR0_EL1);
776         read_sysreg_case(SYS_MVFR1_EL1);
777         read_sysreg_case(SYS_MVFR2_EL1);
778
779         read_sysreg_case(SYS_ID_AA64PFR0_EL1);
780         read_sysreg_case(SYS_ID_AA64PFR1_EL1);
781         read_sysreg_case(SYS_ID_AA64DFR0_EL1);
782         read_sysreg_case(SYS_ID_AA64DFR1_EL1);
783         read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
784         read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
785         read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
786         read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
787         read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
788
789         read_sysreg_case(SYS_CNTFRQ_EL0);
790         read_sysreg_case(SYS_CTR_EL0);
791         read_sysreg_case(SYS_DCZID_EL0);
792
793         default:
794                 BUG();
795                 return 0;
796         }
797 }
798
799 #include <linux/irqchip/arm-gic-v3.h>
800
801 static bool
802 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
803 {
804         int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
805
806         return val >= entry->min_field_value;
807 }
808
809 static bool
810 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
811 {
812         u64 val;
813
814         WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
815         if (scope == SCOPE_SYSTEM)
816                 val = read_sanitised_ftr_reg(entry->sys_reg);
817         else
818                 val = __read_sysreg_by_encoding(entry->sys_reg);
819
820         return feature_matches(val, entry);
821 }
822
823 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
824 {
825         bool has_sre;
826
827         if (!has_cpuid_feature(entry, scope))
828                 return false;
829
830         has_sre = gic_enable_sre();
831         if (!has_sre)
832                 pr_warn_once("%s present but disabled by higher exception level\n",
833                              entry->desc);
834
835         return has_sre;
836 }
837
838 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
839 {
840         u32 midr = read_cpuid_id();
841
842         /* Cavium ThunderX pass 1.x and 2.x */
843         return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
844                 MIDR_CPU_VAR_REV(0, 0),
845                 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
846 }
847
848 static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
849 {
850         u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
851
852         return cpuid_feature_extract_signed_field(pfr0,
853                                         ID_AA64PFR0_FP_SHIFT) < 0;
854 }
855
856 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
857                           int scope)
858 {
859         u64 ctr;
860
861         if (scope == SCOPE_SYSTEM)
862                 ctr = arm64_ftr_reg_ctrel0.sys_val;
863         else
864                 ctr = read_cpuid_effective_cachetype();
865
866         return ctr & BIT(CTR_IDC_SHIFT);
867 }
868
869 static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
870 {
871         /*
872          * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
873          * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
874          * to the CTR_EL0 on this CPU and emulate it with the real/safe
875          * value.
876          */
877         if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
878                 sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
879 }
880
881 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
882                           int scope)
883 {
884         u64 ctr;
885
886         if (scope == SCOPE_SYSTEM)
887                 ctr = arm64_ftr_reg_ctrel0.sys_val;
888         else
889                 ctr = read_cpuid_cachetype();
890
891         return ctr & BIT(CTR_DIC_SHIFT);
892 }
893
894 static bool __maybe_unused
895 has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
896 {
897         /*
898          * Kdump isn't guaranteed to power-off all secondary CPUs, CNP
899          * may share TLB entries with a CPU stuck in the crashed
900          * kernel.
901          */
902          if (is_kdump_kernel())
903                 return false;
904
905         return has_cpuid_feature(entry, scope);
906 }
907
908 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
909 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
910
911 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
912                                 int scope)
913 {
914         /* List of CPUs that are not vulnerable and don't need KPTI */
915         static const struct midr_range kpti_safe_list[] = {
916                 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
917                 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
918                 { /* sentinel */ }
919         };
920         char const *str = "command line option";
921
922         /*
923          * For reasons that aren't entirely clear, enabling KPTI on Cavium
924          * ThunderX leads to apparent I-cache corruption of kernel text, which
925          * ends as well as you might imagine. Don't even try.
926          */
927         if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
928                 str = "ARM64_WORKAROUND_CAVIUM_27456";
929                 __kpti_forced = -1;
930         }
931
932         /* Forced? */
933         if (__kpti_forced) {
934                 pr_info_once("kernel page table isolation forced %s by %s\n",
935                              __kpti_forced > 0 ? "ON" : "OFF", str);
936                 return __kpti_forced > 0;
937         }
938
939         /* Useful for KASLR robustness */
940         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
941                 return true;
942
943         /* Don't force KPTI for CPUs that are not vulnerable */
944         if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
945                 return false;
946
947         /* Defer to CPU feature registers */
948         return !has_cpuid_feature(entry, scope);
949 }
950
951 static void
952 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
953 {
954         typedef void (kpti_remap_fn)(int, int, phys_addr_t);
955         extern kpti_remap_fn idmap_kpti_install_ng_mappings;
956         kpti_remap_fn *remap_fn;
957
958         static bool kpti_applied = false;
959         int cpu = smp_processor_id();
960
961         if (kpti_applied)
962                 return;
963
964         remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
965
966         cpu_install_idmap();
967         remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
968         cpu_uninstall_idmap();
969
970         if (!cpu)
971                 kpti_applied = true;
972
973         return;
974 }
975
976 static int __init parse_kpti(char *str)
977 {
978         bool enabled;
979         int ret = strtobool(str, &enabled);
980
981         if (ret)
982                 return ret;
983
984         __kpti_forced = enabled ? 1 : -1;
985         return 0;
986 }
987 early_param("kpti", parse_kpti);
988 #endif  /* CONFIG_UNMAP_KERNEL_AT_EL0 */
989
990 #ifdef CONFIG_ARM64_HW_AFDBM
991 static inline void __cpu_enable_hw_dbm(void)
992 {
993         u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
994
995         write_sysreg(tcr, tcr_el1);
996         isb();
997 }
998
999 static bool cpu_has_broken_dbm(void)
1000 {
1001         /* List of CPUs which have broken DBM support. */
1002         static const struct midr_range cpus[] = {
1003 #ifdef CONFIG_ARM64_ERRATUM_1024718
1004                 MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 -r1p0
1005 #endif
1006                 {},
1007         };
1008
1009         return is_midr_in_range_list(read_cpuid_id(), cpus);
1010 }
1011
1012 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
1013 {
1014         return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
1015                !cpu_has_broken_dbm();
1016 }
1017
1018 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
1019 {
1020         if (cpu_can_use_dbm(cap))
1021                 __cpu_enable_hw_dbm();
1022 }
1023
1024 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
1025                        int __unused)
1026 {
1027         static bool detected = false;
1028         /*
1029          * DBM is a non-conflicting feature. i.e, the kernel can safely
1030          * run a mix of CPUs with and without the feature. So, we
1031          * unconditionally enable the capability to allow any late CPU
1032          * to use the feature. We only enable the control bits on the
1033          * CPU, if it actually supports.
1034          *
1035          * We have to make sure we print the "feature" detection only
1036          * when at least one CPU actually uses it. So check if this CPU
1037          * can actually use it and print the message exactly once.
1038          *
1039          * This is safe as all CPUs (including secondary CPUs - due to the
1040          * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
1041          * goes through the "matches" check exactly once. Also if a CPU
1042          * matches the criteria, it is guaranteed that the CPU will turn
1043          * the DBM on, as the capability is unconditionally enabled.
1044          */
1045         if (!detected && cpu_can_use_dbm(cap)) {
1046                 detected = true;
1047                 pr_info("detected: Hardware dirty bit management\n");
1048         }
1049
1050         return true;
1051 }
1052
1053 #endif
1054
1055 #ifdef CONFIG_ARM64_VHE
1056 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
1057 {
1058         return is_kernel_in_hyp_mode();
1059 }
1060
1061 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
1062 {
1063         /*
1064          * Copy register values that aren't redirected by hardware.
1065          *
1066          * Before code patching, we only set tpidr_el1, all CPUs need to copy
1067          * this value to tpidr_el2 before we patch the code. Once we've done
1068          * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
1069          * do anything here.
1070          */
1071         if (!alternatives_applied)
1072                 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
1073 }
1074 #endif
1075
1076 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
1077 {
1078         u64 val = read_sysreg_s(SYS_CLIDR_EL1);
1079
1080         /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
1081         WARN_ON(val & (7 << 27 | 7 << 21));
1082 }
1083
1084 #ifdef CONFIG_ARM64_SSBD
1085 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
1086 {
1087         if (user_mode(regs))
1088                 return 1;
1089
1090         if (instr & BIT(PSTATE_Imm_shift))
1091                 regs->pstate |= PSR_SSBS_BIT;
1092         else
1093                 regs->pstate &= ~PSR_SSBS_BIT;
1094
1095         arm64_skip_faulting_instruction(regs, 4);
1096         return 0;
1097 }
1098
1099 static struct undef_hook ssbs_emulation_hook = {
1100         .instr_mask     = ~(1U << PSTATE_Imm_shift),
1101         .instr_val      = 0xd500401f | PSTATE_SSBS,
1102         .fn             = ssbs_emulation_handler,
1103 };
1104
1105 static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
1106 {
1107         static bool undef_hook_registered = false;
1108         static DEFINE_SPINLOCK(hook_lock);
1109
1110         spin_lock(&hook_lock);
1111         if (!undef_hook_registered) {
1112                 register_undef_hook(&ssbs_emulation_hook);
1113                 undef_hook_registered = true;
1114         }
1115         spin_unlock(&hook_lock);
1116
1117         if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
1118                 sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
1119                 arm64_set_ssbd_mitigation(false);
1120         } else {
1121                 arm64_set_ssbd_mitigation(true);
1122         }
1123 }
1124 #endif /* CONFIG_ARM64_SSBD */
1125
1126 #ifdef CONFIG_ARM64_PAN
1127 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
1128 {
1129         /*
1130          * We modify PSTATE. This won't work from irq context as the PSTATE
1131          * is discarded once we return from the exception.
1132          */
1133         WARN_ON_ONCE(in_interrupt());
1134
1135         sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
1136         asm(SET_PSTATE_PAN(1));
1137 }
1138 #endif /* CONFIG_ARM64_PAN */
1139
1140 #ifdef CONFIG_ARM64_RAS_EXTN
1141 static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
1142 {
1143         /* Firmware may have left a deferred SError in this register. */
1144         write_sysreg_s(0, SYS_DISR_EL1);
1145 }
1146 #endif /* CONFIG_ARM64_RAS_EXTN */
1147
1148 static const struct arm64_cpu_capabilities arm64_features[] = {
1149         {
1150                 .desc = "GIC system register CPU interface",
1151                 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1152                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1153                 .matches = has_useable_gicv3_cpuif,
1154                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1155                 .field_pos = ID_AA64PFR0_GIC_SHIFT,
1156                 .sign = FTR_UNSIGNED,
1157                 .min_field_value = 1,
1158         },
1159 #ifdef CONFIG_ARM64_PAN
1160         {
1161                 .desc = "Privileged Access Never",
1162                 .capability = ARM64_HAS_PAN,
1163                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1164                 .matches = has_cpuid_feature,
1165                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1166                 .field_pos = ID_AA64MMFR1_PAN_SHIFT,
1167                 .sign = FTR_UNSIGNED,
1168                 .min_field_value = 1,
1169                 .cpu_enable = cpu_enable_pan,
1170         },
1171 #endif /* CONFIG_ARM64_PAN */
1172 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
1173         {
1174                 .desc = "LSE atomic instructions",
1175                 .capability = ARM64_HAS_LSE_ATOMICS,
1176                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1177                 .matches = has_cpuid_feature,
1178                 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1179                 .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
1180                 .sign = FTR_UNSIGNED,
1181                 .min_field_value = 2,
1182         },
1183 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
1184         {
1185                 .desc = "Software prefetching using PRFM",
1186                 .capability = ARM64_HAS_NO_HW_PREFETCH,
1187                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1188                 .matches = has_no_hw_prefetch,
1189         },
1190 #ifdef CONFIG_ARM64_UAO
1191         {
1192                 .desc = "User Access Override",
1193                 .capability = ARM64_HAS_UAO,
1194                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1195                 .matches = has_cpuid_feature,
1196                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1197                 .field_pos = ID_AA64MMFR2_UAO_SHIFT,
1198                 .min_field_value = 1,
1199                 /*
1200                  * We rely on stop_machine() calling uao_thread_switch() to set
1201                  * UAO immediately after patching.
1202                  */
1203         },
1204 #endif /* CONFIG_ARM64_UAO */
1205 #ifdef CONFIG_ARM64_PAN
1206         {
1207                 .capability = ARM64_ALT_PAN_NOT_UAO,
1208                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1209                 .matches = cpufeature_pan_not_uao,
1210         },
1211 #endif /* CONFIG_ARM64_PAN */
1212 #ifdef CONFIG_ARM64_VHE
1213         {
1214                 .desc = "Virtualization Host Extensions",
1215                 .capability = ARM64_HAS_VIRT_HOST_EXTN,
1216                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1217                 .matches = runs_at_el2,
1218                 .cpu_enable = cpu_copy_el2regs,
1219         },
1220 #endif  /* CONFIG_ARM64_VHE */
1221         {
1222                 .desc = "32-bit EL0 Support",
1223                 .capability = ARM64_HAS_32BIT_EL0,
1224                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1225                 .matches = has_cpuid_feature,
1226                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1227                 .sign = FTR_UNSIGNED,
1228                 .field_pos = ID_AA64PFR0_EL0_SHIFT,
1229                 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
1230         },
1231 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1232         {
1233                 .desc = "Kernel page table isolation (KPTI)",
1234                 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
1235                 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1236                 /*
1237                  * The ID feature fields below are used to indicate that
1238                  * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
1239                  * more details.
1240                  */
1241                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1242                 .field_pos = ID_AA64PFR0_CSV3_SHIFT,
1243                 .min_field_value = 1,
1244                 .matches = unmap_kernel_at_el0,
1245                 .cpu_enable = kpti_install_ng_mappings,
1246         },
1247 #endif
1248         {
1249                 /* FP/SIMD is not implemented */
1250                 .capability = ARM64_HAS_NO_FPSIMD,
1251                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1252                 .min_field_value = 0,
1253                 .matches = has_no_fpsimd,
1254         },
1255 #ifdef CONFIG_ARM64_PMEM
1256         {
1257                 .desc = "Data cache clean to Point of Persistence",
1258                 .capability = ARM64_HAS_DCPOP,
1259                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1260                 .matches = has_cpuid_feature,
1261                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1262                 .field_pos = ID_AA64ISAR1_DPB_SHIFT,
1263                 .min_field_value = 1,
1264         },
1265 #endif
1266 #ifdef CONFIG_ARM64_SVE
1267         {
1268                 .desc = "Scalable Vector Extension",
1269                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1270                 .capability = ARM64_SVE,
1271                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1272                 .sign = FTR_UNSIGNED,
1273                 .field_pos = ID_AA64PFR0_SVE_SHIFT,
1274                 .min_field_value = ID_AA64PFR0_SVE,
1275                 .matches = has_cpuid_feature,
1276                 .cpu_enable = sve_kernel_enable,
1277         },
1278 #endif /* CONFIG_ARM64_SVE */
1279 #ifdef CONFIG_ARM64_RAS_EXTN
1280         {
1281                 .desc = "RAS Extension Support",
1282                 .capability = ARM64_HAS_RAS_EXTN,
1283                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1284                 .matches = has_cpuid_feature,
1285                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1286                 .sign = FTR_UNSIGNED,
1287                 .field_pos = ID_AA64PFR0_RAS_SHIFT,
1288                 .min_field_value = ID_AA64PFR0_RAS_V1,
1289                 .cpu_enable = cpu_clear_disr,
1290         },
1291 #endif /* CONFIG_ARM64_RAS_EXTN */
1292         {
1293                 .desc = "Data cache clean to the PoU not required for I/D coherence",
1294                 .capability = ARM64_HAS_CACHE_IDC,
1295                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1296                 .matches = has_cache_idc,
1297                 .cpu_enable = cpu_emulate_effective_ctr,
1298         },
1299         {
1300                 .desc = "Instruction cache invalidation not required for I/D coherence",
1301                 .capability = ARM64_HAS_CACHE_DIC,
1302                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1303                 .matches = has_cache_dic,
1304         },
1305         {
1306                 .desc = "Stage-2 Force Write-Back",
1307                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1308                 .capability = ARM64_HAS_STAGE2_FWB,
1309                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1310                 .sign = FTR_UNSIGNED,
1311                 .field_pos = ID_AA64MMFR2_FWB_SHIFT,
1312                 .min_field_value = 1,
1313                 .matches = has_cpuid_feature,
1314                 .cpu_enable = cpu_has_fwb,
1315         },
1316 #ifdef CONFIG_ARM64_HW_AFDBM
1317         {
1318                 /*
1319                  * Since we turn this on always, we don't want the user to
1320                  * think that the feature is available when it may not be.
1321                  * So hide the description.
1322                  *
1323                  * .desc = "Hardware pagetable Dirty Bit Management",
1324                  *
1325                  */
1326                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1327                 .capability = ARM64_HW_DBM,
1328                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1329                 .sign = FTR_UNSIGNED,
1330                 .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
1331                 .min_field_value = 2,
1332                 .matches = has_hw_dbm,
1333                 .cpu_enable = cpu_enable_hw_dbm,
1334         },
1335 #endif
1336 #ifdef CONFIG_ARM64_SSBD
1337         {
1338                 .desc = "CRC32 instructions",
1339                 .capability = ARM64_HAS_CRC32,
1340                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1341                 .matches = has_cpuid_feature,
1342                 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1343                 .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
1344                 .min_field_value = 1,
1345         },
1346         {
1347                 .desc = "Speculative Store Bypassing Safe (SSBS)",
1348                 .capability = ARM64_SSBS,
1349                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1350                 .matches = has_cpuid_feature,
1351                 .sys_reg = SYS_ID_AA64PFR1_EL1,
1352                 .field_pos = ID_AA64PFR1_SSBS_SHIFT,
1353                 .sign = FTR_UNSIGNED,
1354                 .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
1355                 .cpu_enable = cpu_enable_ssbs,
1356         },
1357 #endif
1358 #ifdef CONFIG_ARM64_CNP
1359         {
1360                 .desc = "Common not Private translations",
1361                 .capability = ARM64_HAS_CNP,
1362                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1363                 .matches = has_useable_cnp,
1364                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1365                 .sign = FTR_UNSIGNED,
1366                 .field_pos = ID_AA64MMFR2_CNP_SHIFT,
1367                 .min_field_value = 1,
1368                 .cpu_enable = cpu_enable_cnp,
1369         },
1370 #endif
1371         {},
1372 };
1373
1374 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)      \
1375         {                                                       \
1376                 .desc = #cap,                                   \
1377                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,            \
1378                 .matches = has_cpuid_feature,                   \
1379                 .sys_reg = reg,                                 \
1380                 .field_pos = field,                             \
1381                 .sign = s,                                      \
1382                 .min_field_value = min_value,                   \
1383                 .hwcap_type = cap_type,                         \
1384                 .hwcap = cap,                                   \
1385         }
1386
1387 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
1388         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
1389         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
1390         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
1391         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
1392         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
1393         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
1394         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
1395         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
1396         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
1397         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
1398         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
1399         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
1400         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
1401         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
1402         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
1403         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
1404         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
1405         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
1406         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
1407         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
1408         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
1409         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
1410         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
1411         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
1412         HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
1413 #ifdef CONFIG_ARM64_SVE
1414         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
1415 #endif
1416         HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
1417         {},
1418 };
1419
1420 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
1421 #ifdef CONFIG_COMPAT
1422         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1423         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1424         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1425         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1426         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
1427 #endif
1428         {},
1429 };
1430
1431 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1432 {
1433         switch (cap->hwcap_type) {
1434         case CAP_HWCAP:
1435                 elf_hwcap |= cap->hwcap;
1436                 break;
1437 #ifdef CONFIG_COMPAT
1438         case CAP_COMPAT_HWCAP:
1439                 compat_elf_hwcap |= (u32)cap->hwcap;
1440                 break;
1441         case CAP_COMPAT_HWCAP2:
1442                 compat_elf_hwcap2 |= (u32)cap->hwcap;
1443                 break;
1444 #endif
1445         default:
1446                 WARN_ON(1);
1447                 break;
1448         }
1449 }
1450
1451 /* Check if we have a particular HWCAP enabled */
1452 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1453 {
1454         bool rc;
1455
1456         switch (cap->hwcap_type) {
1457         case CAP_HWCAP:
1458                 rc = (elf_hwcap & cap->hwcap) != 0;
1459                 break;
1460 #ifdef CONFIG_COMPAT
1461         case CAP_COMPAT_HWCAP:
1462                 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1463                 break;
1464         case CAP_COMPAT_HWCAP2:
1465                 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1466                 break;
1467 #endif
1468         default:
1469                 WARN_ON(1);
1470                 rc = false;
1471         }
1472
1473         return rc;
1474 }
1475
1476 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
1477 {
1478         /* We support emulation of accesses to CPU ID feature registers */
1479         elf_hwcap |= HWCAP_CPUID;
1480         for (; hwcaps->matches; hwcaps++)
1481                 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
1482                         cap_set_elf_hwcap(hwcaps);
1483 }
1484
1485 /*
1486  * Check if the current CPU has a given feature capability.
1487  * Should be called from non-preemptible context.
1488  */
1489 static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
1490                                unsigned int cap)
1491 {
1492         const struct arm64_cpu_capabilities *caps;
1493
1494         if (WARN_ON(preemptible()))
1495                 return false;
1496
1497         for (caps = cap_array; caps->matches; caps++)
1498                 if (caps->capability == cap)
1499                         return caps->matches(caps, SCOPE_LOCAL_CPU);
1500
1501         return false;
1502 }
1503
1504 static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1505                                       u16 scope_mask, const char *info)
1506 {
1507         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1508         for (; caps->matches; caps++) {
1509                 if (!(caps->type & scope_mask) ||
1510                     !caps->matches(caps, cpucap_default_scope(caps)))
1511                         continue;
1512
1513                 if (!cpus_have_cap(caps->capability) && caps->desc)
1514                         pr_info("%s %s\n", info, caps->desc);
1515                 cpus_set_cap(caps->capability);
1516         }
1517 }
1518
1519 static void update_cpu_capabilities(u16 scope_mask)
1520 {
1521         __update_cpu_capabilities(arm64_errata, scope_mask,
1522                                   "enabling workaround for");
1523         __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1524 }
1525
1526 static int __enable_cpu_capability(void *arg)
1527 {
1528         const struct arm64_cpu_capabilities *cap = arg;
1529
1530         cap->cpu_enable(cap);
1531         return 0;
1532 }
1533
1534 /*
1535  * Run through the enabled capabilities and enable() it on all active
1536  * CPUs
1537  */
1538 static void __init
1539 __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1540                           u16 scope_mask)
1541 {
1542         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1543         for (; caps->matches; caps++) {
1544                 unsigned int num = caps->capability;
1545
1546                 if (!(caps->type & scope_mask) || !cpus_have_cap(num))
1547                         continue;
1548
1549                 /* Ensure cpus_have_const_cap(num) works */
1550                 static_branch_enable(&cpu_hwcap_keys[num]);
1551
1552                 if (caps->cpu_enable) {
1553                         /*
1554                          * Capabilities with SCOPE_BOOT_CPU scope are finalised
1555                          * before any secondary CPU boots. Thus, each secondary
1556                          * will enable the capability as appropriate via
1557                          * check_local_cpu_capabilities(). The only exception is
1558                          * the boot CPU, for which the capability must be
1559                          * enabled here. This approach avoids costly
1560                          * stop_machine() calls for this case.
1561                          *
1562                          * Otherwise, use stop_machine() as it schedules the
1563                          * work allowing us to modify PSTATE, instead of
1564                          * on_each_cpu() which uses an IPI, giving us a PSTATE
1565                          * that disappears when we return.
1566                          */
1567                         if (scope_mask & SCOPE_BOOT_CPU)
1568                                 caps->cpu_enable(caps);
1569                         else
1570                                 stop_machine(__enable_cpu_capability,
1571                                              (void *)caps, cpu_online_mask);
1572                 }
1573         }
1574 }
1575
1576 static void __init enable_cpu_capabilities(u16 scope_mask)
1577 {
1578         __enable_cpu_capabilities(arm64_errata, scope_mask);
1579         __enable_cpu_capabilities(arm64_features, scope_mask);
1580 }
1581
1582 /*
1583  * Run through the list of capabilities to check for conflicts.
1584  * If the system has already detected a capability, take necessary
1585  * action on this CPU.
1586  *
1587  * Returns "false" on conflicts.
1588  */
1589 static bool
1590 __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
1591                         u16 scope_mask)
1592 {
1593         bool cpu_has_cap, system_has_cap;
1594
1595         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1596
1597         for (; caps->matches; caps++) {
1598                 if (!(caps->type & scope_mask))
1599                         continue;
1600
1601                 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
1602                 system_has_cap = cpus_have_cap(caps->capability);
1603
1604                 if (system_has_cap) {
1605                         /*
1606                          * Check if the new CPU misses an advertised feature,
1607                          * which is not safe to miss.
1608                          */
1609                         if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
1610                                 break;
1611                         /*
1612                          * We have to issue cpu_enable() irrespective of
1613                          * whether the CPU has it or not, as it is enabeld
1614                          * system wide. It is upto the call back to take
1615                          * appropriate action on this CPU.
1616                          */
1617                         if (caps->cpu_enable)
1618                                 caps->cpu_enable(caps);
1619                 } else {
1620                         /*
1621                          * Check if the CPU has this capability if it isn't
1622                          * safe to have when the system doesn't.
1623                          */
1624                         if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
1625                                 break;
1626                 }
1627         }
1628
1629         if (caps->matches) {
1630                 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
1631                         smp_processor_id(), caps->capability,
1632                         caps->desc, system_has_cap, cpu_has_cap);
1633                 return false;
1634         }
1635
1636         return true;
1637 }
1638
1639 static bool verify_local_cpu_caps(u16 scope_mask)
1640 {
1641         return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
1642                __verify_local_cpu_caps(arm64_features, scope_mask);
1643 }
1644
1645 /*
1646  * Check for CPU features that are used in early boot
1647  * based on the Boot CPU value.
1648  */
1649 static void check_early_cpu_features(void)
1650 {
1651         verify_cpu_asid_bits();
1652         /*
1653          * Early features are used by the kernel already. If there
1654          * is a conflict, we cannot proceed further.
1655          */
1656         if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
1657                 cpu_panic_kernel();
1658 }
1659
1660 static void
1661 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
1662 {
1663
1664         for (; caps->matches; caps++)
1665                 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
1666                         pr_crit("CPU%d: missing HWCAP: %s\n",
1667                                         smp_processor_id(), caps->desc);
1668                         cpu_die_early();
1669                 }
1670 }
1671
1672 static void verify_sve_features(void)
1673 {
1674         u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
1675         u64 zcr = read_zcr_features();
1676
1677         unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
1678         unsigned int len = zcr & ZCR_ELx_LEN_MASK;
1679
1680         if (len < safe_len || sve_verify_vq_map()) {
1681                 pr_crit("CPU%d: SVE: required vector length(s) missing\n",
1682                         smp_processor_id());
1683                 cpu_die_early();
1684         }
1685
1686         /* Add checks on other ZCR bits here if necessary */
1687 }
1688
1689
1690 /*
1691  * Run through the enabled system capabilities and enable() it on this CPU.
1692  * The capabilities were decided based on the available CPUs at the boot time.
1693  * Any new CPU should match the system wide status of the capability. If the
1694  * new CPU doesn't have a capability which the system now has enabled, we
1695  * cannot do anything to fix it up and could cause unexpected failures. So
1696  * we park the CPU.
1697  */
1698 static void verify_local_cpu_capabilities(void)
1699 {
1700         /*
1701          * The capabilities with SCOPE_BOOT_CPU are checked from
1702          * check_early_cpu_features(), as they need to be verified
1703          * on all secondary CPUs.
1704          */
1705         if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
1706                 cpu_die_early();
1707
1708         verify_local_elf_hwcaps(arm64_elf_hwcaps);
1709
1710         if (system_supports_32bit_el0())
1711                 verify_local_elf_hwcaps(compat_elf_hwcaps);
1712
1713         if (system_supports_sve())
1714                 verify_sve_features();
1715 }
1716
1717 void check_local_cpu_capabilities(void)
1718 {
1719         /*
1720          * All secondary CPUs should conform to the early CPU features
1721          * in use by the kernel based on boot CPU.
1722          */
1723         check_early_cpu_features();
1724
1725         /*
1726          * If we haven't finalised the system capabilities, this CPU gets
1727          * a chance to update the errata work arounds and local features.
1728          * Otherwise, this CPU should verify that it has all the system
1729          * advertised capabilities.
1730          */
1731         if (!sys_caps_initialised)
1732                 update_cpu_capabilities(SCOPE_LOCAL_CPU);
1733         else
1734                 verify_local_cpu_capabilities();
1735 }
1736
1737 static void __init setup_boot_cpu_capabilities(void)
1738 {
1739         /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
1740         update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
1741         /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
1742         enable_cpu_capabilities(SCOPE_BOOT_CPU);
1743 }
1744
1745 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1746 EXPORT_SYMBOL(arm64_const_caps_ready);
1747
1748 static void __init mark_const_caps_ready(void)
1749 {
1750         static_branch_enable(&arm64_const_caps_ready);
1751 }
1752
1753 extern const struct arm64_cpu_capabilities arm64_errata[];
1754
1755 bool this_cpu_has_cap(unsigned int cap)
1756 {
1757         return (__this_cpu_has_cap(arm64_features, cap) ||
1758                 __this_cpu_has_cap(arm64_errata, cap));
1759 }
1760
1761 static void __init setup_system_capabilities(void)
1762 {
1763         /*
1764          * We have finalised the system-wide safe feature
1765          * registers, finalise the capabilities that depend
1766          * on it. Also enable all the available capabilities,
1767          * that are not enabled already.
1768          */
1769         update_cpu_capabilities(SCOPE_SYSTEM);
1770         enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
1771 }
1772
1773 void __init setup_cpu_features(void)
1774 {
1775         u32 cwg;
1776
1777         setup_system_capabilities();
1778         mark_const_caps_ready();
1779         setup_elf_hwcaps(arm64_elf_hwcaps);
1780
1781         if (system_supports_32bit_el0())
1782                 setup_elf_hwcaps(compat_elf_hwcaps);
1783
1784         if (system_uses_ttbr0_pan())
1785                 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
1786
1787         sve_setup();
1788         minsigstksz_setup();
1789
1790         /* Advertise that we have computed the system capabilities */
1791         set_sys_caps_initialised();
1792
1793         /*
1794          * Check for sane CTR_EL0.CWG value.
1795          */
1796         cwg = cache_type_cwg();
1797         if (!cwg)
1798                 pr_warn("No Cache Writeback Granule information, assuming %d\n",
1799                         ARCH_DMA_MINALIGN);
1800 }
1801
1802 static bool __maybe_unused
1803 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
1804 {
1805         return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
1806 }
1807
1808 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
1809 {
1810         cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
1811 }
1812
1813 /*
1814  * We emulate only the following system register space.
1815  * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
1816  * See Table C5-6 System instruction encodings for System register accesses,
1817  * ARMv8 ARM(ARM DDI 0487A.f) for more details.
1818  */
1819 static inline bool __attribute_const__ is_emulated(u32 id)
1820 {
1821         return (sys_reg_Op0(id) == 0x3 &&
1822                 sys_reg_CRn(id) == 0x0 &&
1823                 sys_reg_Op1(id) == 0x0 &&
1824                 (sys_reg_CRm(id) == 0 ||
1825                  ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
1826 }
1827
1828 /*
1829  * With CRm == 0, reg should be one of :
1830  * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
1831  */
1832 static inline int emulate_id_reg(u32 id, u64 *valp)
1833 {
1834         switch (id) {
1835         case SYS_MIDR_EL1:
1836                 *valp = read_cpuid_id();
1837                 break;
1838         case SYS_MPIDR_EL1:
1839                 *valp = SYS_MPIDR_SAFE_VAL;
1840                 break;
1841         case SYS_REVIDR_EL1:
1842                 /* IMPLEMENTATION DEFINED values are emulated with 0 */
1843                 *valp = 0;
1844                 break;
1845         default:
1846                 return -EINVAL;
1847         }
1848
1849         return 0;
1850 }
1851
1852 static int emulate_sys_reg(u32 id, u64 *valp)
1853 {
1854         struct arm64_ftr_reg *regp;
1855
1856         if (!is_emulated(id))
1857                 return -EINVAL;
1858
1859         if (sys_reg_CRm(id) == 0)
1860                 return emulate_id_reg(id, valp);
1861
1862         regp = get_arm64_ftr_reg(id);
1863         if (regp)
1864                 *valp = arm64_ftr_reg_user_value(regp);
1865         else
1866                 /*
1867                  * The untracked registers are either IMPLEMENTATION DEFINED
1868                  * (e.g, ID_AFR0_EL1) or reserved RAZ.
1869                  */
1870                 *valp = 0;
1871         return 0;
1872 }
1873
1874 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
1875 {
1876         int rc;
1877         u64 val;
1878
1879         rc = emulate_sys_reg(sys_reg, &val);
1880         if (!rc) {
1881                 pt_regs_write_reg(regs, rt, val);
1882                 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1883         }
1884         return rc;
1885 }
1886
1887 static int emulate_mrs(struct pt_regs *regs, u32 insn)
1888 {
1889         u32 sys_reg, rt;
1890
1891         /*
1892          * sys_reg values are defined as used in mrs/msr instruction.
1893          * shift the imm value to get the encoding.
1894          */
1895         sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
1896         rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
1897         return do_emulate_mrs(regs, sys_reg, rt);
1898 }
1899
1900 static struct undef_hook mrs_hook = {
1901         .instr_mask = 0xfff00000,
1902         .instr_val  = 0xd5300000,
1903         .pstate_mask = PSR_AA32_MODE_MASK,
1904         .pstate_val = PSR_MODE_EL0t,
1905         .fn = emulate_mrs,
1906 };
1907
1908 static int __init enable_mrs_emulation(void)
1909 {
1910         register_undef_hook(&mrs_hook);
1911         return 0;
1912 }
1913
1914 core_initcall(enable_mrs_emulation);