// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (c) 2023 Intel Corporation
 *
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
13 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
16 VCPU_FEATURE_ISA_EXT = 0,
20 static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];
22 bool filter_reg(__u64 reg)
24 switch (reg & ~REG_MASK) {
26 * Same set of ISA_EXT registers are not present on all host because
27 * ISA_EXT registers are visible to the KVM user space based on the
28 * ISA extensions available on the host. Also, disabling an ISA
29 * extension using corresponding ISA_EXT register does not affect
30 * the visibility of the ISA_EXT register itself.
32 * Based on above, we should filter-out all ISA_EXT registers.
34 * Note: The below list is alphabetically sorted.
36 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_A:
37 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_C:
38 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D:
39 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F:
40 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_H:
41 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_I:
42 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_M:
43 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V:
44 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN:
45 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA:
46 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSTC:
47 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL:
48 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
49 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
50 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
51 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
52 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBS:
53 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM:
54 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ:
55 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR:
56 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICOND:
57 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICSR:
58 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIFENCEI:
59 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
60 case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM:
62 * Like ISA_EXT registers, SBI_EXT registers are only visible when the
63 * host supports them and disabling them does not affect the visibility
64 * of the SBI_EXT register itself.
66 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01:
67 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME:
68 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI:
69 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE:
70 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST:
71 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM:
72 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU:
73 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN:
74 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA:
75 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL:
76 case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR:
78 /* AIA registers are always available when Ssaia can't be disabled */
79 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
80 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
81 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
82 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
83 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
84 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
85 case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
86 return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
94 bool check_reject_set(int err)
99 static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_id)
104 ret = __vcpu_get_reg(vcpu, ext_id, &value);
105 return (ret) ? false : !!value;
108 void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
110 unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
111 struct vcpu_reg_sublist *s;
115 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
116 __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);
119 * Disable all extensions which were enabled by default
120 * if they were available in the risc-v host.
122 for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
123 rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
124 if (rc && isa_ext_state[i])
125 isa_ext_cant_disable[i] = true;
128 for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
129 rc = __vcpu_set_reg(vcpu, RISCV_SBI_EXT_REG(i), 0);
130 TEST_ASSERT(!rc || (rc == -1 && errno == ENOENT), "Unexpected error");
133 for_each_sublist(c, s) {
137 switch (s->feature_type) {
138 case VCPU_FEATURE_ISA_EXT:
139 feature = RISCV_ISA_EXT_REG(s->feature);
141 case VCPU_FEATURE_SBI_EXT:
142 feature = RISCV_SBI_EXT_REG(s->feature);
145 TEST_FAIL("Unknown feature type");
148 /* Try to enable the desired extension */
149 __vcpu_set_reg(vcpu, feature, 1);
151 /* Double check whether the desired extension was enabled */
152 __TEST_REQUIRE(vcpu_has_ext(vcpu, feature),
153 "%s not available, skipping tests\n", s->name);
157 static const char *config_id_to_str(const char *prefix, __u64 id)
159 /* reg_off is the offset into struct kvm_riscv_config */
160 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
162 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG);
165 case KVM_REG_RISCV_CONFIG_REG(isa):
166 return "KVM_REG_RISCV_CONFIG_REG(isa)";
167 case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
168 return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
169 case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
170 return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
171 case KVM_REG_RISCV_CONFIG_REG(mvendorid):
172 return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
173 case KVM_REG_RISCV_CONFIG_REG(marchid):
174 return "KVM_REG_RISCV_CONFIG_REG(marchid)";
175 case KVM_REG_RISCV_CONFIG_REG(mimpid):
176 return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
177 case KVM_REG_RISCV_CONFIG_REG(satp_mode):
178 return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
181 return strdup_printf("%lld /* UNKNOWN */", reg_off);
184 static const char *core_id_to_str(const char *prefix, __u64 id)
186 /* reg_off is the offset into struct kvm_riscv_core */
187 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
189 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE);
192 case KVM_REG_RISCV_CORE_REG(regs.pc):
193 return "KVM_REG_RISCV_CORE_REG(regs.pc)";
194 case KVM_REG_RISCV_CORE_REG(regs.ra):
195 return "KVM_REG_RISCV_CORE_REG(regs.ra)";
196 case KVM_REG_RISCV_CORE_REG(regs.sp):
197 return "KVM_REG_RISCV_CORE_REG(regs.sp)";
198 case KVM_REG_RISCV_CORE_REG(regs.gp):
199 return "KVM_REG_RISCV_CORE_REG(regs.gp)";
200 case KVM_REG_RISCV_CORE_REG(regs.tp):
201 return "KVM_REG_RISCV_CORE_REG(regs.tp)";
202 case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
203 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
204 reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
205 case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
206 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
207 reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
208 case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
209 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
210 reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
211 case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
212 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
213 reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
214 case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
215 return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
216 reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
217 case KVM_REG_RISCV_CORE_REG(mode):
218 return "KVM_REG_RISCV_CORE_REG(mode)";
221 return strdup_printf("%lld /* UNKNOWN */", reg_off);
/* Stringify helpers for the three CSR subtypes printed by csr_id_to_str() */
#define RISCV_CSR_GENERAL(csr) \
	"KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_AIA(csr) \
	"KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_SMSTATEEN(csr) \
	"KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_REG(" #csr ")"
231 static const char *general_csr_id_to_str(__u64 reg_off)
233 /* reg_off is the offset into struct kvm_riscv_csr */
235 case KVM_REG_RISCV_CSR_REG(sstatus):
236 return RISCV_CSR_GENERAL(sstatus);
237 case KVM_REG_RISCV_CSR_REG(sie):
238 return RISCV_CSR_GENERAL(sie);
239 case KVM_REG_RISCV_CSR_REG(stvec):
240 return RISCV_CSR_GENERAL(stvec);
241 case KVM_REG_RISCV_CSR_REG(sscratch):
242 return RISCV_CSR_GENERAL(sscratch);
243 case KVM_REG_RISCV_CSR_REG(sepc):
244 return RISCV_CSR_GENERAL(sepc);
245 case KVM_REG_RISCV_CSR_REG(scause):
246 return RISCV_CSR_GENERAL(scause);
247 case KVM_REG_RISCV_CSR_REG(stval):
248 return RISCV_CSR_GENERAL(stval);
249 case KVM_REG_RISCV_CSR_REG(sip):
250 return RISCV_CSR_GENERAL(sip);
251 case KVM_REG_RISCV_CSR_REG(satp):
252 return RISCV_CSR_GENERAL(satp);
253 case KVM_REG_RISCV_CSR_REG(scounteren):
254 return RISCV_CSR_GENERAL(scounteren);
255 case KVM_REG_RISCV_CSR_REG(senvcfg):
256 return RISCV_CSR_GENERAL(senvcfg);
259 return strdup_printf("KVM_REG_RISCV_CSR_GENERAL | %lld /* UNKNOWN */", reg_off);
262 static const char *aia_csr_id_to_str(__u64 reg_off)
264 /* reg_off is the offset into struct kvm_riscv_aia_csr */
266 case KVM_REG_RISCV_CSR_AIA_REG(siselect):
267 return RISCV_CSR_AIA(siselect);
268 case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
269 return RISCV_CSR_AIA(iprio1);
270 case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
271 return RISCV_CSR_AIA(iprio2);
272 case KVM_REG_RISCV_CSR_AIA_REG(sieh):
273 return RISCV_CSR_AIA(sieh);
274 case KVM_REG_RISCV_CSR_AIA_REG(siph):
275 return RISCV_CSR_AIA(siph);
276 case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
277 return RISCV_CSR_AIA(iprio1h);
278 case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
279 return RISCV_CSR_AIA(iprio2h);
282 return strdup_printf("KVM_REG_RISCV_CSR_AIA | %lld /* UNKNOWN */", reg_off);
285 static const char *smstateen_csr_id_to_str(__u64 reg_off)
287 /* reg_off is the offset into struct kvm_riscv_smstateen_csr */
289 case KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0):
290 return RISCV_CSR_SMSTATEEN(sstateen0);
293 TEST_FAIL("Unknown smstateen csr reg: 0x%llx", reg_off);
297 static const char *csr_id_to_str(const char *prefix, __u64 id)
299 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
300 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
302 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR);
304 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
306 switch (reg_subtype) {
307 case KVM_REG_RISCV_CSR_GENERAL:
308 return general_csr_id_to_str(reg_off);
309 case KVM_REG_RISCV_CSR_AIA:
310 return aia_csr_id_to_str(reg_off);
311 case KVM_REG_RISCV_CSR_SMSTATEEN:
312 return smstateen_csr_id_to_str(reg_off);
315 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
318 static const char *timer_id_to_str(const char *prefix, __u64 id)
320 /* reg_off is the offset into struct kvm_riscv_timer */
321 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
323 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER);
326 case KVM_REG_RISCV_TIMER_REG(frequency):
327 return "KVM_REG_RISCV_TIMER_REG(frequency)";
328 case KVM_REG_RISCV_TIMER_REG(time):
329 return "KVM_REG_RISCV_TIMER_REG(time)";
330 case KVM_REG_RISCV_TIMER_REG(compare):
331 return "KVM_REG_RISCV_TIMER_REG(compare)";
332 case KVM_REG_RISCV_TIMER_REG(state):
333 return "KVM_REG_RISCV_TIMER_REG(state)";
336 return strdup_printf("%lld /* UNKNOWN */", reg_off);
339 static const char *fp_f_id_to_str(const char *prefix, __u64 id)
341 /* reg_off is the offset into struct __riscv_f_ext_state */
342 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
344 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F);
347 case KVM_REG_RISCV_FP_F_REG(f[0]) ...
348 KVM_REG_RISCV_FP_F_REG(f[31]):
349 return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
350 case KVM_REG_RISCV_FP_F_REG(fcsr):
351 return "KVM_REG_RISCV_FP_F_REG(fcsr)";
354 return strdup_printf("%lld /* UNKNOWN */", reg_off);
357 static const char *fp_d_id_to_str(const char *prefix, __u64 id)
359 /* reg_off is the offset into struct __riscv_d_ext_state */
360 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
362 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D);
365 case KVM_REG_RISCV_FP_D_REG(f[0]) ...
366 KVM_REG_RISCV_FP_D_REG(f[31]):
367 return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
368 case KVM_REG_RISCV_FP_D_REG(fcsr):
369 return "KVM_REG_RISCV_FP_D_REG(fcsr)";
372 return strdup_printf("%lld /* UNKNOWN */", reg_off);
375 #define KVM_ISA_EXT_ARR(ext) \
376 [KVM_RISCV_ISA_EXT_##ext] = "KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_" #ext
378 static const char *isa_ext_single_id_to_str(__u64 reg_off)
380 static const char * const kvm_isa_ext_reg_name[] = {
389 KVM_ISA_EXT_ARR(SMSTATEEN),
390 KVM_ISA_EXT_ARR(SSAIA),
391 KVM_ISA_EXT_ARR(SSTC),
392 KVM_ISA_EXT_ARR(SVINVAL),
393 KVM_ISA_EXT_ARR(SVNAPOT),
394 KVM_ISA_EXT_ARR(SVPBMT),
395 KVM_ISA_EXT_ARR(ZBA),
396 KVM_ISA_EXT_ARR(ZBB),
397 KVM_ISA_EXT_ARR(ZBS),
398 KVM_ISA_EXT_ARR(ZICBOM),
399 KVM_ISA_EXT_ARR(ZICBOZ),
400 KVM_ISA_EXT_ARR(ZICNTR),
401 KVM_ISA_EXT_ARR(ZICOND),
402 KVM_ISA_EXT_ARR(ZICSR),
403 KVM_ISA_EXT_ARR(ZIFENCEI),
404 KVM_ISA_EXT_ARR(ZIHINTPAUSE),
405 KVM_ISA_EXT_ARR(ZIHPM),
408 if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name))
409 return strdup_printf("KVM_REG_RISCV_ISA_SINGLE | %lld /* UNKNOWN */", reg_off);
411 return kvm_isa_ext_reg_name[reg_off];
414 static const char *isa_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
416 const char *unknown = "";
418 if (reg_off > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
419 unknown = " /* UNKNOWN */";
421 switch (reg_subtype) {
422 case KVM_REG_RISCV_ISA_MULTI_EN:
423 return strdup_printf("KVM_REG_RISCV_ISA_MULTI_EN | %lld%s", reg_off, unknown);
424 case KVM_REG_RISCV_ISA_MULTI_DIS:
425 return strdup_printf("KVM_REG_RISCV_ISA_MULTI_DIS | %lld%s", reg_off, unknown);
428 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
431 static const char *isa_ext_id_to_str(const char *prefix, __u64 id)
433 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
434 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
436 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT);
438 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
440 switch (reg_subtype) {
441 case KVM_REG_RISCV_ISA_SINGLE:
442 return isa_ext_single_id_to_str(reg_off);
443 case KVM_REG_RISCV_ISA_MULTI_EN:
444 case KVM_REG_RISCV_ISA_MULTI_DIS:
445 return isa_ext_multi_id_to_str(reg_subtype, reg_off);
448 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
451 #define KVM_SBI_EXT_ARR(ext) \
452 [ext] = "KVM_REG_RISCV_SBI_SINGLE | " #ext
454 static const char *sbi_ext_single_id_to_str(__u64 reg_off)
456 /* reg_off is KVM_RISCV_SBI_EXT_ID */
457 static const char * const kvm_sbi_ext_reg_name[] = {
458 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_V01),
459 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_TIME),
460 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_IPI),
461 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_RFENCE),
462 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST),
463 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM),
464 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU),
465 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA),
466 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL),
467 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR),
468 KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
471 if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name))
472 return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
474 return kvm_sbi_ext_reg_name[reg_off];
477 static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
479 const char *unknown = "";
481 if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
482 unknown = " /* UNKNOWN */";
484 switch (reg_subtype) {
485 case KVM_REG_RISCV_SBI_MULTI_EN:
486 return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld%s", reg_off, unknown);
487 case KVM_REG_RISCV_SBI_MULTI_DIS:
488 return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld%s", reg_off, unknown);
491 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
494 static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
496 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
497 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
499 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_EXT);
501 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
503 switch (reg_subtype) {
504 case KVM_REG_RISCV_SBI_SINGLE:
505 return sbi_ext_single_id_to_str(reg_off);
506 case KVM_REG_RISCV_SBI_MULTI_EN:
507 case KVM_REG_RISCV_SBI_MULTI_DIS:
508 return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
511 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
514 static const char *sbi_sta_id_to_str(__u64 reg_off)
517 case 0: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo)";
518 case 1: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi)";
520 return strdup_printf("KVM_REG_RISCV_SBI_STA | %lld /* UNKNOWN */", reg_off);
523 static const char *sbi_id_to_str(const char *prefix, __u64 id)
525 __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_STATE);
526 __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
528 assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_STATE);
530 reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
532 switch (reg_subtype) {
533 case KVM_REG_RISCV_SBI_STA:
534 return sbi_sta_id_to_str(reg_off);
537 return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
540 void print_reg(const char *prefix, __u64 id)
542 const char *reg_size = NULL;
544 TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
545 "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
547 switch (id & KVM_REG_SIZE_MASK) {
548 case KVM_REG_SIZE_U32:
549 reg_size = "KVM_REG_SIZE_U32";
551 case KVM_REG_SIZE_U64:
552 reg_size = "KVM_REG_SIZE_U64";
554 case KVM_REG_SIZE_U128:
555 reg_size = "KVM_REG_SIZE_U128";
558 printf("\tKVM_REG_RISCV | (%lld << KVM_REG_SIZE_SHIFT) | 0x%llx /* UNKNOWN */,\n",
559 (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id & ~REG_MASK);
563 switch (id & KVM_REG_RISCV_TYPE_MASK) {
564 case KVM_REG_RISCV_CONFIG:
565 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
566 reg_size, config_id_to_str(prefix, id));
568 case KVM_REG_RISCV_CORE:
569 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
570 reg_size, core_id_to_str(prefix, id));
572 case KVM_REG_RISCV_CSR:
573 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
574 reg_size, csr_id_to_str(prefix, id));
576 case KVM_REG_RISCV_TIMER:
577 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
578 reg_size, timer_id_to_str(prefix, id));
580 case KVM_REG_RISCV_FP_F:
581 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
582 reg_size, fp_f_id_to_str(prefix, id));
584 case KVM_REG_RISCV_FP_D:
585 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
586 reg_size, fp_d_id_to_str(prefix, id));
588 case KVM_REG_RISCV_ISA_EXT:
589 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
590 reg_size, isa_ext_id_to_str(prefix, id));
592 case KVM_REG_RISCV_SBI_EXT:
593 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
594 reg_size, sbi_ext_id_to_str(prefix, id));
596 case KVM_REG_RISCV_SBI_STATE:
597 printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_STATE | %s,\n",
598 reg_size, sbi_id_to_str(prefix, id));
601 printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,\n",
602 reg_size, id & ~REG_MASK);
608 * The current blessed list was primed with the output of kernel version
609 * v6.5-rc3 and then later updated with new registers.
611 static __u64 base_regs[] = {
612 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
613 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
614 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
615 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
616 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
617 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
618 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
619 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
620 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
621 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
622 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
623 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
624 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
625 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
626 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
627 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
628 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
629 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
630 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
631 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
632 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
633 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
634 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
635 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
636 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
637 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
638 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
639 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
640 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
641 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
642 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
643 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
644 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
645 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
646 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
647 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
648 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
649 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
650 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
651 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
652 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
653 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
654 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
655 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
656 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
657 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
658 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
659 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
660 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(senvcfg),
661 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
662 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
663 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
664 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
668 * The skips_set list registers that should skip set test.
669 * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
671 static __u64 base_skips_set[] = {
672 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
675 static __u64 sbi_base_regs[] = {
676 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
677 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
678 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
679 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
680 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
681 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
682 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
683 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
686 static __u64 sbi_sta_regs[] = {
687 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA,
688 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo),
689 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi),
692 static __u64 zicbom_regs[] = {
693 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
694 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM,
697 static __u64 zicboz_regs[] = {
698 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
699 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ,
702 static __u64 aia_regs[] = {
703 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
704 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
705 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
706 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
707 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
708 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
709 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
710 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA,
713 static __u64 smstateen_regs[] = {
714 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0),
715 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN,
718 static __u64 fp_f_regs[] = {
719 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
720 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
721 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
722 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
723 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
724 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
725 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
726 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
727 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
728 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
729 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
730 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
731 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
732 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
733 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
734 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
735 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
736 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
737 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
738 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
739 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
740 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
741 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
742 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
743 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
744 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
745 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
746 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
747 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
748 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
749 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
750 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
751 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
752 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F,
755 static __u64 fp_d_regs[] = {
756 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
757 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
758 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
759 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
760 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
761 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
762 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
763 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
764 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
765 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
766 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
767 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
768 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
769 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
770 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
771 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
772 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
773 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
774 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
775 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
776 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
777 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
778 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
779 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
780 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
781 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
782 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
783 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
784 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
785 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
786 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
787 KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
788 KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
789 KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D,
/*
 * Sublist initializers pairing each feature with its register table.
 * Configs below compose these into struct vcpu_reg_list instances.
 */
#define SUBLIST_BASE \
	{"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
	 .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
#define SUBLIST_SBI_BASE \
	{"sbi-base", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_V01, \
	 .regs = sbi_base_regs, .regs_n = ARRAY_SIZE(sbi_base_regs),}
#define SUBLIST_SBI_STA \
	{"sbi-sta", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_STA, \
	 .regs = sbi_sta_regs, .regs_n = ARRAY_SIZE(sbi_sta_regs),}
#define SUBLIST_ZICBOM \
	{"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
#define SUBLIST_ZICBOZ \
	{"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
#define SUBLIST_AIA \
	{"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
#define SUBLIST_SMSTATEEN \
	{"smstateen", .feature = KVM_RISCV_ISA_EXT_SMSTATEEN, .regs = smstateen_regs, .regs_n = ARRAY_SIZE(smstateen_regs),}
#define SUBLIST_FP_F \
	{"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
	 .regs_n = ARRAY_SIZE(fp_f_regs),}
#define SUBLIST_FP_D \
	{"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
	 .regs_n = ARRAY_SIZE(fp_d_regs),}
/*
 * Define a one-register list plus config for an ISA extension that exposes
 * only its own ISA_EXT register (no additional CSRs).
 * NOTE(review): several interior lines of this macro were garbled in this
 * copy; reconstructed from the surviving fragments -- confirm against the
 * upstream selftest source.
 */
#define KVM_ISA_EXT_SIMPLE_CONFIG(ext, extu)			\
static __u64 regs_##ext[] = {					\
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG |			\
	KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |	\
	KVM_RISCV_ISA_EXT_##extu,				\
};								\
static struct vcpu_reg_list config_##ext = {			\
	.sublists = {						\
	SUBLIST_BASE,						\
	{							\
		.name = #ext,					\
		.feature = KVM_RISCV_ISA_EXT_##extu,		\
		.regs = regs_##ext,				\
		.regs_n = ARRAY_SIZE(regs_##ext),		\
	},							\
	{0},							\
	},							\
}
/*
 * Define a one-register list plus config for an SBI extension that exposes
 * only its own SBI_EXT enable register.
 * NOTE(review): several interior lines of this macro were garbled in this
 * copy; reconstructed from the surviving fragments -- confirm against the
 * upstream selftest source.
 */
#define KVM_SBI_EXT_SIMPLE_CONFIG(ext, extu)			\
static __u64 regs_sbi_##ext[] = {				\
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG |			\
	KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |	\
	KVM_RISCV_SBI_EXT_##extu,				\
};								\
static struct vcpu_reg_list config_sbi_##ext = {		\
	.sublists = {						\
	SUBLIST_BASE,						\
	{							\
		.name = "sbi-"#ext,				\
		.feature_type = VCPU_FEATURE_SBI_EXT,		\
		.feature = KVM_RISCV_SBI_EXT_##extu,		\
		.regs = regs_sbi_##ext,				\
		.regs_n = ARRAY_SIZE(regs_sbi_##ext),		\
	},							\
	{0},							\
	},							\
}
/*
 * Define a config for an ISA extension whose register sublist is provided
 * by a dedicated SUBLIST_* macro above (extensions with extra registers).
 * NOTE(review): interior lines were garbled in this copy; reconstructed --
 * confirm against the upstream selftest source.
 */
#define KVM_ISA_EXT_SUBLIST_CONFIG(ext, extu)	\
static struct vcpu_reg_list config_##ext = {	\
	.sublists = {				\
	SUBLIST_BASE,				\
	SUBLIST_##extu,				\
	{0},					\
	},					\
}
/*
 * Define a config for an SBI extension whose register sublist is provided
 * by a dedicated SUBLIST_SBI_* macro above.
 * NOTE(review): interior lines were garbled in this copy; reconstructed --
 * confirm against the upstream selftest source.
 */
#define KVM_SBI_EXT_SUBLIST_CONFIG(ext, extu)		\
static struct vcpu_reg_list config_sbi_##ext = {	\
	.sublists = {					\
	SUBLIST_BASE,					\
	SUBLIST_SBI_##extu,				\
	{0},						\
	},						\
}
/* Note: The below list is alphabetically sorted. */

KVM_SBI_EXT_SUBLIST_CONFIG(base, BASE);
KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA);
KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU);
KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN);

KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA);
KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F);
KVM_ISA_EXT_SUBLIST_CONFIG(fp_d, FP_D);
KVM_ISA_EXT_SIMPLE_CONFIG(h, H);
KVM_ISA_EXT_SUBLIST_CONFIG(smstateen, SMSTATEEN);
KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC);
KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS);
KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM);
KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ);
KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR);
KVM_ISA_EXT_SIMPLE_CONFIG(zicond, ZICOND);
KVM_ISA_EXT_SIMPLE_CONFIG(zicsr, ZICSR);
KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE);
KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM);
901 struct vcpu_reg_list *vcpu_configs[] = {
927 int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);