// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)
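/*
 * Bits 0..25 of the ISA config register correspond to the single letter
 * base extensions 'a' .. 'z', hence the GENMASK(25, 0) above.
 */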
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	RISCV_ISA_EXT_a,
	RISCV_ISA_EXT_c,
	RISCV_ISA_EXT_d,
	RISCV_ISA_EXT_f,
	RISCV_ISA_EXT_h,
	RISCV_ISA_EXT_i,
	RISCV_ISA_EXT_m,
	RISCV_ISA_EXT_SVPBMT,
	RISCV_ISA_EXT_SSTC,
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	default:
		break;
	}

	return true;
}
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
		return false;
	default:
		break;
	}

	return true;
}
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because it races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_load()/vcpu_put().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	unsigned long host_isa, i;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with non-zero id in power-off state so that
	 * they can be brought up using SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
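/*
 * What follows is the ONE_REG interface for the VCPU config, core, CSR,
 * timer, FP, and ISA_EXT register spaces. As a rough illustration (not
 * part of this file), userspace on RV64, where sizeof(unsigned long) == 8,
 * could read the base ISA bitmap of a VCPU via:
 *
 *	unsigned long isa;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * where vcpu_fd is an assumed open VCPU file descriptor.
 */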
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	/* This ONE REG interface is only defined for single letter extensions */
	if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val = 0;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		reg_val = 1; /* Mark the given extension as available */

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -EOPNOTSUPP;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);

	return -EINVAL;
}
static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);

	return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}
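/*
 * Sketch of the matching userspace call (illustrative only), with vcpu_fd
 * an assumed open VCPU file descriptor:
 *
 *	struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *
 * asserts the VS-level external interrupt line; KVM_INTERRUPT_UNSET
 * deasserts it.
 */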
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
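/*
 * Interrupt bookkeeping: writers (kvm_riscv_vcpu_set_interrupt() and
 * kvm_riscv_vcpu_unset_interrupt()) update irqs_pending first and then
 * set the corresponding bit in irqs_pending_mask. The flush below
 * atomically consumes the mask and folds the changed bits into the
 * shadow HVIP value that is written to hardware before guest entry.
 */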
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync up the HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}
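/*
 * Mirror the VCPU ISA into the henvcfg CSR: ENVCFG_PBMTE lets the guest
 * use Svpbmt page-based memory types and ENVCFG_STCE exposes the Sstc
 * stimecmp facility to the guest.
 */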
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
	u64 henvcfg = 0;

	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
		henvcfg |= ENVCFG_PBMTE;

	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC))
		henvcfg |= ENVCFG_STCE;
	csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
	csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}
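/*
 * Handle deferred VCPU requests raised elsewhere in KVM (reset, HGATP
 * update, fences) before the next guest entry; KVM_REQ_SLEEP parks the
 * VCPU until it is powered on and unpaused again.
 */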
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awaken to handle a signal, request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}
	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		cond_resched();

		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		local_irq_disable();

		/*
		 * Exit if we have a signal pending so that we can deliver
		 * the signal to user space.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;
		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * We might have got VCPU interrupts updated asynchronously
		 * so update it in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Cleanup stale TLB entries
		 *
		 * Note: This should be done after G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_local_tlb_sanitize(vcpu);
		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);
		preempt_disable();

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}