// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, idle_exits),
	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
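
/*
 * Layout implied by the offsets above: the fixed-size stats header is
 * followed by the id string (KVM_STATS_NAME_SIZE bytes), then the
 * descriptor table, then the statistics data itself.
 */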

/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return RESUME_GUEST;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		vcpu->arch.vpid = 0;	/* Drop vpid for this vCPU */

	if (kvm_dirty_ring_check_request(vcpu))
		return RESUME_HOST;

	return RESUME_GUEST;
}

/*
 * Check and handle pending signals and vCPU requests etc.
 * Run with irqs enabled and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int ret;

	/*
	 * Check conditions before entering the guest
	 */
	ret = xfer_to_guest_mode_handle_work(vcpu);
	if (ret < 0)
		return ret;

	return kvm_check_requests(vcpu);
}

/*
 * Called with irqs enabled
 *
 * Return: RESUME_GUEST if we should enter the guest, with irqs disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
	int ret;

	do {
		ret = kvm_enter_guest_check(vcpu);
		if (ret != RESUME_GUEST)
			break;

		/*
		 * Handle the vcpu timer, deliver interrupts and exceptions,
		 * and check the vmid before the vcpu enters the guest
		 */
		local_irq_disable();
		kvm_deliver_intr(vcpu);
		kvm_deliver_exception(vcpu);
		/* Make sure the vcpu mode has been written */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
		kvm_check_vpid(vcpu);
		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
		/* Clear KVM_LARCH_SWCSR_LATEST as CSR state will change when entering the guest */
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
			/* Make sure the vcpu mode has been written */
			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
			local_irq_enable();
			ret = -EAGAIN;
		}
	} while (ret != RESUME_GUEST);

	return ret;
}
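
/*
 * Ordering note for the loop above: vcpu->mode is set to IN_GUEST_MODE with
 * a full barrier (smp_store_mb()) before requests and pending work are
 * re-checked, so anything that raced with guest entry is either seen by the
 * re-check here (forcing a retry) or sees IN_GUEST_MODE and kicks the vCPU
 * out of the guest (see kvm_arch_vcpu_should_kick()).
 */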

/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	unsigned long estat = vcpu->arch.host_estat;
	u32 intr = estat & 0x1fff; /* Ignore NMI */
	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;

	guest_timing_exit_irqoff();
	guest_state_exit_irqoff();
	local_irq_enable();

	trace_kvm_exit(vcpu, ecode);
	if (ecode) {
		ret = kvm_handle_fault(vcpu, ecode);
	} else {
		WARN(!intr, "vm exiting with suspicious irq\n");
		++vcpu->stat.int_exits;
	}

	if (ret == RESUME_GUEST)
		ret = kvm_pre_enter_guest(vcpu);

	if (ret != RESUME_GUEST) {
		local_irq_enable();
		return ret;
	}

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_reenter(vcpu);

	return RESUME_GUEST;
}
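
/*
 * ESTAT.ECODE == 0 above means the guest was forced out by a host interrupt
 * rather than by a guest-triggered exception, hence the int_exits counter.
 */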

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.irq_pending) &&
		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Protect from TOD sync and vcpu_load/put() */
	preempt_disable();
	ret = kvm_pending_timer(vcpu) ||
		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
	preempt_enable();

	return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;

	kvm_debug("vCPU Register Dump:\n");
	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}

	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.mp_state = *mp_state;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 get from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 inject through GINTC */
		gintc = (val >> 2) & 0xff;
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

		gintc = val & ~(0xffUL << 2);
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return 0;
	}

	kvm_write_sw_gcsr(csr, id, val);

	return 0;
}
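
/*
 * In both accessors above, the interrupt-pending bits IP0~IP7 of ESTAT
 * (bits 9:2) are backed by bits 7:0 of the GINTC CSR; the shifts by 2
 * convert between the two encodings.
 */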

static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
		return -EINVAL;

	switch (id) {
	case 2:
		/* CPUCFG2 features unconditionally supported by KVM */
		*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
		     CPUCFG2_LAM;
		/*
		 * For the ISA extensions listed below, if one is supported
		 * by the host, then it is also supported by KVM.
		 */
		if (cpu_has_lsx)
			*v |= CPUCFG2_LSX;
		if (cpu_has_lasx)
			*v |= CPUCFG2_LASX;

		return 0;
	default:
		/*
		 * No restrictions on other valid CPUCFG IDs' values, but
		 * CPUCFG data is limited to 32 bits as the LoongArch ISA
		 * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
		 */
		*v = U32_MAX;
		return 0;
	}
}

static int kvm_check_cpucfg(int id, u64 val)
{
	int ret;
	u64 mask = 0;

	ret = _kvm_get_cpucfg_mask(id, &mask);
	if (ret)
		return ret;

	if (val & ~mask)
		/* Unsupported features and/or the higher 32 bits should not be set */
		return -EINVAL;

	switch (id) {
	case 2:
		if (!(val & CPUCFG2_LLFTP))
			/* Guests must have a constant timer */
			return -EINVAL;
		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
			/* Single and double precision FP must both be set when FP is enabled */
			return -EINVAL;
		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
			/* LSX architecturally implies FP but val does not satisfy that */
			return -EINVAL;
		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
			/* LASX architecturally implies LSX and FP but val does not satisfy that */
			return -EINVAL;
		return 0;
	default:
		/*
		 * Values for the other CPUCFG IDs are not being further validated
		 * besides the mask check above.
		 */
		return 0;
	}
}
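
/*
 * Taken together, these checks enforce the architectural dependency chain
 * FP <= LSX <= LASX: enabling a wider vector extension for the guest
 * requires every narrower one (and FP) to be enabled as well.
 */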

static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 *v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_getcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
			*v = vcpu->arch.cpucfg[id];
		else
			ret = -EINVAL;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			*v = drdtime() + vcpu->kvm->arch.time_offset;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = kvm_get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		ret = put_user(v, (u64 __user *)(long)reg->addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
		const struct kvm_one_reg *reg, u64 v)
{
	int id, ret = 0;
	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

	switch (type) {
	case KVM_REG_LOONGARCH_CSR:
		id = KVM_GET_IOC_CSR_IDX(reg->id);
		ret = _kvm_setcsr(vcpu, id, v);
		break;
	case KVM_REG_LOONGARCH_CPUCFG:
		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
		ret = kvm_check_cpucfg(id, v);
		if (ret)
			break;
		vcpu->arch.cpucfg[id] = (u32)v;
		break;
	case KVM_REG_LOONGARCH_KVM:
		switch (reg->id) {
		case KVM_REG_LOONGARCH_COUNTER:
			/*
			 * gftoffset is a per-board value, not per-vCPU;
			 * on SMP systems it is only set once, via vCPU 0
			 */
			if (vcpu->vcpu_id == 0)
				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
			break;
		case KVM_REG_LOONGARCH_VCPU_RESET:
			kvm_reset_timer(vcpu);
			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	u64 v, size = reg->id & KVM_REG_SIZE_MASK;

	switch (size) {
	case KVM_REG_SIZE_U64:
		ret = get_user(v, (u64 __user *)(long)reg->addr);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return kvm_set_one_reg(vcpu, reg, v);
}
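
/*
 * For both transfer directions, reg->id packs three fields: the transfer
 * size (KVM_REG_SIZE_*, only U64 is accepted here), the register class
 * (KVM_REG_LOONGARCH_CSR/CPUCFG/KVM), and a class-specific index that
 * KVM_GET_IOC_CSR_IDX()/KVM_GET_IOC_CPUCFG_IDX() extract from the id.
 */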

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->pc = vcpu->arch.pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];

	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.pc = regs->pc;

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	/* FPU is enabled by default; LSX/LASX will be supported later. */
	return -EINVAL;
}

static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case 2:
		return 0;
	default:
		return -ENXIO;
	}
}

static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	int ret;
	uint64_t val;
	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

	ret = _kvm_get_cpucfg_mask(attr->attr, &val);
	if (ret)
		return ret;

	return put_user(val, uaddr);
}

static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
					 struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
				       struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	case KVM_LOONGARCH_VCPU_CPUCFG:
		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
		break;
	default:
		break;
	}

	return ret;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	long r;
	struct kvm_device_attr attr;
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	/*
	 * Only software CSR state should be modified here.
	 *
	 * If any hardware CSR register is modified directly, the
	 * vcpu_load/vcpu_put pair should be used instead: the hardware CSR
	 * registers are owned by the currently resident vcpu, so another
	 * vcpu would need to reload them before running.
	 *
	 * When software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
	 * bit is cleared in vcpu->arch.aux_inuse, and vcpu_load will check
	 * the flag and reload the CSR registers from the software copy.
	 */

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG) {
			r = kvm_set_reg(vcpu, &reg);
			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
		} else
			r = kvm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, argp, sizeof(attr)))
			break;
		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	fpu->fcc = vcpu->arch.fpu.fcc;
	fpu->fcsr = vcpu->arch.fpu.fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int i;

	vcpu->arch.fpu.fcc = fpu->fcc;
	vcpu->arch.fpu.fcsr = fpu->fcsr;
	for (i = 0; i < NUM_FPU_REGS; i++)
		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

	return 0;
}

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	/* Enable FPU */
	set_csr_euen(CSR_EUEN_FPEN);

	kvm_restore_fpu(&vcpu->arch.fpu);
	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

	preempt_enable();
}
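
/*
 * FPU/SIMD context is handled lazily: the EUEN enable bits stay cleared
 * while the guest runs until its first FP/vector instruction traps, at
 * which point kvm_own_fpu()/kvm_own_lsx()/kvm_own_lasx() enable the unit,
 * restore the guest context and record ownership in aux_inuse.
 */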

#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	/* Enable LSX for guest */
	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
	case KVM_LARCH_FPU:
		/*
		 * Guest FPU state already loaded,
		 * only restore upper LSX state
		 */
		_restore_lsx_upper(&vcpu->arch.fpu);
		break;
	default:
		/*
		 * Neither FP nor LSX already active,
		 * restore full LSX state
		 */
		kvm_restore_lsx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
		return -EINVAL;

	preempt_disable();

	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
	case KVM_LARCH_LSX:
	case KVM_LARCH_LSX | KVM_LARCH_FPU:
		/* Guest LSX state already loaded, only restore upper LASX state */
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	case KVM_LARCH_FPU:
		/* Guest FP state already loaded, only restore upper LSX & LASX state */
		_restore_lsx_upper(&vcpu->arch.fpu);
		_restore_lasx_upper(&vcpu->arch.fpu);
		break;
	default:
		/* Neither FP nor LSX already active, restore full LASX state */
		kvm_restore_lasx(&vcpu->arch.fpu);
		break;
	}

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
	preempt_enable();

	return 0;
}
#endif
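
/*
 * The _restore_*_upper() helpers rely on the architectural register
 * aliasing: an FP register is the low 64 bits of the corresponding LSX
 * register, which in turn is the low 128 bits of the LASX register, so
 * only the missing upper halves need to be reloaded.
 */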

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();

	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
		kvm_save_lasx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

		/* Disable LASX & LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
		kvm_save_lsx(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

		/* Disable LSX & FPU */
		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
		kvm_save_fpu(&vcpu->arch.fpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_csr_euen(CSR_EUEN_FPEN);
	}

	preempt_enable();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	int intr = (int)irq->irq;

	if (intr > 0)
		kvm_queue_irq(vcpu, intr);
	else if (intr < 0)
		kvm_dequeue_irq(vcpu, -intr);
	else {
		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
		return -EINVAL;
	}

	kvm_vcpu_kick(vcpu);

	return 0;
}
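
/*
 * Sign convention of struct kvm_interrupt here: userspace passes a
 * positive irq number to queue an interrupt and the negated number to
 * dequeue it again.
 */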

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct kvm_vcpu *vcpu = filp->private_data;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	unsigned long timer_hz;
	struct loongarch_csrs *csr;

	vcpu->arch.vpid = 0;

	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

	vcpu->arch.handle_exit = kvm_handle_exit;
	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
	if (!vcpu->arch.csr)
		return -ENOMEM;

	/*
	 * All kvm exceptions share one exception entry, and the host <->
	 * guest switch also switches the ECFG.VS field, so keep the host
	 * ECFG.VS info here.
	 */
	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */
	timer_hz = calc_const_freq();
	kvm_init_timer(vcpu, timer_hz);

	/* Set DA (direct address) mode as the guest's initial mode */
	csr = vcpu->arch.csr;
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct kvm_context *context;

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu.
	 */
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
		if (context->last_vcpu == vcpu)
			context->last_vcpu = NULL;
	}
}

static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	bool migrated;
	struct kvm_context *context;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last vCPU to run on this CPU?
	 * If not, any old guest state from this vCPU will have been clobbered.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	if (migrated || (context->last_vcpu != vcpu))
		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
	context->last_vcpu = vcpu;

	/* Restore timer state regardless */
	kvm_restore_timer(vcpu);

	/* Control guest page CCA attribute */
	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);

	/* Don't bother restoring registers multiple times unless necessary */
	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
		return 0;

	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

	/* Restore guest CSR registers */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

	/* Restore Root.GINTC from unused Guest.GINTC register */
	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next vCPU from succeeding by matching a LL on
	 * the previous vCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

	return 0;
}
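
/*
 * KVM_LARCH_HWCSR_USABLE acts as a cache-valid flag for the block above:
 * as long as neither the physical CPU nor the resident vCPU has changed,
 * the guest CSR values are still live in hardware and the bulk restore
 * is skipped.
 */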

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Restore guest state to registers */
	_kvm_vcpu_load(vcpu, cpu);
	local_irq_restore(flags);
}

static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	kvm_lose_fpu(vcpu);

	/*
	 * Update the software CSR copy from hardware if it is stale. Most
	 * CSR registers are kept unchanged across a process context switch,
	 * except for registers like the remaining timer tick value and the
	 * injected interrupt state.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
		goto out;

	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
	kvm_save_timer(vcpu);
	/* Save Root.GINTC into unused Guest.GINTC register */
	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

	return 0;
}
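
/*
 * KVM_LARCH_SWCSR_LATEST mirrors the flag used on the load path: once the
 * software copy is up to date, repeated puts skip the bulk CSR save and
 * only the timer state and Root.GINTC are saved unconditionally.
 */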

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int cpu;
	unsigned long flags;

	local_irq_save(flags);
	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* Save guest state in registers */
	_kvm_vcpu_put(vcpu, cpu);
	local_irq_restore(flags);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;
	struct kvm_run *run = vcpu->run;

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
		if (!run->iocsr_io.is_write)
			kvm_complete_iocsr_read(vcpu, run);
	}

	if (run->immediate_exit)
		return r;

	/* Clear exit_reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	lose_fpu(1);
	vcpu_load(vcpu);
	kvm_sigset_activate(vcpu);
	r = kvm_pre_enter_guest(vcpu);
	if (r != RESUME_GUEST)
		goto out;

	guest_timing_enter_irqoff();
	guest_state_enter_irqoff();
	trace_kvm_enter(vcpu);
	r = kvm_loongarch_ops->enter_guest(run, vcpu);

	trace_kvm_out(vcpu);
	/*
	 * The guest exit was already recorded in kvm_handle_exit();
	 * the return value here must not be RESUME_GUEST
	 */
	local_irq_enable();
out:
	kvm_sigset_deactivate(vcpu);