// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, int_exits),
        STATS_DESC_COUNTER(VCPU, idle_exits),
        STATS_DESC_COUNTER(VCPU, cpucfg_exits),
        STATS_DESC_COUNTER(VCPU, signal_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};
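
/*
 * The offsets above describe the layout of the binary stats blob exposed
 * through the per-vCPU stats file descriptor: the header comes first, then
 * the id string, then the descriptor array, then the counter data, which is
 * why each offset builds on the size of the previous field.
 */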

/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
        if (!kvm_request_pending(vcpu))
                return RESUME_GUEST;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */

        if (kvm_dirty_ring_check_request(vcpu))
                return RESUME_HOST;

        return RESUME_GUEST;
}

/*
 * Check and handle pending signals and vCPU requests etc.
 * Run with irqs enabled and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
        int ret;

        /*
         * Check conditions before entering the guest
         */
        ret = xfer_to_guest_mode_handle_work(vcpu);
        if (ret < 0)
                return ret;

        ret = kvm_check_requests(vcpu);

        return ret;
}

/*
 * Called with irqs enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest, with irqs disabled
 *         Others if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
        int ret;

        do {
                ret = kvm_enter_guest_check(vcpu);
                if (ret != RESUME_GUEST)
                        break;

                /*
                 * Handle the vcpu timer, deliver interrupts, check pending
                 * requests and check the vmid before the vcpu enters the guest.
                 */
                local_irq_disable();
                kvm_deliver_intr(vcpu);
                kvm_deliver_exception(vcpu);
                /* Make sure the vcpu mode has been written */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
                kvm_check_vpid(vcpu);
                vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
                /* Clear KVM_LARCH_SWCSR_LATEST as the CSRs will change when entering the guest */
                vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

                if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
                        /* Make sure the vcpu mode has been written */
                        smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
                        local_irq_enable();
                        ret = -EAGAIN;
                }
        } while (ret != RESUME_GUEST);

        return ret;
}
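
/*
 * Note: if a request or pending work races in after interrupts were disabled,
 * the loop above re-enables interrupts, sets -EAGAIN and retries, so a
 * successful RESUME_GUEST return always leaves this function with interrupts
 * disabled and vcpu->mode set to IN_GUEST_MODE.
 */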

/*
 * Return RESUME_GUEST (1) to resume the guest or "<= 0" to resume the host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int ret = RESUME_GUEST;
        unsigned long estat = vcpu->arch.host_estat;
        u32 intr = estat & 0x1fff; /* Ignore NMI */
        u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

        vcpu->mode = OUTSIDE_GUEST_MODE;

        /* Set a default exit reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;

        guest_timing_exit_irqoff();
        guest_state_exit_irqoff();
        local_irq_enable();

        trace_kvm_exit(vcpu, ecode);
        if (ecode) {
                ret = kvm_handle_fault(vcpu, ecode);
        } else {
                WARN(!intr, "vm exiting with suspicious irq\n");
                ++vcpu->stat.int_exits;
        }

        if (ret == RESUME_GUEST)
                ret = kvm_pre_enter_guest(vcpu);

        if (ret != RESUME_GUEST) {
                local_irq_disable();
                return ret;
        }

        guest_timing_enter_irqoff();
        guest_state_enter_irqoff();
        trace_kvm_reenter(vcpu);

        return RESUME_GUEST;
}
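
/*
 * On the RESUME_GUEST path above, interrupts are left disabled and guest
 * timing/state accounting has been re-entered, so the low-level switch code
 * can jump straight back into the guest; any other return value goes back to
 * the host, also with interrupts disabled.
 */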

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return !!(vcpu->arch.irq_pending) &&
                vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        int ret;

        /* Protect from TOD sync and vcpu_load/put() */
        preempt_disable();
        ret = kvm_pending_timer(vcpu) ||
                kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
        preempt_enable();

        return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
        int i;

        kvm_debug("vCPU Register Dump:\n");
        kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
        kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

        for (i = 0; i < 32; i += 4) {
                kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
                       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
                       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
        }

        kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
                  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
                  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

        kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                struct kvm_mp_state *mp_state)
{
        *mp_state = vcpu->arch.mp_state;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                struct kvm_mp_state *mp_state)
{
        int ret = 0;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.mp_state = *mp_state;
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
        unsigned long gintc;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(id) & INVALID_GCSR)
                return -EINVAL;

        if (id == LOONGARCH_CSR_ESTAT) {
                /* ESTAT IP0~IP7 are read from GINTC */
                gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
                *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
                return 0;
        }

        /*
         * Get software CSR state since software state is consistent
         * with hardware for synchronous ioctl
         */
        *val = kvm_read_sw_gcsr(csr, id);

        return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
        int ret = 0, gintc;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(id) & INVALID_GCSR)
                return -EINVAL;

        if (id == LOONGARCH_CSR_ESTAT) {
                /* ESTAT IP0~IP7 are injected through GINTC */
                gintc = (val >> 2) & 0xff;
                kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

                gintc = val & ~(0xffUL << 2);
                kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

                return ret;
        }

        kvm_write_sw_gcsr(csr, id, val);

        return ret;
}
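
/*
 * In both helpers above, the guest's ESTAT interrupt-pending bits IP0~IP7
 * (bits 2..9) are folded into or split out of GINTC: userspace sees a single
 * ESTAT value, while the hardware injects IP0~IP7 through the GINTC register.
 */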

static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
        if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
                return -EINVAL;

        switch (id) {
        case 2:
                /* CPUCFG2 features unconditionally supported by KVM */
                *v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
                     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
                     CPUCFG2_LAM;
                /*
                 * For the ISA extensions listed below, if one is supported
                 * by the host, then it is also supported by KVM.
                 */
                if (cpu_has_lsx)
                        *v |= CPUCFG2_LSX;
                if (cpu_has_lasx)
                        *v |= CPUCFG2_LASX;

                return 0;
        default:
                /*
                 * No restrictions on other valid CPUCFG IDs' values, but
                 * CPUCFG data is limited to 32 bits as the LoongArch ISA
                 * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
                 */
                *v = U32_MAX;
                return 0;
        }
}

static int kvm_check_cpucfg(int id, u64 val)
{
        int ret;
        u64 mask = 0;

        ret = _kvm_get_cpucfg_mask(id, &mask);
        if (ret)
                return ret;

        if (val & ~mask)
                /* Unsupported features and/or the higher 32 bits should not be set */
                return -EINVAL;

        switch (id) {
        case 2:
                if (!(val & CPUCFG2_LLFTP))
                        /* Guests must have a constant timer */
                        return -EINVAL;
                if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
                        /* Single and double precision floating point must both be set when FP is enabled */
                        return -EINVAL;
                if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
                        /* LSX architecturally implies FP but val does not satisfy that */
                        return -EINVAL;
                if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
                        /* LASX architecturally implies LSX and FP but val does not satisfy that */
                        return -EINVAL;
                return 0;
        default:
                /*
                 * Values for the other CPUCFG IDs are not being further validated
                 * besides the mask check above.
                 */
                return 0;
        }
}
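
/*
 * Worked example of the checks above: a CPUCFG2 value with CPUCFG2_LASX set
 * but CPUCFG2_LSX clear is rejected, as is one with CPUCFG2_FP set but only
 * one of CPUCFG2_FPSP/CPUCFG2_FPDP; any bit outside the mask returned by
 * _kvm_get_cpucfg_mask() (including the upper 32 bits) already fails the
 * first check.
 */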

static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
                const struct kvm_one_reg *reg, u64 *v)
{
        int id, ret = 0;
        u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

        switch (type) {
        case KVM_REG_LOONGARCH_CSR:
                id = KVM_GET_IOC_CSR_IDX(reg->id);
                ret = _kvm_getcsr(vcpu, id, v);
                break;
        case KVM_REG_LOONGARCH_CPUCFG:
                id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
                if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
                        *v = vcpu->arch.cpucfg[id];
                else
                        ret = -EINVAL;
                break;
        case KVM_REG_LOONGARCH_KVM:
                switch (reg->id) {
                case KVM_REG_LOONGARCH_COUNTER:
                        *v = drdtime() + vcpu->kvm->arch.time_offset;
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret = 0;
        u64 v, size = reg->id & KVM_REG_SIZE_MASK;

        switch (size) {
        case KVM_REG_SIZE_U64:
                ret = kvm_get_one_reg(vcpu, reg, &v);
                if (ret)
                        return ret;
                ret = put_user(v, (u64 __user *)(long)reg->addr);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
                        const struct kvm_one_reg *reg, u64 v)
{
        int id, ret = 0;
        u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

        switch (type) {
        case KVM_REG_LOONGARCH_CSR:
                id = KVM_GET_IOC_CSR_IDX(reg->id);
                ret = _kvm_setcsr(vcpu, id, v);
                break;
        case KVM_REG_LOONGARCH_CPUCFG:
                id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
                ret = kvm_check_cpucfg(id, v);
                if (ret)
                        break;
                vcpu->arch.cpucfg[id] = (u32)v;
                break;
        case KVM_REG_LOONGARCH_KVM:
                switch (reg->id) {
                case KVM_REG_LOONGARCH_COUNTER:
                        /*
                         * The counter offset is a property of the whole VM
                         * (board), not of an individual vCPU, so only set it
                         * once, via the first vCPU, on SMP systems.
                         */
                        if (vcpu->vcpu_id == 0)
                                vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
                        break;
                case KVM_REG_LOONGARCH_VCPU_RESET:
                        kvm_reset_timer(vcpu);
                        memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
                        memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret = 0;
        u64 v, size = reg->id & KVM_REG_SIZE_MASK;

        switch (size) {
        case KVM_REG_SIZE_U64:
                ret = get_user(v, (u64 __user *)(long)reg->addr);
                if (ret)
                        return ret;
                break;
        default:
                return -EINVAL;
        }

        return kvm_set_one_reg(vcpu, reg, v);
}
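
/*
 * As the two wrappers above show, a ONE_REG id packs a size field
 * (KVM_REG_SIZE_U64 is the only size accepted here), a type field such as
 * KVM_REG_LOONGARCH_CSR, KVM_REG_LOONGARCH_CPUCFG or KVM_REG_LOONGARCH_KVM,
 * and a register index recovered with KVM_GET_IOC_CSR_IDX() or
 * KVM_GET_IOC_CPUCFG_IDX(). A rough userspace sketch (csr_num is a
 * hypothetical CSR number; the exact index encoding follows the uapi headers):
 *
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | csr_num,
 *              .addr = (__u64)&val,
 *      };
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */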

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                regs->gpr[i] = vcpu->arch.gprs[i];

        regs->pc = vcpu->arch.pc;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                vcpu->arch.gprs[i] = regs->gpr[i];

        vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
        vcpu->arch.pc = regs->pc;

        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        /* FPU is enabled by default, will support LSX/LASX later. */
        return -EINVAL;
}

static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case 2:
                return 0;
        default:
                return -ENXIO;
        }

        return -ENXIO;
}

static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
                break;
        default:
                break;
        }

        return ret;
}

static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        int ret = 0;
        uint64_t val;
        uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

        ret = _kvm_get_cpucfg_mask(attr->attr, &val);
        if (ret)
                return ret;

        ret = put_user(val, uaddr);

        return ret;
}

static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
                break;
        default:
                break;
        }

        return ret;
}

static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        return -ENXIO;
}

static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
                break;
        default:
                break;
        }

        return ret;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        long r;
        struct kvm_device_attr attr;
        void __user *argp = (void __user *)arg;
        struct kvm_vcpu *vcpu = filp->private_data;

        /*
         * Only the software CSR state should be modified here.
         *
         * If any hardware CSR register were modified directly, a
         * vcpu_load/vcpu_put pair would be needed, because the hardware CSR
         * registers are owned by the currently loaded vCPU; when switching to
         * another vCPU, that vCPU has to reload its CSR registers.
         *
         * When the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
         * bit is cleared in vcpu->arch.aux_inuse, and vcpu_load() checks this
         * flag and reloads the hardware CSR registers from the software copy.
         */

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG) {
                        r = kvm_set_reg(vcpu, &reg);
                        vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
                } else
                        r = kvm_get_reg(vcpu, &reg);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
                break;
        }
        default:
                r = -ENOIOCTLCMD;
                break;
        }

        return r;
}
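
/*
 * Note that KVM_SET_ONE_REG above clears KVM_LARCH_HWCSR_USABLE after the
 * write: since only the software CSR copy was updated, the next vcpu_load()
 * must push the software state back into the hardware guest CSRs.
 */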

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        int i = 0;

        fpu->fcc = vcpu->arch.fpu.fcc;
        fpu->fcsr = vcpu->arch.fpu.fcsr;
        for (i = 0; i < NUM_FPU_REGS; i++)
                memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        int i = 0;

        vcpu->arch.fpu.fcc = fpu->fcc;
        vcpu->arch.fpu.fcsr = fpu->fcsr;
        for (i = 0; i < NUM_FPU_REGS; i++)
                memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

        return 0;
}

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
        preempt_disable();

        /* Enable FPU */
        set_csr_euen(CSR_EUEN_FPEN);

        kvm_restore_fpu(&vcpu->arch.fpu);
        vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

        preempt_enable();
}

#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
        if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
                return -EINVAL;

        preempt_disable();

        /* Enable LSX for guest */
        set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
        switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
        case KVM_LARCH_FPU:
                /*
                 * Guest FPU state already loaded,
                 * only restore upper LSX state
                 */
                _restore_lsx_upper(&vcpu->arch.fpu);
                break;
        default:
                /*
                 * Neither FP nor LSX is active,
                 * restore the full LSX state
                 */
                kvm_restore_lsx(&vcpu->arch.fpu);
                break;
        }

        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
        vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
        preempt_enable();

        return 0;
}
#endif

#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
        if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
                return -EINVAL;

        preempt_disable();

        set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
        switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
        case KVM_LARCH_LSX:
        case KVM_LARCH_LSX | KVM_LARCH_FPU:
                /* Guest LSX state already loaded, only restore upper LASX state */
                _restore_lasx_upper(&vcpu->arch.fpu);
                break;
        case KVM_LARCH_FPU:
                /* Guest FP state already loaded, only restore upper LSX & LASX state */
                _restore_lsx_upper(&vcpu->arch.fpu);
                _restore_lasx_upper(&vcpu->arch.fpu);
                break;
        default:
                /* Neither FP nor LSX is active, restore the full LASX state */
                kvm_restore_lasx(&vcpu->arch.fpu);
                break;
        }

        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
        vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
        preempt_enable();

        return 0;
}
#endif

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
        preempt_disable();

        if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
                kvm_save_lasx(&vcpu->arch.fpu);
                vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

                /* Disable LASX & LSX & FPU */
                clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
        } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
                kvm_save_lsx(&vcpu->arch.fpu);
                vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

                /* Disable LSX & FPU */
                clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
        } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
                kvm_save_fpu(&vcpu->arch.fpu);
                vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

                /* Disable FPU */
                clear_csr_euen(CSR_EUEN_FPEN);
        }

        preempt_enable();
}
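
/*
 * The FP/vector context is handled lazily: kvm_own_fpu()/kvm_own_lsx()/
 * kvm_own_lasx() enable the unit and restore only the state that is not
 * already live, while kvm_lose_fpu() saves the widest unit currently in use
 * (LASX covers LSX and FP) and clears the corresponding EUEN enable bits.
 */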

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        int intr = (int)irq->irq;

        if (intr > 0)
                kvm_queue_irq(vcpu, intr);
        else if (intr < 0)
                kvm_dequeue_irq(vcpu, -intr);
        else {
                kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
                return -EINVAL;
        }

        kvm_vcpu_kick(vcpu);

        return 0;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct kvm_vcpu *vcpu = filp->private_data;

        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;

                kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        }

        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        unsigned long timer_hz;
        struct loongarch_csrs *csr;

        vcpu->arch.vpid = 0;

        hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

        vcpu->arch.handle_exit = kvm_handle_exit;
        vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
        vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
        if (!vcpu->arch.csr)
                return -ENOMEM;
        /*
         * All KVM exceptions share one exception entry, and the host <-> guest
         * switch also switches the ECFG.VS field, so keep the host ECFG.VS
         * value here.
         */
        vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

        /* Init */
        vcpu->arch.last_sched_cpu = -1;

        /*
         * Initialize guest register state to valid architectural reset state.
         */
        timer_hz = calc_const_freq();
        kvm_init_timer(vcpu, timer_hz);

        /* Start the guest in direct address (DA) translation mode */
        csr = vcpu->arch.csr;
        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

        /* Set cpuid */
        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);

        /* Start with no pending virtual guest interrupts */
        csr->csrs[LOONGARCH_CSR_GINTC] = 0;

        return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int cpu;
        struct kvm_context *context;

        hrtimer_cancel(&vcpu->arch.swtimer);
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
        kfree(vcpu->arch.csr);

        /*
         * If the vCPU is freed and reused as another vCPU, we don't want the
         * matching pointer wrongly hanging around in last_vcpu.
         */
        for_each_possible_cpu(cpu) {
                context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
                if (context->last_vcpu == vcpu)
                        context->last_vcpu = NULL;
        }
}

static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        bool migrated;
        struct kvm_context *context;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        /*
         * Have we migrated to a different CPU?
         * If so, any old guest TLB state may be stale.
         */
        migrated = (vcpu->arch.last_sched_cpu != cpu);

        /*
         * Was this the last vCPU to run on this CPU?
         * If not, any old guest state from this vCPU will have been clobbered.
         */
        context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
        if (migrated || (context->last_vcpu != vcpu))
                vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
        context->last_vcpu = vcpu;

        /* Restore timer state regardless */
        kvm_restore_timer(vcpu);

        /* Control guest page CCA attribute */
        change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);

        /* Don't bother restoring registers multiple times unless necessary */
        if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
                return 0;

        write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

        /* Restore guest CSR registers */
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

        /* Restore Root.GINTC from unused Guest.GINTC register */
        write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

        /*
         * We should clear linked load bit to break interrupted atomics. This
         * prevents a SC on the next vCPU from succeeding by matching a LL on
         * the previous vCPU.
         */
        if (vcpu->kvm->created_vcpus > 1)
                set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

        vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

        return 0;
}
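
/*
 * KVM_LARCH_HWCSR_USABLE acts as a cache flag: as long as this physical CPU
 * still holds this vCPU's guest CSR state (no migration and no other vCPU
 * ran here in between), vcpu_load can skip the full CSR restore above.
 */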

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        /* Restore guest state to registers */
        _kvm_vcpu_load(vcpu, cpu);
        local_irq_restore(flags);
}

static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        struct loongarch_csrs *csr = vcpu->arch.csr;

        kvm_lose_fpu(vcpu);

        /*
         * Update the software CSR state from hardware if it is stale. Most
         * CSR registers are kept unchanged across a process context switch,
         * except for registers such as the remaining timer tick value and
         * the injected interrupt state.
         */
        if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
                goto out;

        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

        vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
        kvm_save_timer(vcpu);
        /* Save Root.GINTC into unused Guest.GINTC register */
        csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

        return 0;
}
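
/*
 * KVM_LARCH_SWCSR_LATEST is the counterpart of KVM_LARCH_HWCSR_USABLE: once
 * the software copy is marked up to date, repeated vcpu_put calls skip the
 * bulk CSR save above; the flag is dropped in kvm_pre_enter_guest() right
 * before the guest can change the hardware CSRs again.
 */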

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        int cpu;
        unsigned long flags;

        local_irq_save(flags);
        cpu = smp_processor_id();
        vcpu->arch.last_sched_cpu = cpu;

        /* Save guest state in registers */
        _kvm_vcpu_put(vcpu, cpu);
        local_irq_restore(flags);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int r = -EINTR;
        struct kvm_run *run = vcpu->run;

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvm_complete_mmio_read(vcpu, run);
                vcpu->mmio_needed = 0;
        }

        if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
                if (!run->iocsr_io.is_write)
                        kvm_complete_iocsr_read(vcpu, run);
        }

        if (run->immediate_exit)
                return r;

        /* Clear exit_reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;
        lose_fpu(1);
        vcpu_load(vcpu);
        kvm_sigset_activate(vcpu);
        r = kvm_pre_enter_guest(vcpu);
        if (r != RESUME_GUEST)
                goto out;

        guest_timing_enter_irqoff();
        guest_state_enter_irqoff();
        trace_kvm_enter(vcpu);
        r = kvm_loongarch_ops->enter_guest(run, vcpu);

        trace_kvm_out(vcpu);
        /*
         * Guest exit is already recorded in kvm_handle_exit(), and the
         * return value here must not be RESUME_GUEST.
         */
        local_irq_enable();
out:
        kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);

        return r;
}