MIPS: KVM: Reformat code and comments
arch/mips/kvm/kvm_trap_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"

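/*
 * Translate a guest virtual address to a guest physical address. Only
 * unmapped segments (KSEG0/KSEG1) can be translated directly; anything
 * else is reported as invalid.
 */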
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	uint32_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1)) {
		gpa = CPHYSADDR(gva);
	} else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

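/*
 * Coprocessor Unusable exception: if the guest tried to use the FPU
 * (CE == 1), pass the exception on to the guest; otherwise the trapping
 * instruction touched CP0, so emulate it.
 */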
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
		er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
	else
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

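/*
 * TLB Modified exception: user and KSEG2/3 faults are handled by
 * kvm_mips_handle_tlbmod(); KSEG0 faults are not handled yet, and any
 * other address is an internal error.
 */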
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE) {
			ret = RESUME_GUEST;
		} else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

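/*
 * TLB Store miss: handle commpage and KSEG0 faults directly, let the
 * common TLB miss handler sort out user and KSEG2/3 addresses, and flag
 * anything else as an internal error.
 */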
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE) {
			ret = RESUME_GUEST;
		} else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(vcpu->arch.host_cp0_badvaddr,
						    vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

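/*
 * TLB Load (or instruction fetch) miss: handle commpage and KSEG0 faults
 * directly, pass user and KSEG2/3 faults to the common TLB miss handler,
 * and flag anything else as an internal error.
 */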
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
			  vcpu->arch.pc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE) {
			ret = RESUME_GUEST;
		} else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(vcpu->arch.host_cp0_badvaddr,
						    vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

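/*
 * Address Error on a store: kernel-mode accesses to KSEG0/KSEG1 are
 * treated as MMIO and emulated; everything else is an internal error.
 */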
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

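/*
 * Address Error on a load or instruction fetch: KSEG0/KSEG1 accesses are
 * treated as MMIO and emulated; everything else is an internal error.
 */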
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}

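/*
 * Syscall exception: delivered straight to the guest kernel.
 */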
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

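/*
 * Reserved Instruction exception: let kvm_mips_handle_ri() emulate the
 * instruction if it can, otherwise the exception is passed on to the
 * guest.
 */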
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

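/*
 * Breakpoint exception: delivered straight to the guest kernel.
 */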
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

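/* No extra per-VM or per-VCPU state is needed for trap & emulate */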
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

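/*
 * Initialise the guest CP0 registers to sane defaults so that the guest
 * boots as expected, with a KVM_MIPS_GUEST_TLB_SIZE entry TLB.
 */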
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected; for now we simulate a MIPS 24Kc
	 */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	kvm_write_c0_guest_config(cop0,
				  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
		     (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
	/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
	kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
					 (1 << CP0C3_ULRI));

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

	return 0;
}

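/*
 * Trap & emulate specific KVM_GET_ONE_REG registers: the CP0 Count
 * register and the timer control pseudo-registers.
 */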
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

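/*
 * Trap & emulate specific KVM_SET_ONE_REG registers: writes to Count,
 * Compare and Cause are routed through the timer emulation so the guest
 * timer state stays consistent.
 */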
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

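/* Callbacks registered with the generic MIPS KVM code for trap & emulate */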
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
};

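/*
 * Hand the trap & emulate callbacks to the core so it can dispatch guest
 * exits to the handlers above.
 */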
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}