/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"
/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the instruction being emulated
 * is in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
        unsigned long instpc)
{
        unsigned int dspcontrol;
        union mips_instruction insn;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        long epc = instpc;
        long nextpc = KVM_INVALID_INST;

        if (epc & 3)
                goto unaligned;

        /* Read the instruction */
        insn.word = kvm_get_inst((u32 *) epc, vcpu);

        if (insn.word == KVM_INVALID_INST)
                return KVM_INVALID_INST;

        switch (insn.i_format.opcode) {
                /* jr and jalr are in r_format format. */
        case spec_op:
                switch (insn.r_format.func) {
                case jalr_op:
                        arch->gprs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
                        nextpc = arch->gprs[insn.r_format.rs];
                        break;
                }
                break;

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
                case bltzl_op:
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgez_op:
                case bgezl_op:
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bltzal_op:
                case bltzall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgezal_op:
                case bgezall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                case bposge32_op:
                        if (!cpu_has_dsp)
                                goto sigill;

                        dspcontrol = rddsp(0x01);

                        if (dspcontrol >= 32)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                }
                break;

                /* These are unconditional and in j_format. */
        case jal_op:
                arch->gprs[31] = instpc + 8;
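                /* Fall through */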
        case j_op:
                epc += 4;
                epc >>= 28;
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                nextpc = epc;
                break;

                /* These are conditional and in i_format. */
        case beq_op:
        case beql_op:
                if (arch->gprs[insn.i_format.rs] ==
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bne_op:
        case bnel_op:
                if (arch->gprs[insn.i_format.rs] !=
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case blez_op:   /* POP06 */
#ifndef CONFIG_CPU_MIPSR6
        case blezl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] <= 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bgtz_op:   /* POP07 */
#ifndef CONFIG_CPU_MIPSR6
        case bgtzl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] > 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

                /* And now the FPA/cp1 branch instructions. */
        case cop1_op:
                kvm_err("%s: unsupported cop1_op\n", __func__);
                break;

#ifdef CONFIG_CPU_MIPSR6
        /* R6 added the following compact branches with forbidden slots */
        case blezl_op:  /* POP26 */
        case bgtzl_op:  /* POP27 */
                /* only rt == 0 isn't compact branch */
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop10_op:
        case pop30_op:
                /* only rs == rt == 0 is reserved, rest are compact branches */
                if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop66_op:
        case pop76_op:
                /* only rs == 0 isn't compact branch */
                if (insn.i_format.rs != 0)
                        goto compact_branch;
                break;
compact_branch:
                /*
                 * If we've hit an exception on the forbidden slot, then
                 * the branch must not have been taken.
                 */
                epc += 8;
                nextpc = epc;
                break;
#else
compact_branch:
                /* Compact branches not supported before R6 */
                break;
#endif
        }

        return nextpc;

unaligned:
        kvm_err("%s: unaligned epc\n", __func__);
        return nextpc;

sigill:
        kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
        return nextpc;
}
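
/*
 * Illustrative sketch, not part of the original file: how the taken-branch
 * targets above are formed. i_format.simmediate is a signed 16-bit word
 * offset, scaled to bytes and applied relative to the delay slot (epc + 4).
 * The helper name is hypothetical.
 */
static inline long kvm_example_branch_target(long epc, short simmediate)
{
        /* delay slot PC plus sign-extended offset, in bytes */
        return epc + 4 + ((long)simmediate << 2);
}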

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
        unsigned long branch_pc;
        enum emulation_result er = EMULATE_DONE;

        if (cause & CAUSEF_BD) {
                branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
                if (branch_pc == KVM_INVALID_INST) {
                        er = EMULATE_FAIL;
                } else {
                        vcpu->arch.pc = branch_pc;
                        kvm_debug("BD update_pc(): New PC: %#lx\n",
                                  vcpu->arch.pc);
                }
        } else
                vcpu->arch.pc += 4;

        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

        return er;
}
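
/*
 * Callers of update_pc() typically snapshot the PC first so a failed
 * emulation can be rolled back, as kvm_mips_emulate_CP0() does below:
 *
 *	curr_pc = vcpu->arch.pc;
 *	er = update_pc(vcpu, cause);
 *	if (er == EMULATE_FAIL)
 *		return er;
 *	...
 *	if (er == EMULATE_FAIL)
 *		vcpu->arch.pc = curr_pc;
 */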

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:       Virtual CPU.
 *
 * Returns:     1 if the CP0_Count timer is disabled by either the guest
 *              CP0_Cause.DC bit or the count_ctl.DC bit.
 *              0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
                (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
        s64 now_ns, periods;
        u64 delta;

        now_ns = ktime_to_ns(now);
        delta = now_ns + vcpu->arch.count_dyn_bias;

        if (delta >= vcpu->arch.count_period) {
                /* If delta is out of safe range the bias needs adjusting */
                periods = div64_s64(now_ns, vcpu->arch.count_period);
                vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
                /* Recalculate delta with new bias */
                delta = now_ns + vcpu->arch.count_dyn_bias;
        }

        /*
         * We've ensured that:
         *   delta < count_period
         *
         * Therefore the intermediate delta*count_hz will never overflow since
         * at the boundary condition:
         *   delta = count_period
         *   delta = NSEC_PER_SEC * 2^32 / count_hz
         *   delta * count_hz = NSEC_PER_SEC * 2^32
         */
        return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
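
/*
 * Worked example (a sketch, assuming the default 100 MHz count_hz set up by
 * kvm_mips_init_count() below): count_period = NSEC_PER_SEC * 2^32 /
 * count_hz = 10 * 2^32 ns, i.e. one CP0_Count wrap every ~42.95 seconds.
 * At the delta < count_period bound, delta * count_hz < 10^9 * 2^32 < 2^62,
 * so the intermediate product above cannot overflow a u64.
 */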

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:       Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:     Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                return vcpu->arch.count_resume;

        return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:       Virtual CPU.
 * @now:        Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:     The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t expires, threshold;
        u32 count, compare;
        int running;

        /* Calculate the biased and scaled guest CP0_Count */
        count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
        compare = kvm_read_c0_guest_compare(cop0);

        /*
         * Find whether CP0_Count has reached the closest timer interrupt. If
         * not, we shouldn't inject it.
         */
        if ((s32)(count - compare) < 0)
                return count;

        /*
         * The CP0_Count we're going to return has already reached the closest
         * timer interrupt. Quickly check if it really is a new interrupt by
         * looking at whether the interval until the hrtimer expiry time is
         * less than 1/4 of the timer period.
         */
        expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
        threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
        if (ktime_before(expires, threshold)) {
                /*
                 * Cancel it while we handle it so there's no chance of
                 * interference with the timeout handler.
                 */
                running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

                /* Nothing should be waiting on the timeout */
                kvm_mips_callbacks->queue_timer_int(vcpu);

                /*
                 * Restart the timer if it was running based on the expiry time
                 * we read, so that we don't push it back 2 periods.
                 */
                if (running) {
                        expires = ktime_add_ns(expires,
                                               vcpu->arch.count_period);
                        hrtimer_start(&vcpu->arch.comparecount_timer, expires,
                                      HRTIMER_MODE_ABS);
                }
        }

        return count;
}
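
/*
 * Numeric illustration (a sketch, assuming the default 100 MHz count_hz):
 * count_period is ~42.95 s, so the "1/4 of the timer period" threshold
 * above only treats an expiry within ~10.7 s of @now as a freshly pending
 * timer interrupt.
 */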

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:       Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns:     The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        /* If count disabled just read static copy of count */
        if (kvm_mips_count_disabled(vcpu))
                return kvm_read_c0_guest_count(cop0);

        return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:       Virtual CPU.
 * @count:      Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:     The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
        ktime_t now;

        /* stop hrtimer before finding time */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        now = ktime_get();

        /* find count at this point and handle pending hrtimer */
        *count = kvm_mips_read_count_running(vcpu, now);

        return now;
}
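
/*
 * Typical usage pattern (a sketch; kvm_mips_set_count_hz() and
 * kvm_mips_write_compare() below are the real call sites):
 *
 *	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... change timer parameters (count_hz, CP0_Compare, ...) ...
 *	kvm_mips_resume_hrtimer(vcpu, now, count);
 */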

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:       Virtual CPU.
 * @now:        ktime at point of resume.
 * @count:      CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
                                    ktime_t now, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 compare;
        u64 delta;
        ktime_t expire;

        /* Calculate timeout (wrap 0 to 2^32) */
        compare = kvm_read_c0_guest_compare(cop0);
        delta = (u64)(u32)(compare - count - 1) + 1;
        delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
        expire = ktime_add_ns(now, delta);

        /* Update hrtimer to use new timeout */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
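
/*
 * Sketch of the wrap trick above (illustrative helper, not in the original
 * file): (u32)(compare - count - 1) + 1 maps a distance of zero to a full
 * 2^32-tick period instead of an immediate timeout, e.g. compare == count
 * yields (u64)0xffffffff + 1 == 1ULL << 32.
 */
static inline u64 kvm_example_timeout_ticks(u32 compare, u32 count)
{
        return (u64)(u32)(compare - count - 1) + 1;
}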

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:       Virtual CPU.
 * @count:      Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t now;

        /* Calculate bias */
        now = kvm_mips_count_time(vcpu);
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        if (kvm_mips_count_disabled(vcpu))
                /* The timer's disabled, adjust the static count */
                kvm_write_c0_guest_count(cop0, count);
        else
                /* Update timeout */
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:       Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 * it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
        /* 100 MHz */
        vcpu->arch.count_hz = 100*1000*1000;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
                                          vcpu->arch.count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Starting at 0 */
        kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:       Virtual CPU.
 * @count_hz:   Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:     -EINVAL if @count_hz is out of range.
 *              0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        ktime_t now;
        u32 count;

        /* ensure the frequency is in a sensible range... */
        if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
                return -EINVAL;
        /* ... and has actually changed */
        if (vcpu->arch.count_hz == count_hz)
                return 0;

        /* Safely freeze timer so we can keep it continuous */
        dc = kvm_mips_count_disabled(vcpu);
        if (dc) {
                now = kvm_mips_count_time(vcpu);
                count = kvm_read_c0_guest_count(cop0);
        } else {
                now = kvm_mips_freeze_hrtimer(vcpu, &count);
        }

        /* Update the frequency */
        vcpu->arch.count_hz = count_hz;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Calculate adjusted bias so dynamic count is unchanged */
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        /* Update and resume hrtimer */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
        return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:       Virtual CPU.
 * @compare:    New CP0_Compare value.
 * @ack:        Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        u32 old_compare = kvm_read_c0_guest_compare(cop0);
        ktime_t now;
        u32 count;

        /* if unchanged, must just be an ack */
        if (old_compare == compare) {
                if (!ack)
                        return;
                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                kvm_write_c0_guest_compare(cop0, compare);
                return;
        }

        /* freeze_hrtimer() takes care of timer interrupts <= count */
        dc = kvm_mips_count_disabled(vcpu);
        if (!dc)
                now = kvm_mips_freeze_hrtimer(vcpu, &count);

        if (ack)
                kvm_mips_callbacks->dequeue_timer_int(vcpu);

        kvm_write_c0_guest_compare(cop0, compare);

        /* resume_hrtimer() takes care of timer interrupts > count */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:     The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;
        ktime_t now;

        /* Stop hrtimer */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        /* Set the static count from the dynamic count, handling pending TI */
        now = ktime_get();
        count = kvm_mips_read_count_running(vcpu, now);
        kvm_write_c0_guest_count(cop0, count);

        return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
        if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;

        kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

        /*
         * Set the dynamic count to match the static count.
         * This starts the hrtimer if count_ctl.DC allows it.
         * Otherwise it conveniently updates the biases.
         */
        count = kvm_read_c0_guest_count(cop0);
        kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:       Virtual CPU.
 * @count_ctl:  Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:     -EINVAL if reserved bits are set.
 *              0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        s64 changed = count_ctl ^ vcpu->arch.count_ctl;
        s64 delta;
        ktime_t expire, now;
        u32 count, compare;

        /* Only allow defined bits to be changed */
        if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
                return -EINVAL;

        /* Apply new value */
        vcpu->arch.count_ctl = count_ctl;

        /* Master CP0_Count disable */
        if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
                /* Is CP0_Cause.DC already disabling CP0_Count? */
                if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
                        if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
                                /* Just record the current time */
                                vcpu->arch.count_resume = ktime_get();
                } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
                        /* disable timer and record current time */
                        vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
                } else {
                        /*
                         * Calculate timeout relative to static count at resume
                         * time (wrap 0 to 2^32).
                         */
                        count = kvm_read_c0_guest_count(cop0);
                        compare = kvm_read_c0_guest_compare(cop0);
                        delta = (u64)(u32)(compare - count - 1) + 1;
                        delta = div_u64(delta * NSEC_PER_SEC,
                                        vcpu->arch.count_hz);
                        expire = ktime_add_ns(vcpu->arch.count_resume, delta);

                        /* Handle pending interrupt */
                        now = ktime_get();
                        if (ktime_compare(now, expire) >= 0)
                                /* Nothing should be waiting on the timeout */
                                kvm_mips_callbacks->queue_timer_int(vcpu);

                        /* Resume hrtimer without changing bias */
                        count = kvm_mips_read_count_running(vcpu, now);
                        kvm_mips_resume_hrtimer(vcpu, now, count);
                }
        }

        return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:               Virtual CPU.
 * @count_resume:       Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:     -EINVAL if out of valid range (0..now).
 *              0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
        /*
         * It doesn't make sense for the resume time to be in the future, as it
         * would be possible for the next interrupt to be more than a full
         * period in the future.
         */
        if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
                return -EINVAL;

        vcpu->arch.count_resume = ns_to_ktime(count_resume);
        return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:       Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 *
 * Returns:     The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
        /* Add the Count period to the current expiry time */
        hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
                               vcpu->arch.count_period);
        return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
                kvm_clear_c0_guest_status(cop0, ST0_ERL);
                vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
        kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
                  vcpu->arch.pending_exceptions);

        ++vcpu->stat.wait_exits;
        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
        if (!vcpu->arch.pending_exceptions) {
                vcpu->arch.wait = 1;
                kvm_vcpu_block(vcpu);

                /*
                 * If we are runnable, then definitely go off to user space to
                 * check if any I/O interrupts are pending.
                 */
                if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                }
        }

        return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long pc = vcpu->arch.pc;

        kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
        return EMULATE_FAIL;
}

/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:       VCPU with changed mappings.
 * @tlb:        TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that we
 * can arrange TLB flushes on this and other CPUs.
 */
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
                                          struct kvm_mips_tlb *tlb)
{
        int cpu, i;
        bool user;

        /* No need to flush for entries which are already invalid */
        if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
                return;
        /* User address space doesn't need flushing for KSeg2/3 changes */
        user = tlb->tlb_hi < KVM_GUEST_KSEG0;

        preempt_disable();

        /*
         * Probe the shadow host TLB for the entry being overwritten, if one
         * matches, invalidate it
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

        /* Invalidate the whole ASID on other CPUs */
        cpu = smp_processor_id();
        for_each_possible_cpu(i) {
                if (i == cpu)
                        continue;
                if (user)
                        vcpu->arch.guest_user_asid[i] = 0;
                vcpu->arch.guest_kernel_asid[i] = 0;
        }

        preempt_enable();
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int index = kvm_read_c0_guest_index(cop0);
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                kvm_debug("%s: illegal index: %d\n", __func__, index);
                kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                          pc, index, kvm_read_c0_guest_entryhi(cop0),
                          kvm_read_c0_guest_entrylo0(cop0),
                          kvm_read_c0_guest_entrylo1(cop0),
                          kvm_read_c0_guest_pagemask(cop0));
                index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
        }

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0),
                  kvm_read_c0_guest_pagemask(cop0));

        return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;
        int index;

        get_random_bytes(&index, sizeof(index));
        index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0));

        return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        long entryhi = kvm_read_c0_guest_entryhi(cop0);
        unsigned long pc = vcpu->arch.pc;
        int index = -1;

        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

        kvm_write_c0_guest_index(cop0, index);

        kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
                  index);

        return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config4 and ULRI are optional */
        unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config5 is optional */
        unsigned int mask = MIPS_CONF_M;

        /* KScrExist */
        mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;

        return mask;
}
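
/*
 * Note (an assumption from the architected Config4 layout, not stated in
 * the original): kscratch_enabled is a bitmask of implemented KScratch
 * registers, and the << 16 above places it in the KScrExist field at
 * Config4 bits 23:16.
 */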

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE;
                /* We don't support UFR or UFE */
        }

        return mask;
}

enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                           u32 *opc, u32 cause,
                                           struct kvm_run *run,
                                           struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
        int cpu, i;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        if (inst.co_format.co) {
                switch (inst.co_format.func) {
                case tlbr_op:   /*  Read indexed TLB entry  */
                        er = kvm_mips_emul_tlbr(vcpu);
                        break;
                case tlbwi_op:  /*  Write indexed  */
                        er = kvm_mips_emul_tlbwi(vcpu);
                        break;
                case tlbwr_op:  /*  Write random  */
                        er = kvm_mips_emul_tlbwr(vcpu);
                        break;
                case tlbp_op:   /* TLB Probe */
                        er = kvm_mips_emul_tlbp(vcpu);
                        break;
                case rfe_op:
                        kvm_err("!!!COP0_RFE!!!\n");
                        break;
                case eret_op:
                        er = kvm_mips_emul_eret(vcpu);
                        goto dont_update_pc;
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                }
        } else {
                rt = inst.c0r_format.rt;
                rd = inst.c0r_format.rd;
                sel = inst.c0r_format.sel;

                switch (inst.c0r_format.rs) {
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        /* Get reg */
                        if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                vcpu->arch.gprs[rt] =
                                    (s32)kvm_mips_read_count(vcpu);
                        } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                                vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        } else {
                                vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        }

                        trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case dmfc_op:
                        vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

                        trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);

                        if ((rd == MIPS_CP0_TLB_INDEX)
                            && (vcpu->arch.gprs[rt] >=
                                KVM_MIPS_GUEST_TLB_SIZE)) {
                                kvm_err("Invalid TLB Index: %ld",
                                        vcpu->arch.gprs[rt]);
                                er = EMULATE_FAIL;
                                break;
                        }
#define C0_EBASE_CORE_MASK 0xff
                        if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
                                /* Preserve CORE number */
                                kvm_change_c0_guest_ebase(cop0,
                                                          ~(C0_EBASE_CORE_MASK),
                                                          vcpu->arch.gprs[rt]);
                                kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
                                        kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                                u32 nasid =
                                        vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
                                if (((kvm_read_c0_guest_entryhi(cop0) &
                                      KVM_ENTRYHI_ASID) != nasid)) {
                                        trace_kvm_asid_change(vcpu,
                                                kvm_read_c0_guest_entryhi(cop0)
                                                        & KVM_ENTRYHI_ASID,
                                                nasid);

                                        /*
                                         * Regenerate/invalidate kernel MMU
                                         * context.
                                         * The user MMU context will be
                                         * regenerated lazily on re-entry to
                                         * guest user if the guest ASID actually
                                         * changes.
                                         */
                                        preempt_disable();
                                        cpu = smp_processor_id();
                                        kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
                                                                cpu, vcpu);
                                        vcpu->arch.guest_kernel_asid[cpu] =
                                                vcpu->arch.guest_kernel_mm.context.asid[cpu];
                                        for_each_possible_cpu(i)
                                                if (i != cpu)
                                                        vcpu->arch.guest_kernel_asid[i] = 0;
                                        preempt_enable();
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
                        }
                        /* Are we writing to COUNT */
                        else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                                goto done;
                        } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
                                /* If we are writing to COMPARE */
                                /* Clear pending timer interrupt, if any */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                                unsigned int old_val, val, change;

                                old_val = kvm_read_c0_guest_status(cop0);
                                val = vcpu->arch.gprs[rt];
                                change = val ^ old_val;

                                /* Make sure that the NMI bit is never set */
                                val &= ~ST0_NMI;

                                /*
                                 * Don't allow CU1 or FR to be set unless FPU
                                 * capability enabled and exists in guest
                                 * configuration.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        val &= ~(ST0_CU1 | ST0_FR);

                                /*
                                 * Also don't allow FR to be set if host doesn't
                                 * support it.
                                 */
                                if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
                                        val &= ~ST0_FR;

                                /* Handle changes in FPU mode */
                                preempt_disable();

                                /*
                                 * FPU and Vector register state is made
                                 * UNPREDICTABLE by a change of FR, so don't
                                 * even bother saving it.
                                 */
                                if (change & ST0_FR)
                                        kvm_drop_fpu(vcpu);

                                /*
                                 * If MSA state is already live, it is undefined
                                 * how it interacts with FR=0 FPU state, and we
                                 * don't want to hit reserved instruction
                                 * exceptions trying to save the MSA state later
                                 * when CU=1 && FR=1, so play it safe and save
                                 * it first.
                                 */
                                if (change & ST0_CU1 && !(val & ST0_FR) &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        kvm_lose_fpu(vcpu);

                                /*
                                 * Propagate CU1 (FPU enable) changes
                                 * immediately if the FPU context is already
                                 * loaded. When disabling we leave the context
                                 * loaded so it can be quickly enabled again in
                                 * the near future.
                                 */
                                if (change & ST0_CU1 &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_status(ST0_CU1, val);

                                preempt_enable();

                                kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                /*
                                 * If FPU present, we need CU1/FR bits to take
                                 * effect fairly soon.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
                                unsigned int old_val, val, change, wrmask;

                                old_val = kvm_read_c0_guest_config5(cop0);
                                val = vcpu->arch.gprs[rt];

                                /* Only a few bits are writable in Config5 */
                                wrmask = kvm_mips_config5_wrmask(vcpu);
                                change = (val ^ old_val) & wrmask;
                                val = old_val ^ change;

                                /* Handle changes in FPU/MSA modes */
                                preempt_disable();

                                /*
                                 * Propagate FRE changes immediately if the FPU
                                 * context is already loaded.
                                 */
                                if (change & MIPS_CONF5_FRE &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_config5(MIPS_CONF5_FRE, val);

                                /*
                                 * Propagate MSAEn changes immediately if the
                                 * MSA context is already loaded. When disabling
                                 * we leave the context loaded so it can be
                                 * quickly enabled again in the near future.
                                 */
                                if (change & MIPS_CONF5_MSAEN &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        change_c0_config5(MIPS_CONF5_MSAEN,
                                                          val);

                                preempt_enable();

                                kvm_write_c0_guest_config5(cop0, val);
                        } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
                                u32 old_cause, new_cause;

                                old_cause = kvm_read_c0_guest_cause(cop0);
                                new_cause = vcpu->arch.gprs[rt];
                                /* Update R/W bits */
                                kvm_change_c0_guest_cause(cop0, 0x08800300,
                                                          new_cause);
                                /* DC bit enabling/disabling timer? */
                                if ((old_cause ^ new_cause) & CAUSEF_DC) {
                                        if (new_cause & CAUSEF_DC)
                                                kvm_mips_count_disable_cause(vcpu);
                                        else
                                                kvm_mips_count_enable_cause(vcpu);
                                }
                        } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
                                u32 mask = MIPS_HWRENA_CPUNUM |
                                           MIPS_HWRENA_SYNCISTEP |
                                           MIPS_HWRENA_CC |
                                           MIPS_HWRENA_CCRES;

                                if (kvm_read_c0_guest_config3(cop0) &
                                    MIPS_CONF3_ULRI)
                                        mask |= MIPS_HWRENA_ULR;
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
                        } else {
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        }
                        break;

                case dmtc_op:
                        kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
                                vcpu->arch.pc, rt, rd, sel);
                        trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        er = EMULATE_FAIL;
                        break;

                case mfmc0_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
1358                         if (rt != 0)
1359                                 vcpu->arch.gprs[rt] =
1360                                     kvm_read_c0_guest_status(cop0);
1361                         /* EI */
1362                         if (inst.mfmc0_format.sc) {
1363                                 kvm_debug("[%#lx] mfmc0_op: EI\n",
1364                                           vcpu->arch.pc);
1365                                 kvm_set_c0_guest_status(cop0, ST0_IE);
1366                         } else {
1367                                 kvm_debug("[%#lx] mfmc0_op: DI\n",
1368                                           vcpu->arch.pc);
1369                                 kvm_clear_c0_guest_status(cop0, ST0_IE);
1370                         }
1371
1372                         break;
1373
1374                 case wrpgpr_op:
1375                         {
1376                                 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1377                                 u32 pss =
1378                                     (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1379                                 /*
1380                                  * We don't support any shadow register sets, so
                                 * SRSCtl[PSS] == SRSCtl[CSS] == 0
1382                                  */
1383                                 if (css || pss) {
1384                                         er = EMULATE_FAIL;
1385                                         break;
1386                                 }
1387                                 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1388                                           vcpu->arch.gprs[rt]);
1389                                 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1390                         }
1391                         break;
1392                 default:
1393                         kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1394                                 vcpu->arch.pc, inst.c0r_format.rs);
1395                         er = EMULATE_FAIL;
1396                         break;
1397                 }
1398         }
1399
1400 done:
1401         /* Rollback PC only if emulation was unsuccessful */
1402         if (er == EMULATE_FAIL)
1403                 vcpu->arch.pc = curr_pc;
1404
1405 dont_update_pc:
1406         /*
1407          * This is for special instructions whose emulation
1408          * updates the PC, so do not overwrite the PC under
1409          * any circumstances
1410          */
1411
1412         return er;
1413 }
1414
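/*
 * Emulate a guest store to an MMIO address: translate the faulting guest
 * virtual address (host_cp0_badvaddr) to a guest physical address, fill in
 * the kvm_run mmio block with the access width and data, and return
 * EMULATE_DO_MMIO so the run loop can exit to userspace to perform the
 * access. The PC is rolled back if anything goes wrong.
 */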
1415 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
1416                                              u32 cause,
1417                                              struct kvm_run *run,
1418                                              struct kvm_vcpu *vcpu)
1419 {
1420         enum emulation_result er = EMULATE_DO_MMIO;
1421         u32 rt;
1422         u32 bytes;
1423         void *data = run->mmio.data;
1424         unsigned long curr_pc;
1425
1426         /*
1427          * Update PC and hold onto current PC in case there is
1428          * an error and we want to rollback the PC
1429          */
1430         curr_pc = vcpu->arch.pc;
1431         er = update_pc(vcpu, cause);
1432         if (er == EMULATE_FAIL)
1433                 return er;
1434
1435         rt = inst.i_format.rt;
1436
1437         switch (inst.i_format.opcode) {
1438         case sb_op:
1439                 bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
1444                 run->mmio.phys_addr =
1445                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1446                                                    host_cp0_badvaddr);
1447                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1448                         er = EMULATE_FAIL;
1449                         break;
1450                 }
1451                 run->mmio.len = bytes;
1452                 run->mmio.is_write = 1;
1453                 vcpu->mmio_needed = 1;
1454                 vcpu->mmio_is_write = 1;
1455                 *(u8 *) data = vcpu->arch.gprs[rt];
1456                 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1457                           vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1458                           *(u8 *) data);
1459
1460                 break;
1461
1462         case sw_op:
1463                 bytes = 4;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
1468                 run->mmio.phys_addr =
1469                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1470                                                    host_cp0_badvaddr);
1471                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1472                         er = EMULATE_FAIL;
1473                         break;
1474                 }
1475
1476                 run->mmio.len = bytes;
1477                 run->mmio.is_write = 1;
1478                 vcpu->mmio_needed = 1;
1479                 vcpu->mmio_is_write = 1;
1480                 *(u32 *) data = vcpu->arch.gprs[rt];
1481
1482                 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1483                           vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1484                           vcpu->arch.gprs[rt], *(u32 *) data);
1485                 break;
1486
1487         case sh_op:
1488                 bytes = 2;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
1493                 run->mmio.phys_addr =
1494                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1495                                                    host_cp0_badvaddr);
1496                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1497                         er = EMULATE_FAIL;
1498                         break;
1499                 }
1500
1501                 run->mmio.len = bytes;
1502                 run->mmio.is_write = 1;
1503                 vcpu->mmio_needed = 1;
1504                 vcpu->mmio_is_write = 1;
1505                 *(u16 *) data = vcpu->arch.gprs[rt];
1506
1507                 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1508                           vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(u16 *) data);
1510                 break;
1511
1512         default:
1513                 kvm_err("Store not yet supported (inst=0x%08x)\n",
1514                         inst.word);
1515                 er = EMULATE_FAIL;
1516                 break;
1517         }
1518
1519         /* Rollback PC if emulation was unsuccessful */
1520         if (er == EMULATE_FAIL)
1521                 vcpu->arch.pc = curr_pc;
1522
1523         return er;
1524 }
1525
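/*
 * Emulate a guest load from an MMIO address. The destination GPR is
 * remembered in io_gpr and the resume PC in io_pc; once userspace has
 * performed the access, kvm_mips_complete_mmio_load() writes the result
 * back and restores the PC. mmio_needed doubles as a flag: 2 requests
 * sign extension of the loaded value, 1 zero extension.
 */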
1526 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1527                                             u32 cause, struct kvm_run *run,
1528                                             struct kvm_vcpu *vcpu)
1529 {
1530         enum emulation_result er = EMULATE_DO_MMIO;
1531         unsigned long curr_pc;
1532         u32 op, rt;
1533         u32 bytes;
1534
1535         rt = inst.i_format.rt;
1536         op = inst.i_format.opcode;
1537
1538         /*
1539          * Find the resume PC now while we have safe and easy access to the
1540          * prior branch instruction, and save it for
1541          * kvm_mips_complete_mmio_load() to restore later.
1542          */
1543         curr_pc = vcpu->arch.pc;
1544         er = update_pc(vcpu, cause);
1545         if (er == EMULATE_FAIL)
1546                 return er;
1547         vcpu->arch.io_pc = vcpu->arch.pc;
1548         vcpu->arch.pc = curr_pc;
1549
1550         vcpu->arch.io_gpr = rt;
1551
1552         switch (op) {
1553         case lw_op:
1554                 bytes = 4;
1555                 if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
1558                         er = EMULATE_FAIL;
1559                         break;
1560                 }
1561                 run->mmio.phys_addr =
1562                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1563                                                    host_cp0_badvaddr);
1564                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1565                         er = EMULATE_FAIL;
1566                         break;
1567                 }
1568
1569                 run->mmio.len = bytes;
1570                 run->mmio.is_write = 0;
1571                 vcpu->mmio_needed = 1;
1572                 vcpu->mmio_is_write = 0;
1573                 break;
1574
1575         case lh_op:
1576         case lhu_op:
1577                 bytes = 2;
1578                 if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
1581                         er = EMULATE_FAIL;
1582                         break;
1583                 }
1584                 run->mmio.phys_addr =
1585                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1586                                                    host_cp0_badvaddr);
1587                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1588                         er = EMULATE_FAIL;
1589                         break;
1590                 }
1591
                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_is_write = 0;

                /* mmio_needed == 2 asks for sign extension of the result */
                if (op == lh_op)
                        vcpu->mmio_needed = 2;
                else
                        vcpu->mmio_needed = 1;
1601
1602                 break;
1603
1604         case lbu_op:
1605         case lb_op:
1606                 bytes = 1;
1607                 if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
1610                         er = EMULATE_FAIL;
1611                         break;
1612                 }
1613                 run->mmio.phys_addr =
1614                     kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1615                                                    host_cp0_badvaddr);
1616                 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1617                         er = EMULATE_FAIL;
1618                         break;
1619                 }
1620
1621                 run->mmio.len = bytes;
1622                 run->mmio.is_write = 0;
1623                 vcpu->mmio_is_write = 0;
1624
1625                 if (op == lb_op)
1626                         vcpu->mmio_needed = 2;
1627                 else
1628                         vcpu->mmio_needed = 1;
1629
1630                 break;
1631
1632         default:
1633                 kvm_err("Load not yet supported (inst=0x%08x)\n",
1634                         inst.word);
1635                 er = EMULATE_FAIL;
1636                 break;
1637         }
1638
1639         return er;
1640 }
1641
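/*
 * Emulate a guest CACHE instruction. Index ops are handled by blasting the
 * entire host I-cache or D-cache rather than iterating over indexes. Hit
 * ops need a valid host TLB mapping for the target address first (faulting
 * one in from the guest TLB if necessary) before the individual line can
 * be flushed.
 */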
1642 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1643                                              u32 *opc, u32 cause,
1644                                              struct kvm_run *run,
1645                                              struct kvm_vcpu *vcpu)
1646 {
1647         struct mips_coproc *cop0 = vcpu->arch.cop0;
1648         enum emulation_result er = EMULATE_DONE;
1649         u32 cache, op_inst, op, base;
1650         s16 offset;
1651         struct kvm_vcpu_arch *arch = &vcpu->arch;
1652         unsigned long va;
1653         unsigned long curr_pc;
1654
1655         /*
1656          * Update PC and hold onto current PC in case there is
1657          * an error and we want to rollback the PC
1658          */
1659         curr_pc = vcpu->arch.pc;
1660         er = update_pc(vcpu, cause);
1661         if (er == EMULATE_FAIL)
1662                 return er;
1663
1664         base = inst.i_format.rs;
1665         op_inst = inst.i_format.rt;
1666         if (cpu_has_mips_r6)
1667                 offset = inst.spec3_format.simmediate;
1668         else
1669                 offset = inst.i_format.simmediate;
1670         cache = op_inst & CacheOp_Cache;
1671         op = op_inst & CacheOp_Op;
1672
1673         va = arch->gprs[base] + offset;
1674
        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1676                   cache, op, base, arch->gprs[base], offset);
1677
1678         /*
1679          * Treat INDEX_INV as a nop, basically issued by Linux on startup to
         * Linux issues Index_Writeback_Inv at startup to invalidate the
         * caches entirely by stepping through all the ways/indexes. Rather
         * than emulating each individual index operation, blast the whole
         * host cache once.
1683         if (op == Index_Writeback_Inv) {
                kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1685                           vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1686                           arch->gprs[base], offset);
1687
1688                 if (cache == Cache_D)
1689                         r4k_blast_dcache();
1690                 else if (cache == Cache_I)
1691                         r4k_blast_icache();
1692                 else {
1693                         kvm_err("%s: unsupported CACHE INDEX operation\n",
1694                                 __func__);
1695                         return EMULATE_FAIL;
1696                 }
1697
1698 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1699                 kvm_mips_trans_cache_index(inst, opc, vcpu);
1700 #endif
1701                 goto done;
1702         }
1703
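        /*
         * Hit ops operate on this CPU's host TLB and caches, so keep the
         * vcpu from migrating until the line has been flushed (our reading
         * of why preemption is disabled across this section).
         */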
1704         preempt_disable();
1705         if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1706                 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1707                     kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1708                         kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1709                                 __func__, va, vcpu, read_c0_entryhi());
1710                         er = EMULATE_FAIL;
1711                         preempt_enable();
1712                         goto done;
1713                 }
1714         } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1715                    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1716                 int index;
1717
1718                 /* If an entry already exists then skip */
1719                 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1720                         goto skip_fault;
1721
                /*
                 * If the address is not in the guest TLB, give the guest a
                 * TLB miss fault; the guest's handler will do the right thing.
                 */
1726                 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1727                                                   (kvm_read_c0_guest_entryhi
1728                                                    (cop0) & KVM_ENTRYHI_ASID));
1729
1730                 if (index < 0) {
1731                         vcpu->arch.host_cp0_badvaddr = va;
1732                         vcpu->arch.pc = curr_pc;
1733                         er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1734                                                          vcpu);
1735                         preempt_enable();
1736                         goto dont_update_pc;
1737                 } else {
1738                         struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1739                         /*
1740                          * Check if the entry is valid, if not then setup a TLB
1741                          * invalid exception to the guest
1742                          */
1743                         if (!TLB_IS_VALID(*tlb, va)) {
1744                                 vcpu->arch.host_cp0_badvaddr = va;
1745                                 vcpu->arch.pc = curr_pc;
1746                                 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1747                                                                 run, vcpu);
1748                                 preempt_enable();
1749                                 goto dont_update_pc;
1750                         }
1751                         /*
1752                          * We fault an entry from the guest tlb to the
1753                          * shadow host TLB
1754                          */
1755                         if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
1756                                 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1757                                         __func__, va, index, vcpu,
1758                                         read_c0_entryhi());
1759                                 er = EMULATE_FAIL;
1760                                 preempt_enable();
1761                                 goto done;
1762                         }
1763                 }
1764         } else {
                kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1766                         cache, op, base, arch->gprs[base], offset);
1767                 er = EMULATE_FAIL;
1768                 preempt_enable();
1769                 goto done;
1770
1771         }
1772
1773 skip_fault:
1774         /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1775         if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1776                 flush_dcache_line(va);
1777
1778 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1779                 /*
                 * Replace the CACHE instruction with a SYNCI; not equivalent,
                 * but it avoids a trap on every execution
1782                  */
1783                 kvm_mips_trans_cache_va(inst, opc, vcpu);
1784 #endif
1785         } else if (op_inst == Hit_Invalidate_I) {
1786                 flush_dcache_line(va);
1787                 flush_icache_line(va);
1788
1789 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1790                 /* Replace the CACHE instruction, with a SYNCI */
1791                 kvm_mips_trans_cache_va(inst, opc, vcpu);
1792 #endif
1793         } else {
                kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1795                         cache, op, base, arch->gprs[base], offset);
1796                 er = EMULATE_FAIL;
1797         }
1798
1799         preempt_enable();
1800 done:
1801         /* Rollback PC only if emulation was unsuccessful */
1802         if (er == EMULATE_FAIL)
1803                 vcpu->arch.pc = curr_pc;
1804
1805 dont_update_pc:
1806         /*
1807          * This is for exceptions whose emulation updates the PC, so do not
1808          * overwrite the PC under any circumstances
1809          */
1810
1811         return er;
1812 }
1813
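/*
 * Top-level dispatcher for trapped guest instructions. If the exception
 * was taken in a branch delay slot (Cause.BD), the faulting instruction
 * lives at EPC + 4, hence the opc += 1 below.
 */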
1814 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
1815                                             struct kvm_run *run,
1816                                             struct kvm_vcpu *vcpu)
1817 {
1818         union mips_instruction inst;
1819         enum emulation_result er = EMULATE_DONE;
1820
1821         /* Fetch the instruction. */
1822         if (cause & CAUSEF_BD)
1823                 opc += 1;
1824
        inst.word = kvm_get_inst(opc, vcpu);
        if (inst.word == KVM_INVALID_INST) {
                kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
                return EMULATE_FAIL;
        }

1827         switch (inst.r_format.opcode) {
1828         case cop0_op:
1829                 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1830                 break;
1831         case sb_op:
1832         case sh_op:
1833         case sw_op:
1834                 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1835                 break;
1836         case lb_op:
1837         case lbu_op:
1838         case lhu_op:
1839         case lh_op:
1840         case lw_op:
1841                 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1842                 break;
1843
1844 #ifndef CONFIG_CPU_MIPSR6
1845         case cache_op:
1846                 ++vcpu->stat.cache_exits;
1847                 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1848                 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1849                 break;
1850 #else
1851         case spec3_op:
1852                 switch (inst.spec3_format.func) {
1853                 case cache6_op:
1854                         ++vcpu->stat.cache_exits;
1855                         trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1856                         er = kvm_mips_emulate_cache(inst, opc, cause, run,
1857                                                     vcpu);
1858                         break;
1859                 default:
1860                         goto unknown;
                }
1862                 break;
1863 unknown:
1864 #endif
1865
1866         default:
1867                 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1868                         inst.word);
1869                 kvm_arch_vcpu_dump_regs(vcpu);
1870                 er = EMULATE_FAIL;
1871                 break;
1872         }
1873
1874         return er;
1875 }
1876
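/*
 * The exception delivery helpers below all follow the architected pattern:
 * save the PC to the guest EPC, set Status.EXL, mirror Cause.BD from the
 * host exception, write the ExcCode field, and point the PC at the guest
 * exception vector (KSEG0 + 0x180 for general exceptions, KSEG0 + 0x0 for
 * TLB refills taken with EXL clear).
 */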
1877 enum emulation_result kvm_mips_emulate_syscall(u32 cause,
1878                                                u32 *opc,
1879                                                struct kvm_run *run,
1880                                                struct kvm_vcpu *vcpu)
1881 {
1882         struct mips_coproc *cop0 = vcpu->arch.cop0;
1883         struct kvm_vcpu_arch *arch = &vcpu->arch;
1884         enum emulation_result er = EMULATE_DONE;
1885
1886         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1887                 /* save old pc */
1888                 kvm_write_c0_guest_epc(cop0, arch->pc);
1889                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1890
1891                 if (cause & CAUSEF_BD)
1892                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1893                 else
1894                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1895
1896                 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1897
1898                 kvm_change_c0_guest_cause(cop0, (0xff),
1899                                           (EXCCODE_SYS << CAUSEB_EXCCODE));
1900
1901                 /* Set PC to the exception entry point */
1902                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1903
1904         } else {
1905                 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1906                 er = EMULATE_FAIL;
1907         }
1908
1909         return er;
1910 }
1911
1912 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
1913                                                   u32 *opc,
1914                                                   struct kvm_run *run,
1915                                                   struct kvm_vcpu *vcpu)
1916 {
1917         struct mips_coproc *cop0 = vcpu->arch.cop0;
1918         struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1920                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1921
1922         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1923                 /* save old pc */
1924                 kvm_write_c0_guest_epc(cop0, arch->pc);
1925                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1926
1927                 if (cause & CAUSEF_BD)
1928                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1929                 else
1930                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1931
1932                 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1933                           arch->pc);
1934
1935                 /* set pc to the exception entry point */
1936                 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1937
1938         } else {
1939                 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1940                           arch->pc);
1941
1942                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1943         }
1944
1945         kvm_change_c0_guest_cause(cop0, (0xff),
1946                                   (EXCCODE_TLBL << CAUSEB_EXCCODE));
1947
1948         /* setup badvaddr, context and entryhi registers for the guest */
1949         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1950         /* XXXKYMA: is the context register used by linux??? */
1951         kvm_write_c0_guest_entryhi(cop0, entryhi);
1952         /* Blow away the shadow host TLBs */
1953         kvm_mips_flush_host_tlb(1);
1954
1955         return EMULATE_DONE;
1956 }
1957
1958 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
1959                                                  u32 *opc,
1960                                                  struct kvm_run *run,
1961                                                  struct kvm_vcpu *vcpu)
1962 {
1963         struct mips_coproc *cop0 = vcpu->arch.cop0;
1964         struct kvm_vcpu_arch *arch = &vcpu->arch;
1965         unsigned long entryhi =
1966                 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1967                 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1968
1969         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1970                 /* save old pc */
1971                 kvm_write_c0_guest_epc(cop0, arch->pc);
1972                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1973
1974                 if (cause & CAUSEF_BD)
1975                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1976                 else
1977                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1978
1979                 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1980                           arch->pc);
1981
1982                 /* set pc to the exception entry point */
1983                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1984
1985         } else {
                kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1987                           arch->pc);
1988                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1989         }
1990
1991         kvm_change_c0_guest_cause(cop0, (0xff),
1992                                   (EXCCODE_TLBL << CAUSEB_EXCCODE));
1993
1994         /* setup badvaddr, context and entryhi registers for the guest */
1995         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1996         /* XXXKYMA: is the context register used by linux??? */
1997         kvm_write_c0_guest_entryhi(cop0, entryhi);
1998         /* Blow away the shadow host TLBs */
1999         kvm_mips_flush_host_tlb(1);
2000
2001         return EMULATE_DONE;
2002 }
2003
2004 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
2005                                                   u32 *opc,
2006                                                   struct kvm_run *run,
2007                                                   struct kvm_vcpu *vcpu)
2008 {
2009         struct mips_coproc *cop0 = vcpu->arch.cop0;
2010         struct kvm_vcpu_arch *arch = &vcpu->arch;
2011         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2012                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2013
2014         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2015                 /* save old pc */
2016                 kvm_write_c0_guest_epc(cop0, arch->pc);
2017                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2018
2019                 if (cause & CAUSEF_BD)
2020                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2021                 else
2022                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2023
2024                 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2025                           arch->pc);
2026
2027                 /* Set PC to the exception entry point */
2028                 arch->pc = KVM_GUEST_KSEG0 + 0x0;
2029         } else {
2030                 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2031                           arch->pc);
2032                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2033         }
2034
2035         kvm_change_c0_guest_cause(cop0, (0xff),
2036                                   (EXCCODE_TLBS << CAUSEB_EXCCODE));
2037
2038         /* setup badvaddr, context and entryhi registers for the guest */
2039         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2040         /* XXXKYMA: is the context register used by linux??? */
2041         kvm_write_c0_guest_entryhi(cop0, entryhi);
2042         /* Blow away the shadow host TLBs */
2043         kvm_mips_flush_host_tlb(1);
2044
2045         return EMULATE_DONE;
2046 }
2047
2048 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
2049                                                  u32 *opc,
2050                                                  struct kvm_run *run,
2051                                                  struct kvm_vcpu *vcpu)
2052 {
2053         struct mips_coproc *cop0 = vcpu->arch.cop0;
2054         struct kvm_vcpu_arch *arch = &vcpu->arch;
2055         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2056                 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2057
2058         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2059                 /* save old pc */
2060                 kvm_write_c0_guest_epc(cop0, arch->pc);
2061                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2062
2063                 if (cause & CAUSEF_BD)
2064                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2065                 else
2066                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2067
                kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
2069                           arch->pc);
2070
2071                 /* Set PC to the exception entry point */
2072                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2073         } else {
                kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
2075                           arch->pc);
2076                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2077         }
2078
2079         kvm_change_c0_guest_cause(cop0, (0xff),
2080                                   (EXCCODE_TLBS << CAUSEB_EXCCODE));
2081
2082         /* setup badvaddr, context and entryhi registers for the guest */
2083         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2084         /* XXXKYMA: is the context register used by linux??? */
2085         kvm_write_c0_guest_entryhi(cop0, entryhi);
2086         /* Blow away the shadow host TLBs */
2087         kvm_mips_flush_host_tlb(1);
2088
2089         return EMULATE_DONE;
2090 }
2091
2092 /* TLBMOD: store into address matching TLB with Dirty bit off */
2093 enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
2094                                              struct kvm_run *run,
2095                                              struct kvm_vcpu *vcpu)
2096 {
2097         enum emulation_result er = EMULATE_DONE;
2098 #ifdef DEBUG
2099         struct mips_coproc *cop0 = vcpu->arch.cop0;
2100         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2101                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2102         int index;
2103
2104         /* If address not in the guest TLB, then we are in trouble */
2105         index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
2106         if (index < 0) {
2107                 /* XXXKYMA Invalidate and retry */
2108                 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
2109                 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
2110                      __func__, entryhi);
2111                 kvm_mips_dump_guest_tlbs(vcpu);
2112                 kvm_mips_dump_host_tlbs();
2113                 return EMULATE_FAIL;
2114         }
2115 #endif
2116
2117         er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2118         return er;
2119 }
2120
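/*
 * Unlike TLB refills, a TLB Modified exception is always delivered to the
 * general exception vector at KSEG0 + 0x180, whether or not EXL was set.
 */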
2121 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2122                                               u32 *opc,
2123                                               struct kvm_run *run,
2124                                               struct kvm_vcpu *vcpu)
2125 {
2126         struct mips_coproc *cop0 = vcpu->arch.cop0;
2127         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2128                         (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2129         struct kvm_vcpu_arch *arch = &vcpu->arch;
2130
2131         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2132                 /* save old pc */
2133                 kvm_write_c0_guest_epc(cop0, arch->pc);
2134                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2135
2136                 if (cause & CAUSEF_BD)
2137                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2138                 else
2139                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2140
2141                 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2142                           arch->pc);
2143
2144                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2145         } else {
2146                 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2147                           arch->pc);
2148                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2149         }
2150
2151         kvm_change_c0_guest_cause(cop0, (0xff),
2152                                   (EXCCODE_MOD << CAUSEB_EXCCODE));
2153
2154         /* setup badvaddr, context and entryhi registers for the guest */
2155         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2156         /* XXXKYMA: is the context register used by linux??? */
2157         kvm_write_c0_guest_entryhi(cop0, entryhi);
2158         /* Blow away the shadow host TLBs */
2159         kvm_mips_flush_host_tlb(1);
2160
2161         return EMULATE_DONE;
2162 }
2163
2164 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2165                                                u32 *opc,
2166                                                struct kvm_run *run,
2167                                                struct kvm_vcpu *vcpu)
2168 {
2169         struct mips_coproc *cop0 = vcpu->arch.cop0;
2170         struct kvm_vcpu_arch *arch = &vcpu->arch;
2171
2172         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2173                 /* save old pc */
2174                 kvm_write_c0_guest_epc(cop0, arch->pc);
2175                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2176
2177                 if (cause & CAUSEF_BD)
2178                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2179                 else
2180                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2181
2182         }
2183
2184         arch->pc = KVM_GUEST_KSEG0 + 0x180;
2185
2186         kvm_change_c0_guest_cause(cop0, (0xff),
2187                                   (EXCCODE_CPU << CAUSEB_EXCCODE));
2188         kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2189
2190         return EMULATE_DONE;
2191 }
2192
2193 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2194                                               u32 *opc,
2195                                               struct kvm_run *run,
2196                                               struct kvm_vcpu *vcpu)
2197 {
2198         struct mips_coproc *cop0 = vcpu->arch.cop0;
2199         struct kvm_vcpu_arch *arch = &vcpu->arch;
2200         enum emulation_result er = EMULATE_DONE;
2201
2202         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2203                 /* save old pc */
2204                 kvm_write_c0_guest_epc(cop0, arch->pc);
2205                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2206
2207                 if (cause & CAUSEF_BD)
2208                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2209                 else
2210                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2211
2212                 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2213
2214                 kvm_change_c0_guest_cause(cop0, (0xff),
2215                                           (EXCCODE_RI << CAUSEB_EXCCODE));
2216
2217                 /* Set PC to the exception entry point */
2218                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2219
2220         } else {
2221                 kvm_err("Trying to deliver RI when EXL is already set\n");
2222                 er = EMULATE_FAIL;
2223         }
2224
2225         return er;
2226 }
2227
2228 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2229                                               u32 *opc,
2230                                               struct kvm_run *run,
2231                                               struct kvm_vcpu *vcpu)
2232 {
2233         struct mips_coproc *cop0 = vcpu->arch.cop0;
2234         struct kvm_vcpu_arch *arch = &vcpu->arch;
2235         enum emulation_result er = EMULATE_DONE;
2236
2237         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2238                 /* save old pc */
2239                 kvm_write_c0_guest_epc(cop0, arch->pc);
2240                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2241
2242                 if (cause & CAUSEF_BD)
2243                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2244                 else
2245                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2246
2247                 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2248
2249                 kvm_change_c0_guest_cause(cop0, (0xff),
2250                                           (EXCCODE_BP << CAUSEB_EXCCODE));
2251
2252                 /* Set PC to the exception entry point */
2253                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2254
2255         } else {
2256                 kvm_err("Trying to deliver BP when EXL is already set\n");
2257                 er = EMULATE_FAIL;
2258         }
2259
2260         return er;
2261 }
2262
2263 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2264                                                 u32 *opc,
2265                                                 struct kvm_run *run,
2266                                                 struct kvm_vcpu *vcpu)
2267 {
2268         struct mips_coproc *cop0 = vcpu->arch.cop0;
2269         struct kvm_vcpu_arch *arch = &vcpu->arch;
2270         enum emulation_result er = EMULATE_DONE;
2271
2272         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2273                 /* save old pc */
2274                 kvm_write_c0_guest_epc(cop0, arch->pc);
2275                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2276
2277                 if (cause & CAUSEF_BD)
2278                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2279                 else
2280                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2281
2282                 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2283
2284                 kvm_change_c0_guest_cause(cop0, (0xff),
2285                                           (EXCCODE_TR << CAUSEB_EXCCODE));
2286
2287                 /* Set PC to the exception entry point */
2288                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2289
2290         } else {
2291                 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2292                 er = EMULATE_FAIL;
2293         }
2294
2295         return er;
2296 }
2297
2298 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2299                                                   u32 *opc,
2300                                                   struct kvm_run *run,
2301                                                   struct kvm_vcpu *vcpu)
2302 {
2303         struct mips_coproc *cop0 = vcpu->arch.cop0;
2304         struct kvm_vcpu_arch *arch = &vcpu->arch;
2305         enum emulation_result er = EMULATE_DONE;
2306
2307         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2308                 /* save old pc */
2309                 kvm_write_c0_guest_epc(cop0, arch->pc);
2310                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2311
2312                 if (cause & CAUSEF_BD)
2313                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2314                 else
2315                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2316
2317                 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2318
2319                 kvm_change_c0_guest_cause(cop0, (0xff),
2320                                           (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2321
2322                 /* Set PC to the exception entry point */
2323                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2324
2325         } else {
2326                 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2327                 er = EMULATE_FAIL;
2328         }
2329
2330         return er;
2331 }
2332
2333 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2334                                                u32 *opc,
2335                                                struct kvm_run *run,
2336                                                struct kvm_vcpu *vcpu)
2337 {
2338         struct mips_coproc *cop0 = vcpu->arch.cop0;
2339         struct kvm_vcpu_arch *arch = &vcpu->arch;
2340         enum emulation_result er = EMULATE_DONE;
2341
2342         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2343                 /* save old pc */
2344                 kvm_write_c0_guest_epc(cop0, arch->pc);
2345                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2346
2347                 if (cause & CAUSEF_BD)
2348                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2349                 else
2350                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2351
2352                 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2353
2354                 kvm_change_c0_guest_cause(cop0, (0xff),
2355                                           (EXCCODE_FPE << CAUSEB_EXCCODE));
2356
2357                 /* Set PC to the exception entry point */
2358                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2359
2360         } else {
2361                 kvm_err("Trying to deliver FPE when EXL is already set\n");
2362                 er = EMULATE_FAIL;
2363         }
2364
2365         return er;
2366 }
2367
2368 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
2369                                                   u32 *opc,
2370                                                   struct kvm_run *run,
2371                                                   struct kvm_vcpu *vcpu)
2372 {
2373         struct mips_coproc *cop0 = vcpu->arch.cop0;
2374         struct kvm_vcpu_arch *arch = &vcpu->arch;
2375         enum emulation_result er = EMULATE_DONE;
2376
2377         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2378                 /* save old pc */
2379                 kvm_write_c0_guest_epc(cop0, arch->pc);
2380                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2381
2382                 if (cause & CAUSEF_BD)
2383                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2384                 else
2385                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2386
2387                 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2388
2389                 kvm_change_c0_guest_cause(cop0, (0xff),
2390                                           (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2391
2392                 /* Set PC to the exception entry point */
2393                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2394
2395         } else {
2396                 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2397                 er = EMULATE_FAIL;
2398         }
2399
2400         return er;
2401 }
2402
2403 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
2404                                          struct kvm_run *run,
2405                                          struct kvm_vcpu *vcpu)
2406 {
2407         struct mips_coproc *cop0 = vcpu->arch.cop0;
2408         struct kvm_vcpu_arch *arch = &vcpu->arch;
2409         enum emulation_result er = EMULATE_DONE;
2410         unsigned long curr_pc;
2411         union mips_instruction inst;
2412
2413         /*
2414          * Update PC and hold onto current PC in case there is
2415          * an error and we want to rollback the PC
2416          */
2417         curr_pc = vcpu->arch.pc;
2418         er = update_pc(vcpu, cause);
2419         if (er == EMULATE_FAIL)
2420                 return er;
2421
2422         /* Fetch the instruction. */
2423         if (cause & CAUSEF_BD)
2424                 opc += 1;
2425
2426         inst.word = kvm_get_inst(opc, vcpu);
2427
2428         if (inst.word == KVM_INVALID_INST) {
2429                 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2430                 return EMULATE_FAIL;
2431         }
2432
2433         if (inst.r_format.opcode == spec3_op &&
2434             inst.r_format.func == rdhwr_op &&
2435             inst.r_format.rs == 0 &&
2436             (inst.r_format.re >> 3) == 0) {
2437                 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2438                 int rd = inst.r_format.rd;
2439                 int rt = inst.r_format.rt;
2440                 int sel = inst.r_format.re & 0x7;
2441
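                /*
                 * RDHWR is emulated for the small set of hardware registers
                 * Linux actually uses: CPUNum, SYNCI_Step, CC, CCRes and
                 * ULR. Anything else falls through to emulate_ri and is
                 * reflected back to the guest as a Reserved Instruction.
                 */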
2442                 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2443                 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2444                         kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2445                                   rd, opc);
2446                         goto emulate_ri;
2447                 }
2448                 switch (rd) {
2449                 case MIPS_HWR_CPUNUM:           /* CPU number */
2450                         arch->gprs[rt] = vcpu->vcpu_id;
2451                         break;
2452                 case MIPS_HWR_SYNCISTEP:        /* SYNCI length */
2453                         arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2454                                              current_cpu_data.icache.linesz);
2455                         break;
2456                 case MIPS_HWR_CC:               /* Read count register */
2457                         arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
2458                         break;
2459                 case MIPS_HWR_CCRES:            /* Count register resolution */
2460                         switch (current_cpu_data.cputype) {
2461                         case CPU_20KC:
2462                         case CPU_25KF:
2463                                 arch->gprs[rt] = 1;
2464                                 break;
2465                         default:
2466                                 arch->gprs[rt] = 2;
2467                         }
2468                         break;
2469                 case MIPS_HWR_ULR:              /* Read UserLocal register */
2470                         arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2471                         break;
2472
2473                 default:
2474                         kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2475                         goto emulate_ri;
2476                 }
2477
2478                 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2479                               vcpu->arch.gprs[rt]);
2480         } else {
2481                 kvm_debug("Emulate RI not supported @ %p: %#x\n",
2482                           opc, inst.word);
2483                 goto emulate_ri;
2484         }
2485
2486         return EMULATE_DONE;
2487
2488 emulate_ri:
2489         /*
2490          * Rollback PC (if in branch delay slot then the PC already points to
2491          * branch target), and pass the RI exception to the guest OS.
2492          */
2493         vcpu->arch.pc = curr_pc;
2494         return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2495 }
2496
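/*
 * Called once userspace has completed an MMIO load, to copy the result into
 * the saved destination GPR (sign- or zero-extending based on mmio_needed)
 * and restore the saved resume PC. As a rough sketch of the call site
 * (assuming the usual kvm_arch_vcpu_ioctl_run() flow on MIPS; names
 * illustrative, not verbatim):
 *
 *	if (vcpu->mmio_needed) {
 *		kvm_mips_complete_mmio_load(vcpu, run);
 *		vcpu->mmio_needed = 0;
 *	}
 */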
2497 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2498                                                   struct kvm_run *run)
2499 {
2500         unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2501         enum emulation_result er = EMULATE_DONE;
2502
2503         if (run->mmio.len > sizeof(*gpr)) {
                kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2505                 er = EMULATE_FAIL;
2506                 goto done;
2507         }
2508
2509         /* Restore saved resume PC */
2510         vcpu->arch.pc = vcpu->arch.io_pc;
2511
2512         switch (run->mmio.len) {
2513         case 4:
2514                 *gpr = *(s32 *) run->mmio.data;
2515                 break;
2516
2517         case 2:
2518                 if (vcpu->mmio_needed == 2)
2519                         *gpr = *(s16 *) run->mmio.data;
2520                 else
2521                         *gpr = *(u16 *)run->mmio.data;
2522
2523                 break;
2524         case 1:
2525                 if (vcpu->mmio_needed == 2)
2526                         *gpr = *(s8 *) run->mmio.data;
2527                 else
2528                         *gpr = *(u8 *) run->mmio.data;
2529                 break;
2530         }
2531
2532 done:
2533         return er;
2534 }
2535
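/*
 * Generic fallback used by the privilege checks below: deliver the raw
 * exception code from Cause straight to the guest's general exception
 * vector.
 */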
2536 static enum emulation_result kvm_mips_emulate_exc(u32 cause,
2537                                                   u32 *opc,
2538                                                   struct kvm_run *run,
2539                                                   struct kvm_vcpu *vcpu)
2540 {
2541         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2542         struct mips_coproc *cop0 = vcpu->arch.cop0;
2543         struct kvm_vcpu_arch *arch = &vcpu->arch;
2544         enum emulation_result er = EMULATE_DONE;
2545
2546         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2547                 /* save old pc */
2548                 kvm_write_c0_guest_epc(cop0, arch->pc);
2549                 kvm_set_c0_guest_status(cop0, ST0_EXL);
2550
2551                 if (cause & CAUSEF_BD)
2552                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2553                 else
2554                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2555
2556                 kvm_change_c0_guest_cause(cop0, (0xff),
2557                                           (exccode << CAUSEB_EXCCODE));
2558
2559                 /* Set PC to the exception entry point */
2560                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2561                 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2562
2563                 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2564                           exccode, kvm_read_c0_guest_epc(cop0),
2565                           kvm_read_c0_guest_badvaddr(cop0));
2566         } else {
2567                 kvm_err("Trying to deliver EXC when EXL is already set\n");
2568                 er = EMULATE_FAIL;
2569         }
2570
2571         return er;
2572 }
2573
2574 enum emulation_result kvm_mips_check_privilege(u32 cause,
2575                                                u32 *opc,
2576                                                struct kvm_run *run,
2577                                                struct kvm_vcpu *vcpu)
2578 {
2579         enum emulation_result er = EMULATE_DONE;
2580         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2581         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2582
2583         int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2584
2585         if (usermode) {
2586                 switch (exccode) {
2587                 case EXCCODE_INT:
2588                 case EXCCODE_SYS:
2589                 case EXCCODE_BP:
2590                 case EXCCODE_RI:
2591                 case EXCCODE_TR:
2592                 case EXCCODE_MSAFPE:
2593                 case EXCCODE_FPE:
2594                 case EXCCODE_MSADIS:
2595                         break;
2596
2597                 case EXCCODE_CPU:
2598                         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2599                                 er = EMULATE_PRIV_FAIL;
2600                         break;
2601
2602                 case EXCCODE_MOD:
2603                         break;
2604
2605                 case EXCCODE_TLBL:
2606                         /*
                         * If we are accessing Guest kernel space, then send an
2608                          * address error exception to the guest
2609                          */
2610                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2611                                 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2612                                           badvaddr);
2613                                 cause &= ~0xff;
2614                                 cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2615                                 er = EMULATE_PRIV_FAIL;
2616                         }
2617                         break;
2618
2619                 case EXCCODE_TLBS:
2620                         /*
                         * If we are accessing Guest kernel space, then send an
2622                          * address error exception to the guest
2623                          */
2624                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2625                                 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2626                                           badvaddr);
2627                                 cause &= ~0xff;
2628                                 cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2629                                 er = EMULATE_PRIV_FAIL;
2630                         }
2631                         break;
2632
2633                 case EXCCODE_ADES:
2634                         kvm_debug("%s: address error ST @ %#lx\n", __func__,
2635                                   badvaddr);
2636                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2637                                 cause &= ~0xff;
2638                                 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2639                         }
2640                         er = EMULATE_PRIV_FAIL;
2641                         break;
2642                 case EXCCODE_ADEL:
2643                         kvm_debug("%s: address error LD @ %#lx\n", __func__,
2644                                   badvaddr);
2645                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2646                                 cause &= ~0xff;
2647                                 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2648                         }
2649                         er = EMULATE_PRIV_FAIL;
2650                         break;
2651                 default:
2652                         er = EMULATE_PRIV_FAIL;
2653                         break;
2654                 }
2655         }
2656
2657         if (er == EMULATE_PRIV_FAIL)
2658                 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2659
2660         return er;
2661 }
2662
2663 /*
 * User Address (UA) fault; this can happen if:
2665  * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2666  *     case we pass on the fault to the guest kernel and let it handle it.
2667  * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2668  *     case we inject the TLB from the Guest TLB into the shadow host TLB
2669  */
2670 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2671                                               u32 *opc,
2672                                               struct kvm_run *run,
2673                                               struct kvm_vcpu *vcpu)
2674 {
2675         enum emulation_result er = EMULATE_DONE;
2676         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2677         unsigned long va = vcpu->arch.host_cp0_badvaddr;
2678         int index;
2679
2680         kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
2681                   vcpu->arch.host_cp0_badvaddr);
2682
2683         /*
2684          * KVM would not have got the exception if this entry was valid in the
2685          * shadow host TLB. Check the Guest TLB, if the entry is not there then
2686          * send the guest an exception. The guest exc handler should then inject
2687          * an entry into the guest TLB.
2688          */
2689         index = kvm_mips_guest_tlb_lookup(vcpu,
2690                       (va & VPN2_MASK) |
2691                       (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
2692                        KVM_ENTRYHI_ASID));
2693         if (index < 0) {
2694                 if (exccode == EXCCODE_TLBL) {
2695                         er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2696                 } else if (exccode == EXCCODE_TLBS) {
2697                         er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2698                 } else {
2699                         kvm_err("%s: invalid exc code: %d\n", __func__,
2700                                 exccode);
2701                         er = EMULATE_FAIL;
2702                 }
2703         } else {
2704                 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2705
2706                 /*
2707                  * Check if the entry is valid, if not then setup a TLB invalid
2708                  * exception to the guest
2709                  */
2710                 if (!TLB_IS_VALID(*tlb, va)) {
2711                         if (exccode == EXCCODE_TLBL) {
2712                                 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2713                                                                 vcpu);
2714                         } else if (exccode == EXCCODE_TLBS) {
2715                                 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2716                                                                 vcpu);
2717                         } else {
2718                                 kvm_err("%s: invalid exc code: %d\n", __func__,
2719                                         exccode);
2720                                 er = EMULATE_FAIL;
2721                         }
2722                 } else {
2723                         kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2724                                   tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
2725                         /*
2726                          * OK we have a Guest TLB entry, now inject it into the
2727                          * shadow host TLB
2728                          */
2729                         if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
2730                                 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2731                                         __func__, va, index, vcpu,
2732                                         read_c0_entryhi());
2733                                 er = EMULATE_FAIL;
2734                         }
2735                 }
2736         }
2737
2738         return er;
2739 }