/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/xive-regs.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
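/*
 * Before ISA v3.00 (POWER9), HDEC is a 32-bit register, so a value
 * read from it must be sign-extended before it is used in 64-bit
 * arithmetic or comparisons; on POWER9 the register is wider and no
 * extension is needed.
 */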
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/* Stack frame offsets for kvmppc_hv_entry */
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
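/*
 * SFS is the stack frame size used by kvmppc_hv_entry (its definition
 * is not shown here): the trap word occupies the 4 bytes at the top of
 * the frame, and the remaining slots are 8-byte save areas laid out
 * downward from there.
 */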
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */

	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	/* hwthread_req may have been set by cede or no vcpu, so clear it */
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200). The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	andi.	r0, r0, MSR_IR		/* in real mode? */

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f			/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8

	/* Virtual-mode return - can't get here for HMI or machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	andi.	r0, r7, MSR_EE		/* were interrupts hard-enabled? */
	mtmsrd	r7, 1			/* if so then re-enable them */

16:	mtspr	SPRN_HSRR0, r8		/* jump to reloc-on external vector */
	b	exc_virt_0x4500_hardware_interrupt

17:	mtspr	SPRN_HSRR0, r8
	b	exc_virt_0x4e80_h_doorbell
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */

	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)

	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
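	/*
	 * The idea: our napping_threads bit must be visible before we
	 * sample entry_exit_map below, because an exiting thread sets
	 * its exit bit first and then reads napping_threads to decide
	 * which threads to wake; this pairing keeps either side from
	 * missing the other's update.
	 */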
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0			/* Don't wake on privileged (OS) doorbell */

/*
 * Entered from kvm_start_guest if kvm_hstate.napping is set
 */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host
/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
	/* Set runlatch bit the minute you wake up from nap */

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	/* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	LOAD_REG_ADDR(r6, decrementer_max)

	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)

	/* Order load of vcpu after load of vcore */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory. This lwsync makes sure
	 * that it is.
	 */
	std	r0, HSTATE_KVM_VCORE(r13)
	/*
	 * All secondaries exiting the guest fall through this path.
	 * Before proceeding, check for an HMI interrupt and, if one is
	 * pending, invoke the OPAL HMI handler. By now the primary
	 * thread on this core/subcore has already done the partition
	 * switch and TB resync, so it is safe to call the handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * We jump to pnv_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop. The value we
	 * put in r3 becomes the return value for power7_nap.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
/*
 * When secondaries are napping in kvm_unsplit_nap() with
 * hwthread_req = 1, an HMI is ignored even though the subcores have
 * already exited the guest. The HMI then keeps waking the secondaries
 * up from nap in a loop, and they always go back to nap since no
 * vcore is assigned to them. This makes it impossible for the primary
 * thread to get hold of the secondary threads, resulting in a soft
 * lockup in the KVM path.
 *
 * Let us check if HMI is pending and handle it before we go to nap.
 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode

	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)

	/* clear any pending message */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED

	/* Check the do_nap flag again after setting napped[] */
	lbz	r0, KVM_SPLIT_DO_NAP(r3)

	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
	.global	kvmppc_hv_entry

	/*
	 * R4 = vcpu pointer (or NULL)
	 *
	 * all other volatile GPRs = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)

	/* Clear out SLB if hash */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	lbz	r6, HSTATE_PTID(r13)
	addi	r8, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */

	/* Primary thread switches to guest partition. */
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
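	/*
	 * In C terms, a sketch of the computation above:
	 *     r7 = cpu % 64;                         // bit number
	 *     r6 = KVM_NEED_FLUSH + (cpu / 64) * 8;  // doubleword offset
	 * i.e. the doubleword of the kvm->arch.need_tlb_flush cpumask
	 * that holds this cpu's bit.
	 */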
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	li	r7,0x800		/* IS field = 0b10 */
	li	r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Save host values of some registers */
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r7, VCPU_CSIGR(r4)

	/* POWER9-only registers */
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */

	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)

	/* Check if HDEC expires soon */
	cmpdi	r3, 512			/* 1 microsecond */

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stw	r9, VCPU_XIVE_PUSHED(r4)
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
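	/*
	 * The rotate pair above clears MSR_HV without needing a mask
	 * register: the rldicl rotates MSR_HV up to the top bit, where
	 * its mask clears it, and the rotldi rotates everything back
	 * (the two shift amounts sum to 64). In C, a sketch:
	 *     r11 = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
	 */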
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
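	/*
	 * LPCR[MER] (mediated external request) causes the guest to take
	 * an external interrupt as soon as it sets MSR[EE], so a pending
	 * external interrupt is not lost while the guest runs with
	 * interrupts disabled.
	 */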
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER

12:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time

	ld	r5, VCPU_CFAR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host

	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)

	/*
	 * Save HEIR (HV emulation assist reg) in emul_inst
	 * if this is an HEI (HV emulation interrupt, e40)
	 */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/*
	 * External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	/*
	 * Restore the active volatile registers after returning from
	 * kvmppc_read_intr.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also. If no CPUs are heading
	 * out, we fall through to the return-to-guest cases (r3 <= 0) below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */
	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)

1:	/* Return code <= 1 */

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lwz	r0, VCPU_XIVE_PUSHED(r9)
	li	r7, TM_SPC_PULL_OS_CTX
	andi.	r0, r0, MSR_IR		/* in real mode? */
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	stw	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
#endif /* CONFIG_KVM_XICS */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lbz	r0, KVM_RADIX(r5)
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
3:	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)

	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)

END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)

	mfspr	r5, SPRN_TCSCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)

	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	std	r6, VCPU_PSSCR(r9)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0. Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
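	/*
	 * Net effect on POWER8: the counters are frozen via the MMCR2
	 * freeze-condition bits first, so the read of MMCR0 below and
	 * the MMCR0[FC] write that follows cannot lose a pending alert.
	 */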
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */

21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Restore host values of some registers */
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bl	kvmppc_realmode_hmi_handler
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler has already
	 * resynchronized the timebase, so there is no need to subtract
	 * the guest timebase offset from the timebase here.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * already been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	bl	kvmppc_subcore_exit_guest

30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, SFS+PPC_LR_STKOFF(r1)

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	lbz	r0, KVM_RADIX(r3)
	mfspr	r6, SPRN_HDSISR
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/*
	 * Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP.
	 */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)

	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	std	r5, VCPU_FAULT_GPA(r9)

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	lbz	r0, KVM_RADIX(r3)
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI (or ISegI) for the guest */
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
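/*
 * Dispatch sketch: the hcall number in r3 is divided by 4 to index
 * hcall_real_table below; each table entry is a 32-bit offset from
 * the start of the table (0 means no real-mode handler), and the
 * corresponding bit of kvm->arch.enabled_hcalls[] is checked before
 * the handler is invoked.
 */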
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8		/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3		/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9			/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
/*
 * We've attempted a real-mode hcall, but the handler has punted it
 * back to userspace. We need to restore some clobbered volatile
 * registers before resuming the pass-it-to-qemu path.
 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
	.long	0		/* 0x2fc - H_XIRR_X */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
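/*
 * Note: entries are .long offsets relative to hcall_real_table rather
 * than absolute addresses, which keeps the table position-independent;
 * a zero entry marks an hcall that always goes up to the host (see
 * hcall_try_real_mode above).
 */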
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */
	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ld	r9, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */

	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell
	 * interrupt occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* load up FP state */

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 *
	 * r3 tells us whether we need to return to the host or not;
	 * WARNING: it gets checked further down,
	 * so do not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0 instead of exiting the guest.
	 * This new approach injects a machine check into the guest for
	 * fatal errors, causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups inside the
	 * guest and made it difficult to recover the guest instance.
	 *
	 * If we receive a machine check with MSR(RI=0) then deliver it
	 * to the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	cmpdi	r3, 0			/* Did we handle MCE ? */
	bne	2f			/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance ? */
	li	r3, 1			/* anything else, return 1 */
	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	/* see if it's a host IPI */
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* if not, return -1 */

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI

	/* external interrupt - create a stack frame so we can call C */
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to the host to complete handling
	 * the interrupt. The trap reason is expected in r12 by the
	 * guest exit path.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD

	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
/*
 * Save away FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)

/*
 * Load up FP, VMX and VSX registers
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
	std	r0, PPC_LR_STKOFF(r1)
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f		/* TM not active in guest. */
	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED
	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* All GPRs are volatile at this point. */

	/* Temporarily store r13 and r9 so we have some regs to play with */
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)

	/* ... now save r13 */
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	addi	r3, r9, VCPU_FPRS_TM
	addi	r3, r9, VCPU_VRS_TM
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR. Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)
	/*
	 * Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt. It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* Load GPRs r0-r28 */
	ld	reg, VCPU_GPRS_TM(reg)(r31)

	mtspr	SPRN_DSCR, r29

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now set up. All GPRs are now volatile. */

	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	ld	r0, PPC_LR_STKOFF(r1)

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:

/*
 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state...  */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
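	/*
	 * Roughly, in C (a sketch):
	 *     ts = MSR_TS(r11);          // 2 = transactional, 1 = suspended
	 *     r11 = vcpu->arch.intr_msr;
	 *     if (ts == 2)
	 *             ts = 1;            // interrupt delivery suspends a transaction
	 *     r11 |= ts << MSR_TS_S_LG;
	 */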
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt. Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
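	/*
	 * MMCR0[PMXE] enables the performance monitor exception, and
	 * MMCR0[FCECE] freezes the counters when an enabled condition or
	 * event occurs, so the next counter overflow both freezes the
	 * counters and raises the alert that writing PMAO could not.
	 */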
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r7, VCPU_ACTIVITY_START(r4)

	ld	r8, TAS_SEQCOUNT(r5)
	std	r8, TAS_SEQCOUNT(r5)
	ld	r7, TAS_TOTAL(r5)
	std	r7, TAS_TOTAL(r5)
3:	std	r3, TAS_MIN(r5)
	std	r8, TAS_SEQCOUNT(r5)
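	/*
	 * The TAS_SEQCOUNT updates above form a seqlock-style write
	 * side: bump the sequence (making it odd), update TOTAL and
	 * MIN/MAX, then bump it again, so a reader that sees an even,
	 * unchanged sequence count knows it read a consistent snapshot.
	 */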