 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
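/*
 * Before ISA v3.00 the hypervisor decrementer is a 32-bit register, so its
 * value is sign-extended here before 64-bit signed comparisons; on POWER9
 * (CPU_FTR_ARCH_300) it is already wide enough and no extension is needed.
 */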
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
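/* Byte offset of the checkpointed (TM) image of GPR 'reg' in the vcpu struct */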
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/* Stack frame offsets for kvmppc_hv_entry */
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
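/* The save slots are offsets back from SFS, the stack frame size that
 * kvmppc_hv_entry reserves. */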
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 * LR = return address to continue at after eventually re-enabling MMU
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r4, KVM_SPLIT_DO_SET(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from guest - restore host state and return to caller */
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3
	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	ld	r3, HSTATE_DECEXP(r13)
	/* hwthread_req may have been set by cede or no vcpu, so clear it */
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt.  We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code.  For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code there.
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)
	/* Return the trap number on this thread as the return value */
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	andi.	r0, r0, MSR_IR		/* in real mode? */
	/* RFI into the highmem handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	/* Virtual-mode return */
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
 * Entered from kvm_start_guest if kvm_hstate.napping is set
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	/* check the wake reason */
	bl	kvmppc_check_wake_reason
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* See if the wake reason means we need to exit */
	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
	.globl	kvm_start_guest
	/* Set runlatch bit the minute you wake up from nap */
	 * Could avoid this and pass it through in r3.  For now,
	 * code expects it to be in SRR1.
	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)
	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	/* if we have no vcore to run, go back to sleep */
kvm_secondary_got_guest:
	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)
	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	LOAD_REG_ADDR(r6, decrementer_max)
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/* Order load of vcpu after load of vcore */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that is true.
	std	r0, HSTATE_KVM_VCORE(r13)
	 * All secondaries exiting the guest will fall through this path.
	 * Before proceeding, just check for an HMI interrupt and
	 * invoke the OPAL HMI handler.  By now we are sure that the
	 * primary thread on this core/subcore has already done the
	 * partition switch and TB resync, so we are good to call the
	 * OPAL HMI handler.
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	 * We jump to pnv_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop.  The value we
	 * put in r3 becomes the return value for power7_nap.  pnv_wakeup_loss
	 * requires SRR1 in r12.
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest
54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* Set LPCR, LPIDR etc. on P9 */
	bl	kvmhv_p9_restore_lpcr
	 * Here the primary thread is trying to return the core to
	 * whole-core mode, so we need to nap.
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, HMIs go ignored even though subcores have
	 * already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to
	 * nap since no vcore is assigned to them.  This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 * Let us check if an HMI is pending and handle it before we go to nap.
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	/* clear any pending message */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
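	/*
	 * msgclr (like msgsnd) takes the doorbell message type in bits
	 * 32:36 of its RB operand; the lis above positions the
	 * PPC_DBELL_SERVER type there so that pending server doorbell
	 * messages are cleared.
	 */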
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	/* Check the do_nap flag again after setting napped[] */
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
/******************************************************************************
 *****************************************************************************/

	.global	kvmppc_hv_entry

	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRs = free
	 * Does not preserve non-volatile GPRs or CR fields
	std	r0, PPC_LR_STKOFF(r1)
	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)
	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	/* Clear out SLB if hash */
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	/* Set bit in entry map iff exit map is zero. */
	lbz	r6, HSTATE_PTID(r13)
	addi	r8, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
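	/*
	 * vcore->entry_exit_map packs the thread entry map in its low byte
	 * and the exit map in the next byte, so any value >= 0x100 means
	 * at least one thread has already started exiting.
	 */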
	/* Primary thread switches to guest partition. */
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	li	r7,0x800		/* IS field = 0b10 */
	li	r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
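	/*
	 * The timebase is adjusted through TBU40, which sets only its upper
	 * 40 bits.  If the low 24 bits wrapped between the two mftb reads,
	 * compensate by adding 1 to the upper field (the addis of 0x100
	 * adds 1 << 24).
	 */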
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest
	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE
	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)
	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	/* Save host values of some registers */
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */
	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	/* POWER9-only registers */
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* makes the stop instruction trap to HV */
	ld	r7, VCPU_HFSCR(r4)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	 * Set the decrementer to the guest decrementer.
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	/* Restore state of CTRL run bit; assume 1 on entry */
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)
	ld	r8,VCORE_LPCR(r5)
	/* Check if HDEC expires soon */
	cmpdi	r3, 512		/* 1 microsecond */
#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stw	r9, VCPU_XIVE_PUSHED(r4)
#endif /* CONFIG_KVM_XICS */
deliver_guest_interrupt:
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
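	/*
	 * Two-instruction trick to clear MSR_HV: the rldicl rotates the HV
	 * bit up to the MSB, where its mask clears it, and the rotldi
	 * rotates the value back into place.
	 */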
	ori	r11, r11, MSR_ME
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
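	/*
	 * LPCR[MER] (Mediated External Request) makes the hardware present
	 * an external interrupt to the guest as soon as the guest sets
	 * MSR[EE], so a pending level-triggered external interrupt is not
	 * lost while the guest runs with interrupts disabled.
	 */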
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
12:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	/* Clear the pending doorbell request */
	stb	r0, VCPU_DBELL_REQ(r4)
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11
	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_CFAR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Move canary into DSISR to check for later */
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
/******************************************************************************
 *****************************************************************************/
 * We come here from the first-level interrupt handlers.
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Save registers */
	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)
	std	r3, VCPU_GPR(R13)(r9)
	stw	r12,VCPU_TRAP(r9)
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)
	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont
	/* External interrupt, first check for host_ipi.  If this is
	 * set, we know the host wants us out so let's do it now
	 */
	 * Restore the active volatile registers after returning from
	 * kvmppc_read_intr.
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, check return values <= 0.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *   -1 A guest wakeup IPI (which has now been cleared)
	 *      In either case, we return to guest to deliver any pending
	 *      guest interrupts.
	 *
	 *   -2 A PCI passthrough external interrupt was handled
	 *      (interrupt was delivered directly to guest)
	 *      Return to guest to deliver any pending guest interrupts.
	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
1:	/* Return code <= 1 */
	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lwz	r0, VCPU_XIVE_PUSHED(r9)
	li	r7, TM_SPC_PULL_OS_CTX
	andi.	r0, r0, MSR_IR		/* in real mode? */
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	stw	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
#endif /* CONFIG_KVM_XICS */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)
	/* Stop others sending VCPU interrupts to this physical CPU */
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)
	/* Save guest CTRL register, set runlatch to 1 */
	stw	r6,VCPU_CTRL(r9)
	/* Check if we are running hash or radix and store it in cr2 */
	lbz	r0, KVM_RADIX(r5)
	/* Read the guest SLB and save it away */
	bne	cr2, 3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
3:	stw	r5,VCPU_SLB_MAX(r9)
	 * Save the guest PURR/SPURR
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	ld	r3, HSTATE_KVM_VCORE(r13)
	/* On P9, if the guest has large decr enabled, don't sign extend */
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
	mtspr	SPRN_UAMOR, r6
	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Restore host values of some registers */
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_PPC_RADIX_MMU
	 * Are we running hash or radix ?
	lbz	r0, KVM_RADIX(r5)
	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB.  First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	/* Then do a congruence class local flush */
	lwz	r0,KVM_TLB_SETS(r6)
	li	r7,0x400		/* IS field = 0b01 */
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
2:	/* Flush the ERAT on radix P9 DD1 guest exit */
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
#endif /* CONFIG_PPC_RADIX_MMU */
	/* Hash: clear out SLB */
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)
	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bl	kvmppc_realmode_hmi_handler
	li	r12, BOOK3S_INTERRUPT_HMI
	 * At this point kvmppc_realmode_hmi_handler will have resynced
	 * the TB, so there is no need to subtract the guest timebase
	 * offset from the timebase; skip it.
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
17:	bl	kvmppc_subcore_exit_guest
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	ld	r0, VCORE_PCR(r5)
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_p9_restore_lpcr
	lwz	r12, STACK_SLOT_TRAP(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)
	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	ld	r0, SFS+PPC_LR_STKOFF(r1)
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
	lbz	r0, KVM_RADIX(r3)
	mfspr	r6, SPRN_HDSISR
	/* Look for DSISR canary.  If we find it, retry instruction */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
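	/*
	 * slbfee. looks up the SLB entry matching the effective address in
	 * r0 and returns it in r5, with CR0 indicating whether a valid
	 * entry was found; on a miss we reflect a data segment interrupt
	 * to the guest instead.
	 */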
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */
	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)
	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)
	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	std	r5, VCPU_FAULT_GPA(r9)
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
	lbz	r0, KVM_RADIX(r3)
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	/* Synthesize an ISI (or ISegI) for the guest */
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return
3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
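	/*
	 * hcall numbers are multiples of 4, so r3 / 4 is the hcall index:
	 * (index / 64) * 8 gives the byte offset of the doubleword in
	 * kvm->arch.enabled_hcalls[], and index % 64 the bit to test
	 * within it.
	 */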
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
	.long	0		/* 0x2fc - H_XIRR_X */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
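/*
 * Each entry above is a 32-bit offset from hcall_real_table (0 means the
 * hcall is not handled in real mode), indexed by hcall number / 4; storing
 * relative offsets keeps the table position-independent.
 */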
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER
_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5
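	/*
	 * POWER8 has no DABR, so the old hcall is emulated with the DAWR
	 * facility: the rlwimi pair above moves the DABR read/write and
	 * translation control bits into their DAWRX positions.
	 */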
_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	ld	r9, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
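	/*
	 * The LPCR PECE bits select which events may wake a napping thread:
	 * PECE0/PECE1 enable external and decrementer interrupts, PECEDH
	 * (POWER8) adds hypervisor doorbells, and PECEDP adds privileged
	 * doorbells (requested via r3 above only when ceding).
	 */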
kvm_nap_sequence:		/* desired LPCR value in r5 */
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	/* load up FP state */
	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 * r3 tells us whether we need to return to host or not;
	 * WARNING: it gets checked further down, so do not modify r3
	 * until this check is done.
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)
	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */
	/* cede when already previously prodded case */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	/* we've ceded but we want to give control to the host */
	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	 * For a guest that is FWNMI capable, deliver all MCE errors
	 * (handled or unhandled) by exiting the guest with a KVM_EXIT_NMI
	 * exit reason.  This approach injects machine check errors into
	 * the guest address space, with additional information in the form
	 * of an RTAS event, enabling the guest kernel to handle them
	 * suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back
	 * to the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled errors (non-fatal), just go back to guest execution
	 * with the current HSRR0.
	 * If we receive a machine check with MSR[RI] = 0, deliver it to
	 * the guest as a machine check, causing the guest to crash.
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */
	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	cmpdi	r3, 0		/* Did we handle the MCE? */
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	li	r3, 1			/* anything else, return 1 */
	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	/* see if it's a host IPI */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* if not, return -1 */
2855 /* Woken up due to Hypervisor maintenance interrupt */
2856 4: li r12, BOOK3S_INTERRUPT_HMI
	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr

	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr
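	/*
	 * The frame handling above is the minimal sequence the 64-bit
	 * PowerPC ELF ABI requires around a call to C: save LR in the
	 * caller's frame, push a PPC_MIN_STKFRM-sized frame for the
	 * callee, and pop both once kvmppc_read_intr returns.
	 */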
/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr
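/*
 * store_fp_state and store_vr_state (the common helpers in the kernel's
 * fpu.S and vector.S) take the save-area address in r3, which is why the
 * vcpu pointer is kept in the non-volatile r31 across the calls above.
 */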
/*
 * Load up FP, VMX and VSX registers.
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)
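	/*
	 * From here on, the GPRs hold the guest's checkpointed values:
	 * treclaim records the failure cause (TM_CAUSE_KVM_RESCHED, from
	 * r3) in TEXASR and rolls the live registers back to the
	 * checkpointed state, which is what gets saved to the vcpu below.
	 */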
	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)
	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)
	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)
	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5
	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7
	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)
	/*
	 * Make sure the failure summary is set; otherwise we'll program
	 * check when we trechkpt. It's possible that this was not set on
	 * a kvmppc_set_one_reg() call, but we shouldn't let that crash
	 * the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7
	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */
	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30
	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now set up.  All GPRs are now volatile. */
	TRECHKPT
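	/*
	 * trechkpt is the inverse of treclaim: it takes the current
	 * register contents as the new checkpointed state, which is why
	 * all of the guest's checkpointed GPRs and SPRs were loaded above.
	 */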
	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	SAVE_10GPRS(9, r1)
	SAVE_10GPRS(19, r1)
	SAVE_2GPRS(29, r1)
	SAVE_GPR(31, r1)
	andi.	r0, r12, 2	/* need HSRR0/1 if this was an HV interrupt */
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	std	r4, _CTR(r1)
	lbz	r6, PACASOFTIRQEN(r13)
	std	r6, SOFTE(r1)

	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)
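	/*
	 * The immediate above is ASCII "regshere": a marker word stored
	 * just below the saved registers so that anyone scanning the
	 * stack can recognize a full register frame.
	 */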
	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	/* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	isync
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	/*
	 * Turn on the MMU and jump to C code.
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	rfid
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2	/* Check if we are in transactional state. */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
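	/*
	 * The two MSR_TS bits encode 0 = non-transactional, 1 = suspended,
	 * 2 = transactional. The rldicl above isolates the guest's TS
	 * field, and the rldimi copies it into the new MSR unchanged,
	 * except that transactional (2) is rewritten as suspended (1),
	 * mirroring how the hardware delivers interrupts under TM.
	 */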
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr
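	/*
	 * The PMC6 value written above (0x7fffffff, assuming the lis/ori
	 * pair restored here) sits just below the counter's interrupt
	 * threshold (bit 31), so it overflows almost immediately and
	 * leaves the desired performance monitor interrupt pending.
	 */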
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity.
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f				/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f				/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	beq	3f		/* if this is the first sample, set min */
	ld	r6, TAS_MIN(r5)
	cmpd	r6, r3
	ble	4f		/* existing min is smaller: keep it */
3:	std	r3, TAS_MIN(r5)
4:	ld	r6, TAS_MAX(r5)
	cmpd	r6, r3
	bge	5f		/* existing max is larger: keep it */
	std	r3, TAS_MAX(r5)
5:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif
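	/*
	 * The sequence count updated above acts as a writer-side seqlock:
	 * it is made odd before the statistics are modified and even again
	 * afterwards, with lwsync ordering the stores, so a reader that
	 * sees an odd or changed count knows to retry.
	 */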