2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
13 * Derived from book3s_rmhandlers.S and other files, which are:
15 * Copyright SUSE Linux Products GmbH 2009
17 * Authors: Alexander Graf <agraf@suse.de>
20 #include <asm/ppc_asm.h>
21 #include <asm/kvm_asm.h>
25 #include <asm/ptrace.h>
26 #include <asm/hvcall.h>
27 #include <asm/asm-offsets.h>
28 #include <asm/exception-64s.h>
29 #include <asm/kvm_book3s_asm.h>
30 #include <asm/book3s/64/mmu-hash.h>
33 #include <asm/xive-regs.h>
34 #include <asm/thread_info.h>
36 /* Sign-extend HDEC if not on POWER9 */
37 #define EXTEND_HDEC(reg) \
40 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
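/*
 * Illustrative note (not from the original source): before ISA v3.00 the
 * HDEC is a 32-bit register, so before comparing it against 64-bit
 * timebase deltas it must be sign-extended, roughly the C equivalent of
 *
 *	long hdec = (long)(int)mfspr(SPRN_HDEC);	// extsw
 *
 * On POWER9 (CPU_FTR_ARCH_300) the (H)DEC is already wide enough, so the
 * feature section above skips the extension.
 */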
42 /* Values in HSTATE_NAPPING(r13) */
43 #define NAPPING_CEDE 1
44 #define NAPPING_NOVCPU 2
46 /* Stack frame offsets for kvmppc_hv_entry */
48 #define STACK_SLOT_TRAP (SFS-4)
49 #define STACK_SLOT_TID (SFS-16)
50 #define STACK_SLOT_PSSCR (SFS-24)
51 #define STACK_SLOT_PID (SFS-32)
52 #define STACK_SLOT_IAMR (SFS-40)
53 #define STACK_SLOT_CIABR (SFS-48)
54 #define STACK_SLOT_DAWR (SFS-56)
55 #define STACK_SLOT_DAWRX (SFS-64)
56 #define STACK_SLOT_HFSCR (SFS-72)
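/*
 * Layout note (added for clarity): slots are addressed downward from the
 * frame size SFS. STACK_SLOT_TRAP is a 4-byte word just below the top of
 * the frame; the rest are 8-byte save areas used to stash host SPR values
 * (TID, PSSCR, PID, IAMR, CIABR, DAWR/DAWRX, HFSCR) across the guest run
 * and restore them on the way out.
 */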
59 * Call kvmppc_hv_entry in real mode.
60 * Must be called with interrupts hard-disabled.
64 * LR = return address to continue at after eventually re-enabling MMU
66 _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
68 std r0, PPC_LR_STKOFF(r1)
71 std r10, HSTATE_HOST_MSR(r13)
72 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
77 mtmsrd r0,1 /* clear RI in MSR */
84 /* On P9, do LPCR setting, if necessary */
85 ld r3, HSTATE_SPLIT_MODE(r13)
88 lwz r4, KVM_SPLIT_DO_SET(r3)
94 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
96 ld r4, HSTATE_KVM_VCPU(r13)
99 /* Back from guest - restore host state and return to caller */
102 /* Restore host DABR and DABRX */
103 ld r5,HSTATE_DABR(r13)
107 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
110 ld r3,PACA_SPRG_VDSO(r13)
111 mtspr SPRN_SPRG_VDSO_WRITE,r3
113 /* Reload the host's PMU registers */
114 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
116 beq 23f /* skip if not */
118 ld r3, HSTATE_MMCR0(r13)
119 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
122 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
123 lwz r3, HSTATE_PMC1(r13)
124 lwz r4, HSTATE_PMC2(r13)
125 lwz r5, HSTATE_PMC3(r13)
126 lwz r6, HSTATE_PMC4(r13)
127 lwz r8, HSTATE_PMC5(r13)
128 lwz r9, HSTATE_PMC6(r13)
135 ld r3, HSTATE_MMCR0(r13)
136 ld r4, HSTATE_MMCR1(r13)
137 ld r5, HSTATE_MMCRA(r13)
138 ld r6, HSTATE_SIAR(r13)
139 ld r7, HSTATE_SDAR(r13)
145 ld r8, HSTATE_MMCR2(r13)
146 ld r9, HSTATE_SIER(r13)
149 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
155 * Reload DEC. HDEC interrupts were disabled when
156 * we reloaded the host's LPCR value.
158 ld r3, HSTATE_DECEXP(r13)
163 /* hwthread_req may have got set by cede or no vcpu, so clear it */
165 stb r0, HSTATE_HWTHREAD_REQ(r13)
168 * For external interrupts we need to call the Linux
169 * handler to process the interrupt. We do that by jumping
170 * to absolute address 0x500 for external interrupts.
171 * The [h]rfid at the end of the handler will return to
172 * the book3s_hv_interrupts.S code. For other interrupts
173 * we do the rfid to get back to the book3s_hv_interrupts.S code.
176 ld r8, 112+PPC_LR_STKOFF(r1)
178 ld r7, HSTATE_HOST_MSR(r13)
180 /* Return the trap number on this thread as the return value */
184 * If we came back from the guest via a relocation-on interrupt,
185 * we will be in virtual mode at this point, which makes it a
186 * little easier to get back to the caller.
189 andi. r0, r0, MSR_IR /* in real mode? */
192 /* RFI into the highmem handler */
196 mtmsrd r6, 1 /* Clear RI in MSR */
201 /* Virtual-mode return */
206 kvmppc_primary_no_guest:
207 /* We handle this much like a ceded vcpu */
208 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
209 /* HDEC may be larger than DEC for arch >= v3.00, but since the */
210 /* HDEC value came from DEC in the first place, it will fit */
214 * Make sure the primary has finished the MMU switch.
215 * We should never get here on a secondary thread, but
216 * check it for robustness' sake.
218 ld r5, HSTATE_KVM_VCORE(r13)
219 65: lbz r0, VCORE_IN_GUEST(r5)
226 /* set our bit in napping_threads */
227 ld r5, HSTATE_KVM_VCORE(r13)
228 lbz r7, HSTATE_PTID(r13)
231 addi r6, r5, VCORE_NAPPING_THREADS
236 /* order napping_threads update vs testing entry_exit_map */
239 lwz r7, VCORE_ENTRY_EXIT(r5)
241 bge kvm_novcpu_exit /* another thread already exiting */
242 li r3, NAPPING_NOVCPU
243 stb r3, HSTATE_NAPPING(r13)
245 li r3, 0 /* Don't wake on privileged (OS) doorbell */
250 * Entered from kvm_start_guest if kvm_hstate.napping is set
256 ld r1, HSTATE_HOST_R1(r13)
257 ld r5, HSTATE_KVM_VCORE(r13)
259 stb r0, HSTATE_NAPPING(r13)
261 /* check the wake reason */
262 bl kvmppc_check_wake_reason
265 * Restore volatile registers since we could have called
266 * a C routine in kvmppc_check_wake_reason.
269 ld r5, HSTATE_KVM_VCORE(r13)
271 /* see if any other thread is already exiting */
272 lwz r0, VCORE_ENTRY_EXIT(r5)
276 /* clear our bit in napping_threads */
277 lbz r7, HSTATE_PTID(r13)
280 addi r6, r5, VCORE_NAPPING_THREADS
286 /* See if the wake reason means we need to exit */
290 /* See if our timeslice has expired (HDEC is negative) */
293 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
297 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
298 ld r4, HSTATE_KVM_VCPU(r13)
300 beq kvmppc_primary_no_guest
302 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
303 addi r3, r4, VCPU_TB_RMENTRY
304 bl kvmhv_start_timing
309 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
310 ld r4, HSTATE_KVM_VCPU(r13)
313 addi r3, r4, VCPU_TB_RMEXIT
314 bl kvmhv_accumulate_time
317 stw r12, STACK_SLOT_TRAP(r1)
318 bl kvmhv_commence_exit
320 b kvmhv_switch_to_host
323 * We come in here when wakened from nap mode.
324 * Relocation is off and most register values are lost.
325 * r13 points to the PACA.
326 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
328 .globl kvm_start_guest
330 /* Set runlatch bit the minute you wake up from nap */
336 * Could avoid this and pass it through in r3. For now,
337 * code expects it to be in SRR1.
344 stb r0,PACA_FTRACE_ENABLED(r13)
346 li r0,KVM_HWTHREAD_IN_KVM
347 stb r0,HSTATE_HWTHREAD_STATE(r13)
349 /* NV GPR values from power7_idle() will no longer be valid */
351 stb r0,PACA_NAPSTATELOST(r13)
353 /* were we napping due to cede? */
354 lbz r0,HSTATE_NAPPING(r13)
355 cmpwi r0,NAPPING_CEDE
357 cmpwi r0,NAPPING_NOVCPU
358 beq kvm_novcpu_wakeup
360 ld r1,PACAEMERGSP(r13)
361 subi r1,r1,STACK_FRAME_OVERHEAD
364 * We weren't napping due to cede, so this must be a secondary
365 * thread being woken up to run a guest, or being woken up due
366 * to a stray IPI. (Or due to some machine check or hypervisor
367 * maintenance interrupt while the core is in KVM.)
370 /* Check the wake reason in SRR1 to see why we got here */
371 bl kvmppc_check_wake_reason
373 * kvmppc_check_wake_reason could invoke a C routine, but we
374 * have no volatile registers to restore when we return.
380 /* get vcore pointer, NULL if we have nothing to run */
381 ld r5,HSTATE_KVM_VCORE(r13)
383 /* if we have no vcore to run, go back to sleep */
386 kvm_secondary_got_guest:
388 /* Set HSTATE_DSCR(r13) to something sensible */
389 ld r6, PACA_DSCR_DEFAULT(r13)
390 std r6, HSTATE_DSCR(r13)
392 /* On thread 0 of a subcore, set HDEC to max */
393 lbz r4, HSTATE_PTID(r13)
396 LOAD_REG_ADDR(r6, decrementer_max)
399 /* and set per-LPAR registers, if doing dynamic micro-threading */
400 ld r6, HSTATE_SPLIT_MODE(r13)
404 ld r0, KVM_SPLIT_RPR(r6)
406 ld r0, KVM_SPLIT_PMMAR(r6)
408 ld r0, KVM_SPLIT_LDBAR(r6)
412 /* On P9 we use the split_info for coordinating LPCR changes */
413 lwz r4, KVM_SPLIT_DO_SET(r6)
420 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
422 /* Order load of vcpu after load of vcore */
424 ld r4, HSTATE_KVM_VCPU(r13)
427 /* Back from the guest, go back to nap */
428 /* Clear our vcpu and vcore pointers so we don't come back in early */
430 std r0, HSTATE_KVM_VCPU(r13)
432 * Once we clear HSTATE_KVM_VCORE(r13), the code in
433 * kvmppc_run_core() is going to assume that all our vcpu
434 * state is visible in memory. This lwsync makes sure that it is.
438 std r0, HSTATE_KVM_VCORE(r13)
441 * All secondaries exiting the guest will fall through this path.
442 * Before proceeding, just check for an HMI interrupt and
443 * invoke the OPAL HMI handler. By now we are sure that the
444 * primary thread on this core/subcore has already done the partition
445 * switch/TB resync and we are good to call the OPAL HMI handler.
447 cmpwi r12, BOOK3S_INTERRUPT_HMI
450 li r3,0 /* NULL argument */
451 bl hmi_exception_realmode
453 * At this point we have finished executing in the guest.
454 * We need to wait for hwthread_req to become zero, since
455 * we may not turn on the MMU while hwthread_req is non-zero.
456 * While waiting we also need to check if we get given a vcpu to run.
459 lbz r3, HSTATE_HWTHREAD_REQ(r13)
463 li r0, KVM_HWTHREAD_IN_KERNEL
464 stb r0, HSTATE_HWTHREAD_STATE(r13)
465 /* need to recheck hwthread_req after a barrier, to avoid race */
467 lbz r3, HSTATE_HWTHREAD_REQ(r13)
471 * We jump to pnv_wakeup_loss, which will return to the caller
472 * of power7_nap in the powernv cpu offline loop. The value we
473 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
474 * requires SRR1 in r12.
478 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
485 ld r5, HSTATE_KVM_VCORE(r13)
488 ld r3, HSTATE_SPLIT_MODE(r13)
491 lwz r0, KVM_SPLIT_DO_SET(r3)
494 lwz r0, KVM_SPLIT_DO_RESTORE(r3)
497 lbz r0, KVM_SPLIT_DO_NAP(r3)
503 b kvm_secondary_got_guest
505 54: li r0, KVM_HWTHREAD_IN_KVM
506 stb r0, HSTATE_HWTHREAD_STATE(r13)
510 /* Set LPCR, LPIDR etc. on P9 */
518 bl kvmhv_p9_restore_lpcr
523 * Here the primary thread is trying to return the core to
524 * whole-core mode, so we need to nap.
528 * When secondaries are napping in kvm_unsplit_nap() with
529 * hwthread_req = 1, an HMI gets ignored even though the subcores have
530 * already exited the guest. Hence the HMI keeps waking up secondaries
531 * from nap in a loop and the secondaries always go back to nap since
532 * no vcore is assigned to them. This makes it impossible for the primary
533 * thread to get hold of the secondary threads, resulting in a soft
534 * lockup in the KVM path.
536 * Let us check if HMI is pending and handle it before we go to nap.
538 cmpwi r12, BOOK3S_INTERRUPT_HMI
540 li r3, 0 /* NULL argument */
541 bl hmi_exception_realmode
544 * Ensure that secondary doesn't nap when it has
545 * its vcore pointer set.
547 sync /* matches smp_mb() before setting split_info.do_nap */
548 ld r0, HSTATE_KVM_VCORE(r13)
551 /* clear any pending message */
553 lis r6, (PPC_DBELL_SERVER << (63-36))@h
555 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
556 /* Set kvm_split_mode.napped[tid] = 1 */
557 ld r3, HSTATE_SPLIT_MODE(r13)
559 lbz r4, HSTATE_TID(r13)
560 addi r4, r4, KVM_SPLIT_NAPPED
562 /* Check the do_nap flag again after setting napped[] */
564 lbz r0, KVM_SPLIT_DO_NAP(r3)
567 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
569 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
576 /******************************************************************************
 *                                Entry code
580 *****************************************************************************/
582 .global kvmppc_hv_entry
587 * R4 = vcpu pointer (or NULL)
592 * all other volatile GPRS = free
593 * Does not preserve non-volatile GPRs or CR fields
596 std r0, PPC_LR_STKOFF(r1)
599 /* Save R1 in the PACA */
600 std r1, HSTATE_HOST_R1(r13)
602 li r6, KVM_GUEST_MODE_HOST_HV
603 stb r6, HSTATE_IN_GUEST(r13)
605 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
606 /* Store initial timestamp */
609 addi r3, r4, VCPU_TB_RMENTRY
610 bl kvmhv_start_timing
614 /* Use cr7 as an indication of radix mode */
615 ld r5, HSTATE_KVM_VCORE(r13)
616 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
617 lbz r0, KVM_RADIX(r9)
621 * POWER7/POWER8 host -> guest partition switch code.
622 * We don't have to lock against concurrent tlbies,
623 * but we do have to coordinate across hardware threads.
625 /* Set bit in entry map iff exit map is zero. */
627 lbz r6, HSTATE_PTID(r13)
629 addi r8, r5, VCORE_ENTRY_EXIT
631 cmpwi r3, 0x100 /* any threads starting to exit? */
632 bge secondary_too_late /* if so we're too late to the party */
637 /* Primary thread switches to guest partition. */
641 /* Radix has already switched LPID and flushed core TLB */
647 li r0,LPID_RSVD /* switch to reserved LPID */
650 mtspr SPRN_SDR1,r6 /* switch to partition page table */
651 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
655 /* See if we need to flush the TLB. Hash has to be done in RM */
656 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
659 * On POWER9, individual threads can come in here, but the
660 * TLB is shared between the 4 threads in a core, hence
661 * invalidating on one thread invalidates for all.
662 * Thus we make all 4 threads use the same bit here.
665 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
666 clrldi r7,r6,64-6 /* extract bit number (6 bits) */
667 srdi r6,r6,6 /* doubleword number */
668 sldi r6,r6,3 /* address offset */
670 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
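/*
 * Rough C equivalent of the bit addressing above (illustrative; the
 * exact cpumask accessors differ):
 *
 *	unsigned long *word = &kvm->arch.need_tlb_flush.bits[bit >> 6];
 *	unsigned long  mask = 1UL << (bit & 63);
 *
 * The ldarx/stdcx. sequence at label 23: below then clears the bit
 * atomically once the TLB flush is done.
 */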
676 /* Flush the TLB of any entries for this LPID */
677 lwz r0,KVM_TLB_SETS(r9)
679 li r7,0x800 /* IS field = 0b10 */
681 li r0,0 /* RS for P9 version of tlbiel */
682 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
686 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
691 /* Add timebase offset onto timebase */
692 22: ld r8,VCORE_TB_OFFSET(r5)
695 std r8, VCORE_TB_OFFSET_APPL(r5)
696 mftb r6 /* current host timebase */
698 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
699 mftb r7 /* check if lower 24 bits overflowed */
704 addis r8,r8,0x100 /* if so, increment upper 40 bits */
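/*
 * Hedged C sketch of the TBU40 dance above (illustrative only): mtspr
 * TBU40 writes just the upper 40 bits of the timebase, so a wrap of the
 * lower 24 bits between the mftb and the mtspr must be detected and
 * patched up:
 *
 *	u64 new_tb = mftb() + vc->tb_offset;
 *	mtspr(SPRN_TBU40, new_tb);
 *	if ((mftb() & 0xffffff) < (new_tb & 0xffffff))
 *		mtspr(SPRN_TBU40, new_tb + 0x1000000);	// carry into bit 24
 */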
707 /* Load guest PCR value to select appropriate compat mode */
708 37: ld r7, VCORE_PCR(r5)
715 /* DPDES and VTB are shared between threads */
716 ld r8, VCORE_DPDES(r5)
720 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
722 /* Mark the subcore state as inside guest */
723 bl kvmppc_subcore_enter_guest
725 ld r5, HSTATE_KVM_VCORE(r13)
726 ld r4, HSTATE_KVM_VCPU(r13)
728 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
730 /* Do we have a guest vcpu to run? */
732 beq kvmppc_primary_no_guest
734 /* Increment yield count if they have a VPA */
738 li r6, LPPACA_YIELDCOUNT
743 stb r6, VCPU_VPA_DIRTY(r4)
746 /* Save purr/spurr */
749 std r5,HSTATE_PURR(r13)
750 std r6,HSTATE_SPURR(r13)
756 /* Save host values of some registers */
762 std r5, STACK_SLOT_TID(r1)
763 std r6, STACK_SLOT_PSSCR(r1)
764 std r7, STACK_SLOT_PID(r1)
765 std r8, STACK_SLOT_IAMR(r1)
767 std r5, STACK_SLOT_HFSCR(r1)
768 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
773 std r5, STACK_SLOT_CIABR(r1)
774 std r6, STACK_SLOT_DAWR(r1)
775 std r7, STACK_SLOT_DAWRX(r1)
776 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
779 /* Set partition DABR */
780 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
781 lwz r5,VCPU_DABRX(r4)
786 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
788 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
790 * Branch around the call if both CPU_FTR_TM and
791 * CPU_FTR_P9_TM_HV_ASSIST are off.
795 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
797 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
801 bl kvmppc_restore_tm_hv
802 ld r4, HSTATE_KVM_VCPU(r13)
806 /* Load guest PMU registers */
807 /* R4 is live here (vcpu pointer) */
809 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
810 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
814 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
817 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
818 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
819 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
820 lwz r6, VCPU_PMC + 8(r4)
821 lwz r7, VCPU_PMC + 12(r4)
822 lwz r8, VCPU_PMC + 16(r4)
823 lwz r9, VCPU_PMC + 20(r4)
831 ld r5, VCPU_MMCR + 8(r4)
832 ld r6, VCPU_MMCR + 16(r4)
840 ld r5, VCPU_MMCR + 24(r4)
844 BEGIN_FTR_SECTION_NESTED(96)
845 lwz r7, VCPU_PMC + 24(r4)
846 lwz r8, VCPU_PMC + 28(r4)
847 ld r9, VCPU_MMCR + 32(r4)
851 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
852 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
856 /* Load up FP, VMX and VSX registers */
859 ld r14, VCPU_GPR(R14)(r4)
860 ld r15, VCPU_GPR(R15)(r4)
861 ld r16, VCPU_GPR(R16)(r4)
862 ld r17, VCPU_GPR(R17)(r4)
863 ld r18, VCPU_GPR(R18)(r4)
864 ld r19, VCPU_GPR(R19)(r4)
865 ld r20, VCPU_GPR(R20)(r4)
866 ld r21, VCPU_GPR(R21)(r4)
867 ld r22, VCPU_GPR(R22)(r4)
868 ld r23, VCPU_GPR(R23)(r4)
869 ld r24, VCPU_GPR(R24)(r4)
870 ld r25, VCPU_GPR(R25)(r4)
871 ld r26, VCPU_GPR(R26)(r4)
872 ld r27, VCPU_GPR(R27)(r4)
873 ld r28, VCPU_GPR(R28)(r4)
874 ld r29, VCPU_GPR(R29)(r4)
875 ld r30, VCPU_GPR(R30)(r4)
876 ld r31, VCPU_GPR(R31)(r4)
878 /* Switch DSCR to guest value */
883 /* Skip next section on POWER7 */
885 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
886 /* Load up POWER8-specific registers */
888 lwz r6, VCPU_PSPB(r4)
894 ld r6, VCPU_DAWRX(r4)
895 ld r7, VCPU_CIABR(r4)
898 * Handle broken DAWR case by not writing it. This means we
899 * can still store the DAWR register for migration.
904 END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
908 ld r8, VCPU_EBBHR(r4)
911 ld r5, VCPU_EBBRR(r4)
912 ld r6, VCPU_BESCR(r4)
913 lwz r7, VCPU_GUEST_PID(r4)
921 END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
923 /* POWER8-only registers */
924 ld r5, VCPU_TCSCR(r4)
926 ld r7, VCPU_CSIGR(r4)
934 /* POWER9-only registers */
936 ld r6, VCPU_PSSCR(r4)
937 lbz r8, HSTATE_FAKE_SUSPEND(r13)
938 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
939 rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
940 ld r7, VCPU_HFSCR(r4)
944 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
947 ld r5, VCPU_SPRG0(r4)
948 ld r6, VCPU_SPRG1(r4)
949 ld r7, VCPU_SPRG2(r4)
950 ld r8, VCPU_SPRG3(r4)
956 /* Load up DAR and DSISR */
958 lwz r6, VCPU_DSISR(r4)
962 /* Restore AMR and UAMOR, set AMOR to all 1s */
970 /* Restore state of CTRL run bit; assume 1 on entry */
978 /* Secondary threads wait for primary to have done partition switch */
979 ld r5, HSTATE_KVM_VCORE(r13)
980 lbz r6, HSTATE_PTID(r13)
983 lbz r0, VCORE_IN_GUEST(r5)
987 20: lwz r3, VCORE_ENTRY_EXIT(r5)
990 lbz r0, VCORE_IN_GUEST(r5)
1001 * Set the decrementer to the guest decrementer.
1003 ld r8,VCPU_DEC_EXPIRES(r4)
1004 /* r8 is a host timebase value here, convert to guest TB */
1005 ld r5,HSTATE_KVM_VCORE(r13)
1006 ld r6,VCORE_TB_OFFSET_APPL(r5)
1012 /* Check if HDEC expires soon */
1015 cmpdi r3, 512 /* 1 microsecond */
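/*
 * Worth spelling out: 512 is in timebase ticks, and at the standard
 * 512 MHz timebase frequency 512 ticks / 512 MHz = 1 microsecond. If the
 * hypervisor decrementer will fire that soon, entering the guest is not
 * worth it.
 */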
1018 /* For hash guest, clear out and reload the SLB */
1020 lbz r0, KVM_RADIX(r6)
1028 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
1029 lwz r5,VCPU_SLB_MAX(r4)
1034 1: ld r8,VCPU_SLB_E(r6)
1035 ld r9,VCPU_SLB_V(r6)
1037 addi r6,r6,VCPU_SLB_SIZE
1041 #ifdef CONFIG_KVM_XICS
1042 /* We are entering the guest on that thread, push VCPU to XIVE */
1043 ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1046 ld r11, VCPU_XIVE_SAVED_STATE(r4)
1050 lwz r11, VCPU_XIVE_CAM_WORD(r4)
1051 li r9, TM_QW1_OS + TM_WORD2
1054 stb r9, VCPU_XIVE_PUSHED(r4)
1058 * We clear the irq_pending flag. There is a small chance of a
1059 * race vs. the escalation interrupt happening on another
1060 * processor setting it again, but the only consequence is to
1061 * cause a spurious wakeup on the next H_CEDE, which is not an error.
1065 stb r0, VCPU_IRQ_PENDING(r4)
1068 * In single escalation mode, if the escalation interrupt is on, we mask it.
1071 lbz r0, VCPU_XIVE_ESC_ON(r4)
1074 ld r10, VCPU_XIVE_ESC_RADDR(r4)
1075 li r9, XIVE_ESB_SET_PQ_01
1079 /* We have a possible subtle race here: The escalation interrupt might
1080 * have fired and be on its way to the host queue while we mask it,
1081 * and if we unmask it early enough (re-cede right away), there is
1082 * a theoretical possibility that it fires again, thus landing in the
1083 * target queue more than once, which is a big no-no.
1085 * Fortunately, solving this is rather easy. If the above load setting
1086 * PQ to 01 returns a previous value where P is set, then we know the
1087 * escalation interrupt is somewhere on its way to the host. In that
1088 * case we simply don't clear the xive_esc_on flag below. It will be
1089 * eventually cleared by the handler for the escalation interrupt.
1091 * Then, when doing a cede, we check that flag again before re-enabling
1092 * the escalation interrupt, and if set, we abort the cede.
1094 andi. r0, r0, XIVE_ESB_VAL_P
1097 /* Now P is 0, we can clear the flag */
1099 stb r0, VCPU_XIVE_ESC_ON(r4)
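/*
 * The masking above, as a hedged C sketch (accessor name illustrative):
 *
 *	u64 old = esb_mmio_load(vcpu->arch.xive_esc_raddr + XIVE_ESB_SET_PQ_01);
 *	if (!(old & XIVE_ESB_VAL_P))
 *		vcpu->arch.xive_esc_on = 0;	// irq not in flight: safe to clear
 *	// else leave the flag set; the escalation handler clears it
 */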
1102 #endif /* CONFIG_KVM_XICS */
1104 deliver_guest_interrupt:
1111 kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
1113 ld r11, VCPU_MSR(r4)
1114 ld r6, VCPU_SRR0(r4)
1115 ld r7, VCPU_SRR1(r4)
1119 /* r11 = vcpu->arch.msr & ~MSR_HV */
1120 rldicl r11, r11, 63 - MSR_HV_LG, 1
1121 rotldi r11, r11, 1 + MSR_HV_LG
1122 ori r11, r11, MSR_ME
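/*
 * The rotate pair above is a mask-free way of computing, in C terms,
 *
 *	msr = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
 *
 * rotating MSR[HV] up to the top bit so rldicl's mask can clear it, then
 * rotating back, avoids materialising a 64-bit constant in a register.
 */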
1124 /* Check if we can deliver an external or decrementer interrupt now */
1125 ld r0, VCPU_PENDING_EXC(r4)
1126 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
1128 andi. r8, r11, MSR_EE
1130 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
1131 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
1135 li r0, BOOK3S_INTERRUPT_EXTERNAL
1139 /* On POWER9 check whether the guest has large decrementer enabled */
1140 andis. r8, r8, LPCR_LD@h
1142 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1145 li r0, BOOK3S_INTERRUPT_DECREMENTER
1148 12: mtspr SPRN_SRR0, r10
1150 mtspr SPRN_SRR1, r11
1152 bl kvmppc_msr_interrupt
1156 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1157 /* On POWER9, check for pending doorbell requests */
1158 lbz r0, VCPU_DBELL_REQ(r4)
1160 beq fast_guest_return
1161 ld r5, HSTATE_KVM_VCORE(r13)
1162 /* Set DPDES register so the CPU will take a doorbell interrupt */
1164 mtspr SPRN_DPDES, r0
1165 std r0, VCORE_DPDES(r5)
1166 /* Make sure other cpus see vcore->dpdes set before dbell req clear */
1168 /* Clear the pending doorbell request */
1170 stb r0, VCPU_DBELL_REQ(r4)
1175 * R10: value for HSRR0
1176 * R11: value for HSRR1
1181 stb r0,VCPU_CEDED(r4) /* cancel cede */
1182 mtspr SPRN_HSRR0,r10
1183 mtspr SPRN_HSRR1,r11
1185 /* Activate guest mode, so faults get handled by KVM */
1186 li r9, KVM_GUEST_MODE_GUEST_HV
1187 stb r9, HSTATE_IN_GUEST(r13)
1189 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1190 /* Accumulate timing */
1191 addi r3, r4, VCPU_TB_GUEST
1192 bl kvmhv_accumulate_time
1198 ld r5, VCPU_CFAR(r4)
1200 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1203 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1210 ld r1, VCPU_GPR(R1)(r4)
1211 ld r2, VCPU_GPR(R2)(r4)
1212 ld r3, VCPU_GPR(R3)(r4)
1213 ld r5, VCPU_GPR(R5)(r4)
1214 ld r6, VCPU_GPR(R6)(r4)
1215 ld r7, VCPU_GPR(R7)(r4)
1216 ld r8, VCPU_GPR(R8)(r4)
1217 ld r9, VCPU_GPR(R9)(r4)
1218 ld r10, VCPU_GPR(R10)(r4)
1219 ld r11, VCPU_GPR(R11)(r4)
1220 ld r12, VCPU_GPR(R12)(r4)
1221 ld r13, VCPU_GPR(R13)(r4)
1225 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1227 /* Move canary into DSISR so we can check for it later */
1230 mtspr SPRN_HDSISR, r0
1231 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1233 ld r0, VCPU_GPR(R0)(r4)
1234 ld r4, VCPU_GPR(R4)(r4)
1240 stw r12, STACK_SLOT_TRAP(r1)
1243 stw r12, VCPU_TRAP(r4)
1244 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1245 addi r3, r4, VCPU_TB_RMEXIT
1246 bl kvmhv_accumulate_time
1248 11: b kvmhv_switch_to_host
1255 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
1256 12: stw r12, VCPU_TRAP(r4)
1258 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1259 addi r3, r4, VCPU_TB_RMEXIT
1260 bl kvmhv_accumulate_time
1264 /******************************************************************************
 *                                Exit code
1268 *****************************************************************************/
1271 * We come here from the first-level interrupt handlers.
1273 .globl kvmppc_interrupt_hv
1274 kvmppc_interrupt_hv:
1276 * Register contents:
1277 * R12 = (guest CR << 32) | interrupt vector
1279 * guest R12 saved in shadow VCPU SCRATCH0
1280 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
1281 * guest R13 saved in SPRN_SCRATCH0
1283 std r9, HSTATE_SCRATCH2(r13)
1284 lbz r9, HSTATE_IN_GUEST(r13)
1285 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1286 beq kvmppc_bad_host_intr
1287 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1288 cmpwi r9, KVM_GUEST_MODE_GUEST
1289 ld r9, HSTATE_SCRATCH2(r13)
1290 beq kvmppc_interrupt_pr
1292 /* We're now back in the host but in guest MMU context */
1293 li r9, KVM_GUEST_MODE_HOST_HV
1294 stb r9, HSTATE_IN_GUEST(r13)
1296 ld r9, HSTATE_KVM_VCPU(r13)
1298 /* Save registers */
1300 std r0, VCPU_GPR(R0)(r9)
1301 std r1, VCPU_GPR(R1)(r9)
1302 std r2, VCPU_GPR(R2)(r9)
1303 std r3, VCPU_GPR(R3)(r9)
1304 std r4, VCPU_GPR(R4)(r9)
1305 std r5, VCPU_GPR(R5)(r9)
1306 std r6, VCPU_GPR(R6)(r9)
1307 std r7, VCPU_GPR(R7)(r9)
1308 std r8, VCPU_GPR(R8)(r9)
1309 ld r0, HSTATE_SCRATCH2(r13)
1310 std r0, VCPU_GPR(R9)(r9)
1311 std r10, VCPU_GPR(R10)(r9)
1312 std r11, VCPU_GPR(R11)(r9)
1313 ld r3, HSTATE_SCRATCH0(r13)
1314 std r3, VCPU_GPR(R12)(r9)
1315 /* CR is in the high half of r12 */
1319 ld r3, HSTATE_CFAR(r13)
1320 std r3, VCPU_CFAR(r9)
1321 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1323 ld r4, HSTATE_PPR(r13)
1324 std r4, VCPU_PPR(r9)
1325 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1327 /* Restore R1/R2 so we can handle faults */
1328 ld r1, HSTATE_HOST_R1(r13)
1331 mfspr r10, SPRN_SRR0
1332 mfspr r11, SPRN_SRR1
1333 std r10, VCPU_SRR0(r9)
1334 std r11, VCPU_SRR1(r9)
1335 /* trap is in the low half of r12, clear CR from the high half */
1337 andi. r0, r12, 2 /* need to read HSRR0/1? */
1339 mfspr r10, SPRN_HSRR0
1340 mfspr r11, SPRN_HSRR1
1342 1: std r10, VCPU_PC(r9)
1343 std r11, VCPU_MSR(r9)
1347 std r3, VCPU_GPR(R13)(r9)
1350 stw r12,VCPU_TRAP(r9)
1353 * Now that we have saved away SRR0/1 and HSRR0/1,
1354 * interrupts are recoverable in principle, so set MSR_RI.
1355 * This becomes important for relocation-on interrupts from
1356 * the guest, which we can get in radix mode on POWER9.
1361 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1362 addi r3, r9, VCPU_TB_RMINTR
1364 bl kvmhv_accumulate_time
1365 ld r5, VCPU_GPR(R5)(r9)
1366 ld r6, VCPU_GPR(R6)(r9)
1367 ld r7, VCPU_GPR(R7)(r9)
1368 ld r8, VCPU_GPR(R8)(r9)
1371 /* Save HEIR (HV emulation assist reg) in emul_inst
1372 if this is an HEI (HV emulation interrupt, e40) */
1373 li r3,KVM_INST_FETCH_FAILED
1374 stw r3,VCPU_LAST_INST(r9)
1375 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1378 11: stw r3,VCPU_HEIR(r9)
1380 /* these are volatile across C function calls */
1381 #ifdef CONFIG_RELOCATABLE
1382 ld r3, HSTATE_SCRATCH1(r13)
1388 std r3, VCPU_CTR(r9)
1389 std r4, VCPU_XER(r9)
1391 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1392 /* For softpatch interrupt, go off and do TM instruction emulation */
1393 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1397 /* If this is a page table miss then see if it's theirs or ours */
1398 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1400 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1403 /* See if this is a leftover HDEC interrupt */
1404 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1410 bge fast_guest_return
1412 /* See if this is an hcall we can handle in real mode */
1413 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1414 beq hcall_try_real_mode
1416 /* Hypervisor doorbell - exit only if host IPI flag set */
1417 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1422 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1423 lbz r0, HSTATE_HOST_IPI(r13)
1428 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1429 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1431 mfspr r3, SPRN_HFSCR
1432 std r3, VCPU_HFSCR(r9)
1435 /* External interrupt? */
1436 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1437 bne+ guest_exit_cont
1439 /* External interrupt, first check for host_ipi. If this is
1440 * set, we know the host wants us out so let's do it now
1445 * Restore the active volatile registers after returning from a C function.
1448 ld r9, HSTATE_KVM_VCPU(r13)
1449 li r12, BOOK3S_INTERRUPT_EXTERNAL
1452 * kvmppc_read_intr return codes:
1454 * Exit to host (r3 > 0)
1455 * 1 An interrupt is pending that needs to be handled by the host
1456 * Exit guest and return to host by branching to guest_exit_cont
1458 * 2 Passthrough that needs completion in the host
1459 * Exit guest and return to host by branching to guest_exit_cont
1460 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1461 * to indicate to the host to complete handling the interrupt
1463 * Before returning to guest, we check if any CPU is heading out
1464 * to the host and if so, we head out also. If no CPUs are heading
1465 * out, we check the return values <= 0 below.
1467 * Return to guest (r3 <= 0)
1468 * 0 No external interrupt is pending
1469 * -1 A guest wakeup IPI (which has now been cleared)
1470 * In either case, we return to guest to deliver any pending guest interrupts.
1473 * -2 A PCI passthrough external interrupt was handled
1474 * (interrupt was delivered directly to guest)
1475 * Return to guest to deliver any pending guest interrupts.
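/*
 * Shape of the dispatch below, in illustrative C:
 *
 *	int rc = kvmppc_read_intr();
 *	if (rc == 2) {			// passthrough: host must complete it
 *		trap = BOOK3S_INTERRUPT_HV_RM_HARD;
 *		goto guest_exit_cont;
 *	} else if (rc == 1) {		// ordinary host interrupt
 *		goto guest_exit_cont;
 *	}				// rc <= 0: back to the guest,
 *					// unless another thread is exiting
 */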
1481 /* Return code = 2 */
1482 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1483 stw r12, VCPU_TRAP(r9)
1486 1: /* Return code <= 1 */
1490 /* Return code <= 0 */
1491 4: ld r5, HSTATE_KVM_VCORE(r13)
1492 lwz r0, VCORE_ENTRY_EXIT(r5)
1495 blt deliver_guest_interrupt
1497 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1498 /* Save more register state */
1501 std r6, VCPU_DAR(r9)
1502 stw r7, VCPU_DSISR(r9)
1503 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1504 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1506 std r6, VCPU_FAULT_DAR(r9)
1507 stw r7, VCPU_FAULT_DSISR(r9)
1509 /* See if it is a machine check */
1510 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1511 beq machine_check_realmode
1513 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1514 addi r3, r9, VCPU_TB_RMEXIT
1516 bl kvmhv_accumulate_time
1518 #ifdef CONFIG_KVM_XICS
1519 /* We are exiting, pull the VP from the XIVE */
1520 lbz r0, VCPU_XIVE_PUSHED(r9)
1523 li r7, TM_SPC_PULL_OS_CTX
1526 andi. r0, r0, MSR_DR /* in real mode? */
1528 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1531 /* First load to pull the context, we ignore the value */
1534 /* Second load to recover the context state (Words 0 and 1) */
1537 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1540 /* First load to pull the context, we ignore the value */
1543 /* Second load to recover the context state (Words 0 and 1) */
1545 3: std r11, VCPU_XIVE_SAVED_STATE(r9)
1546 /* Fixup some of the state for the next load */
1549 stb r10, VCPU_XIVE_PUSHED(r9)
1550 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1551 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1554 #endif /* CONFIG_KVM_XICS */
1556 /* For hash guest, read the guest SLB and save it away */
1558 lbz r0, KVM_RADIX(r5)
1561 bne 3f /* for radix, save 0 entries */
1562 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1567 andis. r0,r8,SLB_ESID_V@h
1569 add r8,r8,r6 /* put index in */
1571 std r8,VCPU_SLB_E(r7)
1572 std r3,VCPU_SLB_V(r7)
1573 addi r7,r7,VCPU_SLB_SIZE
1577 /* Finally clear out the SLB */
1582 3: stw r5,VCPU_SLB_MAX(r9)
1584 /* load host SLB entries */
1585 BEGIN_MMU_FTR_SECTION
1587 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1588 ld r8,PACA_SLBSHADOWPTR(r13)
1590 .rept SLB_NUM_BOLTED
1591 li r3, SLBSHADOW_SAVEAREA
1595 andis. r7,r5,SLB_ESID_V@h
1603 stw r12, STACK_SLOT_TRAP(r1)
1606 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1607 ld r3, HSTATE_KVM_VCORE(r13)
1610 /* On P9, if the guest has large decr enabled, don't sign extend */
1612 ld r4, VCORE_LPCR(r3)
1613 andis. r4, r4, LPCR_LD@h
1615 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1618 /* r5 is a guest timebase value here, convert to host TB */
1619 ld r4,VCORE_TB_OFFSET_APPL(r3)
1621 std r5,VCPU_DEC_EXPIRES(r9)
1623 /* Increment exit count, poke other threads to exit */
1625 bl kvmhv_commence_exit
1627 ld r9, HSTATE_KVM_VCPU(r13)
1629 /* Stop others sending VCPU interrupts to this physical CPU */
1631 stw r0, VCPU_CPU(r9)
1632 stw r0, VCPU_THREAD_CPU(r9)
1634 /* Save guest CTRL register, set runlatch to 1 */
1636 stw r6,VCPU_CTRL(r9)
1643 * Save the guest PURR/SPURR
1648 ld r8,VCPU_SPURR(r9)
1649 std r5,VCPU_PURR(r9)
1650 std r6,VCPU_SPURR(r9)
1655 * Restore host PURR/SPURR and add guest times
1656 * so that the time in the guest gets accounted.
1658 ld r3,HSTATE_PURR(r13)
1659 ld r4,HSTATE_SPURR(r13)
1667 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1668 /* Save POWER8-specific registers */
1672 std r5, VCPU_IAMR(r9)
1673 stw r6, VCPU_PSPB(r9)
1674 std r7, VCPU_FSCR(r9)
1678 std r7, VCPU_TAR(r9)
1679 mfspr r8, SPRN_EBBHR
1680 std r8, VCPU_EBBHR(r9)
1681 mfspr r5, SPRN_EBBRR
1682 mfspr r6, SPRN_BESCR
1685 std r5, VCPU_EBBRR(r9)
1686 std r6, VCPU_BESCR(r9)
1687 stw r7, VCPU_GUEST_PID(r9)
1688 std r8, VCPU_WORT(r9)
1690 mfspr r5, SPRN_TCSCR
1692 mfspr r7, SPRN_CSIGR
1694 std r5, VCPU_TCSCR(r9)
1695 std r6, VCPU_ACOP(r9)
1696 std r7, VCPU_CSIGR(r9)
1697 std r8, VCPU_TACR(r9)
1700 mfspr r6, SPRN_PSSCR
1701 std r5, VCPU_TID(r9)
1702 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1704 std r6, VCPU_PSSCR(r9)
1705 /* Restore host HFSCR value */
1706 ld r7, STACK_SLOT_HFSCR(r1)
1707 mtspr SPRN_HFSCR, r7
1708 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1710 * Restore various registers to 0, where non-zero values
1711 * set by the guest could disrupt the host.
1718 mtspr SPRN_TCSCR, r0
1719 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1722 mtspr SPRN_MMCRS, r0
1723 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1726 /* Save and reset AMR and UAMOR before turning on the MMU */
1730 std r6,VCPU_UAMOR(r9)
1733 mtspr SPRN_UAMOR, r6
1735 /* Switch DSCR back to host value */
1737 ld r7, HSTATE_DSCR(r13)
1738 std r8, VCPU_DSCR(r9)
1741 /* Save non-volatile GPRs */
1742 std r14, VCPU_GPR(R14)(r9)
1743 std r15, VCPU_GPR(R15)(r9)
1744 std r16, VCPU_GPR(R16)(r9)
1745 std r17, VCPU_GPR(R17)(r9)
1746 std r18, VCPU_GPR(R18)(r9)
1747 std r19, VCPU_GPR(R19)(r9)
1748 std r20, VCPU_GPR(R20)(r9)
1749 std r21, VCPU_GPR(R21)(r9)
1750 std r22, VCPU_GPR(R22)(r9)
1751 std r23, VCPU_GPR(R23)(r9)
1752 std r24, VCPU_GPR(R24)(r9)
1753 std r25, VCPU_GPR(R25)(r9)
1754 std r26, VCPU_GPR(R26)(r9)
1755 std r27, VCPU_GPR(R27)(r9)
1756 std r28, VCPU_GPR(R28)(r9)
1757 std r29, VCPU_GPR(R29)(r9)
1758 std r30, VCPU_GPR(R30)(r9)
1759 std r31, VCPU_GPR(R31)(r9)
1762 mfspr r3, SPRN_SPRG0
1763 mfspr r4, SPRN_SPRG1
1764 mfspr r5, SPRN_SPRG2
1765 mfspr r6, SPRN_SPRG3
1766 std r3, VCPU_SPRG0(r9)
1767 std r4, VCPU_SPRG1(r9)
1768 std r5, VCPU_SPRG2(r9)
1769 std r6, VCPU_SPRG3(r9)
1775 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1777 * Branch around the call if both CPU_FTR_TM and
1778 * CPU_FTR_P9_TM_HV_ASSIST are off.
1782 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
1784 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
1788 bl kvmppc_save_tm_hv
1789 ld r9, HSTATE_KVM_VCPU(r13)
1793 /* Increment yield count if they have a VPA */
1794 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1797 li r4, LPPACA_YIELDCOUNT
1802 stb r3, VCPU_VPA_DIRTY(r9)
1804 /* Save PMU registers if requested */
1805 /* r8 and cr0.eq are live here */
1808 * POWER8 seems to have a hardware bug where setting
1809 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1810 * when some counters are already negative doesn't seem
1811 * to cause a performance monitor alert (and hence interrupt).
1812 * The effect of this is that when saving the PMU state,
1813 * if there is no PMU alert pending when we read MMCR0
1814 * before freezing the counters, but one becomes pending
1815 * before we read the counters, we lose it.
1816 * To work around this, we need a way to freeze the counters
1817 * before reading MMCR0. Normally, freezing the counters
1818 * is done by writing MMCR0 (to set MMCR0[FC]) which
1819 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1820 * we can also freeze the counters using MMCR2, by writing
1821 * 1s to all the counter freeze condition bits (there are
1822 * 9 bits each for 6 counters).
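/*
 * The sequence below, as a hedged C sketch (mfspr/mtspr accessors
 * illustrative):
 *
 *	unsigned long saved_mmcr2 = mfspr(SPRN_MMCR2);
 *	mtspr(SPRN_MMCR2, -1UL);		// freeze via MMCR2 first
 *	unsigned long mmcr0 = mfspr(SPRN_MMCR0);	// PMAO can no longer be lost
 *	mtspr(SPRN_MMCR0, MMCR0_FC);		// then freeze via FC as usual
 *	// ... read the PMCs, save mmcr0, restore MMCR2 ...
 */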
1824 li r3, -1 /* set all freeze bits */
1826 mfspr r10, SPRN_MMCR2
1827 mtspr SPRN_MMCR2, r3
1829 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1831 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1832 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1833 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1834 mfspr r6, SPRN_MMCRA
1835 /* Clear MMCRA in order to disable SDAR updates */
1837 mtspr SPRN_MMCRA, r7
1839 beq 21f /* if no VPA, save PMU stuff anyway */
1840 lbz r7, LPPACA_PMCINUSE(r8)
1841 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1843 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1845 21: mfspr r5, SPRN_MMCR1
1848 std r4, VCPU_MMCR(r9)
1849 std r5, VCPU_MMCR + 8(r9)
1850 std r6, VCPU_MMCR + 16(r9)
1852 std r10, VCPU_MMCR + 24(r9)
1853 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1854 std r7, VCPU_SIAR(r9)
1855 std r8, VCPU_SDAR(r9)
1862 stw r3, VCPU_PMC(r9)
1863 stw r4, VCPU_PMC + 4(r9)
1864 stw r5, VCPU_PMC + 8(r9)
1865 stw r6, VCPU_PMC + 12(r9)
1866 stw r7, VCPU_PMC + 16(r9)
1867 stw r8, VCPU_PMC + 20(r9)
1870 std r5, VCPU_SIER(r9)
1871 BEGIN_FTR_SECTION_NESTED(96)
1872 mfspr r6, SPRN_SPMC1
1873 mfspr r7, SPRN_SPMC2
1874 mfspr r8, SPRN_MMCRS
1875 stw r6, VCPU_PMC + 24(r9)
1876 stw r7, VCPU_PMC + 28(r9)
1877 std r8, VCPU_MMCR + 32(r9)
1879 mtspr SPRN_MMCRS, r4
1880 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
1881 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1884 /* Restore host values of some registers */
1886 ld r5, STACK_SLOT_CIABR(r1)
1887 ld r6, STACK_SLOT_DAWR(r1)
1888 ld r7, STACK_SLOT_DAWRX(r1)
1889 mtspr SPRN_CIABR, r5
1891 * If the DAWR doesn't work, it's ok to write these here as
1892 * this value should always be zero
1895 mtspr SPRN_DAWRX, r7
1896 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1898 ld r5, STACK_SLOT_TID(r1)
1899 ld r6, STACK_SLOT_PSSCR(r1)
1900 ld r7, STACK_SLOT_PID(r1)
1901 ld r8, STACK_SLOT_IAMR(r1)
1903 mtspr SPRN_PSSCR, r6
1906 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1908 #ifdef CONFIG_PPC_RADIX_MMU
1910 * Are we running hash or radix?
1913 lbz r0, KVM_RADIX(r5)
1918 * Radix: do eieio; tlbsync; ptesync sequence in case we
1919 * interrupted the guest between a tlbie and a ptesync.
1925 /* Radix: Handle the case where the guest used an illegal PID */
1926 LOAD_REG_ADDR(r4, mmu_base_pid)
1927 lwz r3, VCPU_GUEST_PID(r9)
1933 * Illegal PID, the HW might have prefetched and cached in the TLB
1934 * some translations for the LPID 0 / guest PID combination which
1935 * Linux doesn't know about, so we need to flush that PID out of
1936 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1937 * the right context.
1943 /* Then do a congruence class local flush */
1945 lwz r0,KVM_TLB_SETS(r6)
1947 li r7,0x400 /* IS field = 0b01 */
1949 sldi r0,r3,32 /* RS has PID */
1950 1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
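/*
 * Conceptually (illustrative C pseudo-form): one tlbiel per TLB set,
 * with RS carrying the PID in its upper word:
 *
 *	for (set = 0; set < kvm->arch.tlb_sets; set++)
 *		tlbiel(rb_for_set(set) | 0x400, (u64)pid << 32, 2, 1, 1);
 *		// args: RB (IS=0b01 + set), RS (PID), RIC=2, PRS=1, R=1
 */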
1955 2: /* Flush the ERAT on radix P9 DD1 guest exit */
1958 END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
1960 #endif /* CONFIG_PPC_RADIX_MMU */
1963 * POWER7/POWER8 guest -> host partition switch code.
1964 * We don't have to lock against tlbies but we do
1965 * have to coordinate the hardware threads.
1966 * Here STACK_SLOT_TRAP(r1) contains the trap number.
1968 kvmhv_switch_to_host:
1969 /* Secondary threads wait for primary to do partition switch */
1970 ld r5,HSTATE_KVM_VCORE(r13)
1971 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1972 lbz r3,HSTATE_PTID(r13)
1976 13: lbz r3,VCORE_IN_GUEST(r5)
1982 /* Primary thread waits for all the secondaries to exit guest */
1983 15: lwz r3,VCORE_ENTRY_EXIT(r5)
1984 rlwinm r0,r3,32-8,0xff
1990 /* Did we actually switch to the guest at all? */
1991 lbz r6, VCORE_IN_GUEST(r5)
1995 /* Primary thread switches back to host partition */
1996 lwz r7,KVM_HOST_LPID(r4)
1998 ld r6,KVM_HOST_SDR1(r4)
1999 li r8,LPID_RSVD /* switch to reserved LPID */
2002 mtspr SPRN_SDR1,r6 /* switch to host page table */
2003 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
2008 /* DPDES and VTB are shared between threads */
2009 mfspr r7, SPRN_DPDES
2011 std r7, VCORE_DPDES(r5)
2012 std r8, VCORE_VTB(r5)
2013 /* clear DPDES so we don't get guest doorbells in the host */
2015 mtspr SPRN_DPDES, r8
2016 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2018 /* If HMI, call kvmppc_realmode_hmi_handler() */
2019 lwz r12, STACK_SLOT_TRAP(r1)
2020 cmpwi r12, BOOK3S_INTERRUPT_HMI
2022 bl kvmppc_realmode_hmi_handler
2026 * At this point kvmppc_realmode_hmi_handler may have resync-ed
2027 * the TB, and if it has, we must not subtract the guest timebase
2028 * offset from the timebase. So, skip it.
2030 * Also, do not call kvmppc_subcore_exit_guest() because it has
2031 * been invoked as part of kvmppc_realmode_hmi_handler().
2036 /* Subtract timebase offset from timebase */
2037 ld r8, VCORE_TB_OFFSET_APPL(r5)
2041 std r0, VCORE_TB_OFFSET_APPL(r5)
2042 mftb r6 /* current guest timebase */
2044 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
2045 mftb r7 /* check if lower 24 bits overflowed */
2050 addis r8,r8,0x100 /* if so, increment upper 40 bits */
2053 17: bl kvmppc_subcore_exit_guest
2055 30: ld r5,HSTATE_KVM_VCORE(r13)
2056 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
2059 ld r0, VCORE_PCR(r5)
2065 /* Signal secondary CPUs to continue */
2066 stb r0,VCORE_IN_GUEST(r5)
2067 19: lis r8,0x7fff /* MAX_INT@h */
2072 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
2073 ld r3, HSTATE_SPLIT_MODE(r13)
2076 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
2079 bl kvmhv_p9_restore_lpcr
2083 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2084 ld r8,KVM_HOST_LPCR(r4)
2088 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2089 /* Finish timing, if we have a vcpu */
2090 ld r4, HSTATE_KVM_VCPU(r13)
2094 bl kvmhv_accumulate_time
2097 /* Unset guest mode */
2098 li r0, KVM_GUEST_MODE_NONE
2099 stb r0, HSTATE_IN_GUEST(r13)
2101 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
2102 ld r0, SFS+PPC_LR_STKOFF(r1)
2107 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2109 * Softpatch interrupt for transactional memory emulation cases
2110 * on POWER9 DD2.2. This is early in the guest exit path - we
2111 * haven't saved registers or done a treclaim yet.
2114 /* Save instruction image in HEIR */
2116 stw r3, VCPU_HEIR(r9)
2119 * The cases we want to handle here are those where the guest
2120 * is in real suspend mode and is trying to transition to
2121 * transactional mode.
2123 lbz r0, HSTATE_FAKE_SUSPEND(r13)
2124 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
2126 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
2127 cmpwi r3, 1 /* or if not in suspend state */
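/*
 * The rldicl above reads, in C terms, ts = (msr >> MSR_TS_S_LG) & 3;
 * ts == 1 means the guest was in TM suspended state, the only state we
 * emulate here; anything else keeps exiting to the host.
 */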
2130 /* Call C code to do the emulation */
2132 bl kvmhv_p9_tm_emulation_early
2134 ld r9, HSTATE_KVM_VCPU(r13)
2135 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
2137 beq guest_exit_cont /* continue exiting if not handled */
2139 ld r11, VCPU_MSR(r9)
2140 b fast_interrupt_c_return /* go back to guest if handled */
2141 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2144 * Check whether an HDSI is an HPTE not found fault or something else.
2145 * If it is an HPTE not found fault that is due to the guest accessing
2146 * a page that they have mapped but which we have paged out, then
2147 * we continue on with the guest exit path. In all other cases,
2148 * reflect the HDSI to the guest as a DSI.
2152 lbz r0, KVM_RADIX(r3)
2154 mfspr r6, SPRN_HDSISR
2156 /* Look for DSISR canary. If we find it, retry instruction */
2159 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2161 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
2162 /* HPTE not found fault or protection fault? */
2163 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
2164 beq 1f /* if not, send it to the guest */
2165 andi. r0, r11, MSR_DR /* data relocation enabled? */
2168 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2170 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2172 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2173 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2174 bne 7f /* if no SLB entry found */
2175 4: std r4, VCPU_FAULT_DAR(r9)
2176 stw r6, VCPU_FAULT_DSISR(r9)
2178 /* Search the hash table. */
2179 mr r3, r9 /* vcpu pointer */
2180 li r7, 1 /* data fault */
2181 bl kvmppc_hpte_hv_fault
2182 ld r9, HSTATE_KVM_VCPU(r13)
2184 ld r11, VCPU_MSR(r9)
2185 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2186 cmpdi r3, 0 /* retry the instruction */
2188 cmpdi r3, -1 /* handle in kernel mode */
2190 cmpdi r3, -2 /* MMIO emulation; need instr word */
2193 /* Synthesize a DSI (or DSegI) for the guest */
2194 ld r4, VCPU_FAULT_DAR(r9)
2196 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
2197 mtspr SPRN_DSISR, r6
2198 7: mtspr SPRN_DAR, r4
2199 mtspr SPRN_SRR0, r10
2200 mtspr SPRN_SRR1, r11
2202 bl kvmppc_msr_interrupt
2203 fast_interrupt_c_return:
2204 6: ld r7, VCPU_CTR(r9)
2211 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2212 ld r5, KVM_VRMA_SLB_V(r5)
2215 /* If this is for emulated MMIO, load the instruction word */
2216 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2218 /* Set guest mode to 'jump over instruction' so if lwz faults
2219 * we'll just continue at the next IP. */
2220 li r0, KVM_GUEST_MODE_SKIP
2221 stb r0, HSTATE_IN_GUEST(r13)
2223 /* Do the access with MSR:DR enabled */
2225 ori r4, r3, MSR_DR /* Enable paging for data */
2230 /* Store the result */
2231 stw r8, VCPU_LAST_INST(r9)
2233 /* Unset guest mode. */
2234 li r0, KVM_GUEST_MODE_HOST_HV
2235 stb r0, HSTATE_IN_GUEST(r13)
2239 std r4, VCPU_FAULT_DAR(r9)
2240 stw r6, VCPU_FAULT_DSISR(r9)
2243 std r5, VCPU_FAULT_GPA(r9)
2247 * Similarly for an HISI, reflect it to the guest as an ISI unless
2248 * it is an HPTE not found fault for a page that we have paged out.
2252 lbz r0, KVM_RADIX(r3)
2254 bne .Lradix_hisi /* for radix, just save ASDR */
2255 andis. r0, r11, SRR1_ISI_NOPT@h
2257 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2260 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2262 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2264 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2265 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2266 bne 7f /* if no SLB entry found */
2268 /* Search the hash table. */
2269 mr r3, r9 /* vcpu pointer */
2272 li r7, 0 /* instruction fault */
2273 bl kvmppc_hpte_hv_fault
2274 ld r9, HSTATE_KVM_VCPU(r13)
2276 ld r11, VCPU_MSR(r9)
2277 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2278 cmpdi r3, 0 /* retry the instruction */
2279 beq fast_interrupt_c_return
2280 cmpdi r3, -1 /* handle in kernel mode */
2283 /* Synthesize an ISI (or ISegI) for the guest */
2285 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE
2286 7: mtspr SPRN_SRR0, r10
2287 mtspr SPRN_SRR1, r11
2289 bl kvmppc_msr_interrupt
2290 b fast_interrupt_c_return
2292 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2293 ld r5, KVM_VRMA_SLB_V(r6)
2297 * Try to handle an hcall in real mode.
2298 * Returns to the guest if we handle it, or continues on up to
2299 * the kernel if we can't (i.e. if we don't have a handler for
2300 * it, or if the handler returns H_TOO_HARD).
2302 * r5 - r8 contain hcall args,
2303 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
2305 hcall_try_real_mode:
2306 ld r3,VCPU_GPR(R3)(r9)
2308 /* sc 1 from userspace - reflect to guest syscall */
2309 bne sc_1_fast_return
2311 cmpldi r3,hcall_real_table_end - hcall_real_table
2313 /* See if this hcall is enabled for in-kernel handling */
2315 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2316 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2318 ld r0, KVM_ENABLED_HCALLS(r4)
2319 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
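/*
 * C reading of the bitmap test above (hcall numbers are multiples of 4,
 * so token / 4 indexes the enabled_hcalls bitmap; illustrative):
 *
 *	unsigned int  nr   = token / 4;
 *	unsigned long word = kvm->arch.enabled_hcalls[nr >> 6];
 *	bool enabled = (word >> (nr & 0x3f)) & 1;
 */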
2323 /* Get pointer to handler, if any, and call it */
2324 LOAD_REG_ADDR(r4, hcall_real_table)
2330 mr r3,r9 /* get vcpu pointer */
2331 ld r4,VCPU_GPR(R4)(r9)
2334 beq hcall_real_fallback
2335 ld r4,HSTATE_KVM_VCPU(r13)
2336 std r3,VCPU_GPR(R3)(r4)
2344 li r10, BOOK3S_INTERRUPT_SYSCALL
2345 bl kvmppc_msr_interrupt
2349 /* We've attempted a real mode hcall, but it has been punted back
2350 * to userspace. We need to restore some clobbered volatiles
2351 * before resuming the pass-it-to-qemu path */
2352 hcall_real_fallback:
2353 li r12,BOOK3S_INTERRUPT_SYSCALL
2354 ld r9, HSTATE_KVM_VCPU(r13)
2358 .globl hcall_real_table
2360 .long 0 /* 0 - unused */
2361 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2362 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2363 .long DOTSYM(kvmppc_h_read) - hcall_real_table
2364 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2365 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
2366 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2367 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
2368 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
2369 .long 0 /* 0x24 - H_SET_SPRG0 */
2370 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
2385 #ifdef CONFIG_KVM_XICS
2386 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2387 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2388 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
2389 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
2390 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
2392 .long 0 /* 0x64 - H_EOI */
2393 .long 0 /* 0x68 - H_CPPR */
2394 .long 0 /* 0x6c - H_IPI */
2395 .long 0 /* 0x70 - H_IPOLL */
2396 .long 0 /* 0x74 - H_XIRR */
2424 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
2425 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
2441 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
2445 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
2446 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
2447 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
2559 #ifdef CONFIG_KVM_XICS
2560 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2562 .long 0 /* 0x2fc - H_XIRR_X*/
2564 .long DOTSYM(kvmppc_h_random) - hcall_real_table
2565 .globl hcall_real_table_end
2566 hcall_real_table_end:
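/*
 * Dispatch sketch (illustrative C): each table entry is a 32-bit offset
 * from hcall_real_table, so the real-mode handler is located as
 *
 *	s32 off = ((s32 *)hcall_real_table)[token / 4];
 *	if (off == 0)
 *		goto hcall_real_fallback;	// no real-mode handler
 *	handler = (unsigned long)hcall_real_table + off;
 *
 * with zero entries (e.g. H_SET_SPRG0 above) falling back to the
 * virtual-mode hcall path.
 */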
2568 _GLOBAL(kvmppc_h_set_xdabr)
2569 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2571 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2574 6: li r3, H_PARAMETER
2577 _GLOBAL(kvmppc_h_set_dabr)
2578 li r5, DABRX_USER | DABRX_KERNEL
2582 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2583 std r4,VCPU_DABR(r3)
2584 stw r5, VCPU_DABRX(r3)
2585 mtspr SPRN_DABRX, r5
2586 /* Work around P7 bug where DABR can get corrupted on mtspr */
2587 1: mtspr SPRN_DABR,r4
2597 /* POWER9 with disabled DAWR */
2600 END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
2601 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2602 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2603 rlwimi r5, r4, 2, DAWRX_WT
2605 std r4, VCPU_DAWR(r3)
2606 std r5, VCPU_DAWRX(r3)
2608 mtspr SPRN_DAWRX, r5
2612 _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
2614 std r11,VCPU_MSR(r3)
2616 stb r0,VCPU_CEDED(r3)
2617 sync /* order setting ceded vs. testing prodded */
2618 lbz r5,VCPU_PRODDED(r3)
2620 bne kvm_cede_prodded
2621 li r12,0 /* set trap to 0 to say hcall is handled */
2622 stw r12,VCPU_TRAP(r3)
2624 std r0,VCPU_GPR(R3)(r3)
2627 * Set our bit in the bitmask of napping threads unless all the
2628 * other threads are already napping, in which case we send this up to the host.
2631 ld r5,HSTATE_KVM_VCORE(r13)
2632 lbz r6,HSTATE_PTID(r13)
2633 lwz r8,VCORE_ENTRY_EXIT(r5)
2637 addi r6,r5,VCORE_NAPPING_THREADS
2644 /* order napping_threads update vs testing entry_exit_map */
2647 stb r0,HSTATE_NAPPING(r13)
2648 lwz r7,VCORE_ENTRY_EXIT(r5)
2650 bge 33f /* another thread already exiting */
2653 * Although not specifically required by the architecture, POWER7
2654 * preserves the following registers in nap mode, even if an SMT mode
2655 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2656 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2658 /* Save non-volatile GPRs */
2659 std r14, VCPU_GPR(R14)(r3)
2660 std r15, VCPU_GPR(R15)(r3)
2661 std r16, VCPU_GPR(R16)(r3)
2662 std r17, VCPU_GPR(R17)(r3)
2663 std r18, VCPU_GPR(R18)(r3)
2664 std r19, VCPU_GPR(R19)(r3)
2665 std r20, VCPU_GPR(R20)(r3)
2666 std r21, VCPU_GPR(R21)(r3)
2667 std r22, VCPU_GPR(R22)(r3)
2668 std r23, VCPU_GPR(R23)(r3)
2669 std r24, VCPU_GPR(R24)(r3)
2670 std r25, VCPU_GPR(R25)(r3)
2671 std r26, VCPU_GPR(R26)(r3)
2672 std r27, VCPU_GPR(R27)(r3)
2673 std r28, VCPU_GPR(R28)(r3)
2674 std r29, VCPU_GPR(R29)(r3)
2675 std r30, VCPU_GPR(R30)(r3)
2676 std r31, VCPU_GPR(R31)(r3)
2681 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2683 * Branch around the call if both CPU_FTR_TM and
2684 * CPU_FTR_P9_TM_HV_ASSIST are off.
2688 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2690 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2692 ld r3, HSTATE_KVM_VCPU(r13)
2694 bl kvmppc_save_tm_hv
2699 * Set DEC to the smaller of DEC and HDEC, so that we wake
2700 * no later than the end of our timeslice (HDEC interrupts
2701 * don't wake us from nap).
2707 /* On P9 check whether the guest has large decrementer mode enabled */
2708 ld r6, HSTATE_KVM_VCORE(r13)
2709 ld r6, VCORE_LPCR(r6)
2710 andis. r6, r6, LPCR_LD@h
2712 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2719 /* save expiry time of guest decrementer */
2721 ld r4, HSTATE_KVM_VCPU(r13)
2722 ld r5, HSTATE_KVM_VCORE(r13)
2723 ld r6, VCORE_TB_OFFSET_APPL(r5)
2724 subf r3, r6, r3 /* convert to host TB value */
2725 std r3, VCPU_DEC_EXPIRES(r4)
2727 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2728 ld r4, HSTATE_KVM_VCPU(r13)
2729 addi r3, r4, VCPU_TB_CEDE
2730 bl kvmhv_accumulate_time
2733 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2736 * Take a nap until a decrementer or external or doorbell interrupt
2737 * occurs, with PECE1 and PECE0 set in LPCR.
2738 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
2739 * Also clear the runlatch bit before napping.
2742 mfspr r0, SPRN_CTRLF
2744 mtspr SPRN_CTRLT, r0
2747 stb r0,HSTATE_HWTHREAD_REQ(r13)
2749 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
2751 ori r5, r5, LPCR_PECEDH
2752 rlwimi r5, r3, 0, LPCR_PECEDP
2753 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2755 kvm_nap_sequence: /* desired LPCR value in r5 */
2758 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2759 * enable state loss = 1 (allow SMT mode switch)
2760 * requested level = 0 (just stop dispatching)
2762 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2763 mtspr SPRN_PSSCR, r3
2764 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2765 li r4, LPCR_PECE_HVEE@higher
2768 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2772 std r0, HSTATE_SCRATCH0(r13)
2774 ld r0, HSTATE_SCRATCH0(r13)
2781 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
2790 /* get vcpu pointer */
2791 ld r4, HSTATE_KVM_VCPU(r13)
2793 /* Woken by external or decrementer interrupt */
2794 ld r1, HSTATE_HOST_R1(r13)
2796 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2797 addi r3, r4, VCPU_TB_RMINTR
2798 bl kvmhv_accumulate_time
2801 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2803 * Branch around the call if both CPU_FTR_TM and
2804 * CPU_FTR_P9_TM_HV_ASSIST are off.
2808 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2810 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2814 bl kvmppc_restore_tm_hv
2815 ld r4, HSTATE_KVM_VCPU(r13)
2819 /* load up FP state */
2822 /* Restore guest decrementer */
2823 ld r3, VCPU_DEC_EXPIRES(r4)
2824 ld r5, HSTATE_KVM_VCORE(r13)
2825 ld r6, VCORE_TB_OFFSET_APPL(r5)
2826 add r3, r3, r6 /* convert host TB to guest TB value */
2832 ld r14, VCPU_GPR(R14)(r4)
2833 ld r15, VCPU_GPR(R15)(r4)
2834 ld r16, VCPU_GPR(R16)(r4)
2835 ld r17, VCPU_GPR(R17)(r4)
2836 ld r18, VCPU_GPR(R18)(r4)
2837 ld r19, VCPU_GPR(R19)(r4)
2838 ld r20, VCPU_GPR(R20)(r4)
2839 ld r21, VCPU_GPR(R21)(r4)
2840 ld r22, VCPU_GPR(R22)(r4)
2841 ld r23, VCPU_GPR(R23)(r4)
2842 ld r24, VCPU_GPR(R24)(r4)
2843 ld r25, VCPU_GPR(R25)(r4)
2844 ld r26, VCPU_GPR(R26)(r4)
2845 ld r27, VCPU_GPR(R27)(r4)
2846 ld r28, VCPU_GPR(R28)(r4)
2847 ld r29, VCPU_GPR(R29)(r4)
2848 ld r30, VCPU_GPR(R30)(r4)
2849 ld r31, VCPU_GPR(R31)(r4)
2851 /* Check the wake reason in SRR1 to see why we got here */
2852 bl kvmppc_check_wake_reason
2855 * Restore volatile registers since we could have called a
2856 * C routine in kvmppc_check_wake_reason
2858 * r3 tells us whether we need to return to host or not
2859 * WARNING: it gets checked further down:
2860 * should not modify r3 until this check is done.
2862 ld r4, HSTATE_KVM_VCPU(r13)
2864 /* clear our bit in vcore->napping_threads */
2865 34: ld r5,HSTATE_KVM_VCORE(r13)
2866 lbz r7,HSTATE_PTID(r13)
2869 addi r6,r5,VCORE_NAPPING_THREADS
2875 stb r0,HSTATE_NAPPING(r13)
2877 /* See if the wake reason saved in r3 means we need to exit */
2878 stw r12, VCPU_TRAP(r4)
2883 /* see if any other thread is already exiting */
2884 lwz r0,VCORE_ENTRY_EXIT(r5)
2888 b kvmppc_cede_reentry /* if not go back to guest */
2890 /* the case where we cede having already been prodded */
2893 stb r0,VCPU_PRODDED(r3)
2894 sync /* order testing prodded vs. clearing ceded */
2895 stb r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* Abort if we still have a pending escalation */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	cmpwi	r5, 0
	beq	1f
	li	r0, 0
	stb	r0, VCPU_CEDED(r9)
1:	/* Enable XIVE escalation */
	li	r5, XIVE_ESB_SET_PQ_00
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	1f
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldx	r0, r10, r5
	b	2f
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
	cmpdi	r10, 0
	beq	3f
	ldcix	r0, r10, r5
2:	sync
	li	r0, 1
	stb	r0, VCPU_XIVE_ESC_ON(r9)
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont
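
/*
 * Added note: XIVE escalation is re-enabled by a dummy load from the
 * escalation ESB page at offset XIVE_ESB_SET_PQ_00, which sets the PQ
 * bits to 00 (resting state, delivery enabled).  The load is done
 * cache-inhibited from the real address (ldcix) when the MMU is off,
 * or as a normal load from the virtual address (ldx) when translation
 * is on.
 */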

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all MCE errors
	 * (handled or unhandled) by exiting the guest with a KVM_EXIT_NMI
	 * exit reason.  This approach injects machine check errors into
	 * the guest address space, with additional information in the
	 * form of an RTAS event, so that the guest kernel can handle
	 * them suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back to
	 * the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest execution
	 * with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0), deliver it to
	 * the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */

	cmpdi	r3, 0			/* Did we handle MCE ? */
	bne	2f			/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
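
/*
 * Added summary (sketch) of the real-mode machine-check policy above:
 *
 *	if (guest MSR[HV])          -> exit to host (mc_cont)
 *	else if (FWNMI capable)     -> exit to host with KVM_EXIT_NMI
 *	else if (!guest MSR[RI])    -> inject 0x200 machine check
 *	else if (!handled)          -> inject 0x200 machine check
 *	else                        -> resume the guest
 */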

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
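
/*
 * Added sketch: the wake reason is the SRR1[42:45] field, so the
 * extraction above is equivalent to
 *
 *	reason = (srr1 >> 18) & 0xf;	// POWER8, 4-bit field
 *	reason = (srr1 >> 18) & 0xe;	// POWER7, 3-bit field
 *
 * The rlwinm rotate amount of 14 (= 45 - 31) moves the field down to
 * the least-significant bits of the low word.
 */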
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance ? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
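
/*
 * Added note: msgclr takes the doorbell type in bits 32:36 of its
 * operand, so PPC_DBELL_SERVER is shifted left by 63 - 36 = 27 bits
 * into the word loaded by lis; the msgclr then discards any pending
 * server doorbell so that it is not taken again once interrupts are
 * re-enabled.
 */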
	/* see if it's a host IPI */
	li	r3, 1
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beqlr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt.  Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr
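
/*
 * Added note, based on the comments above: kvmppc_read_intr returns 0
 * when there is nothing for the host to do, 1 for an ordinary host
 * interrupt, and a value greater than 1 for a PCI passthrough
 * interrupt whose handling the host must complete; in the latter case
 * the trap reason in r12 is changed from BOOK3S_INTERRUPT_EXTERNAL to
 * BOOK3S_INTERRUPT_HV_RM_HARD before returning.
 */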

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31, r3
	mfmsr	r5
	ori	r8, r5, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8		/* enable FP/VEC/VSX before saving them */
	addi	r3, r3, VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3, r31, VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31, r4
	mfmsr	r9
	ori	r8, r9, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8		/* enable FP/VEC/VSX before loading them */
	addi	r3, r4, VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3, r31, VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7, VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE, r7
	mtlr	r30
	mr	r4, r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * This can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
kvmppc_save_tm_hv:
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl.	r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
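
/*
 * Added note: MSR[TS] is the two-bit transaction-state field starting
 * at bit MSR_TS_S_LG, so the rldicl. above computes
 *
 *	ts = (msr >> MSR_TS_S_LG) & 3;	// 1 = suspended, 2 = transactional
 *
 * and sets CR0 from it; a non-zero TS means the hrfid into the guest
 * really happened, since that is what put the thread in suspend.
 */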
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	std	r1, HSTATE_HOST_R1(r13)

	/* Clear the MSR RI since r1, r13 may be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 */
	/* Reload PACA pointer, stack pointer and TOC. */
	GET_PACA(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	HMT_MEDIUM
	ld	r6, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r6
BEGIN_FTR_SECTION_NESTED(96)
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
kvmppc_restore_tm_hv:
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
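
/*
 * Added note: starting THREAD_SIZE/2 below the top of the emergency
 * stack means that if this CPU was already running on the emergency
 * stack when the bad interrupt arrived, the new frame lands in the
 * lower half instead of overwriting the frames in the upper half.
 */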
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2	/* did the interrupt use HSRR0/1? */
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_4GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)	/* "regshere" marker */

	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0

	/* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	mtspr	SPRN_PID, r0
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b
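
/*
 * Added note: the bcl 20,31,.+4 / mflr pair computes the current real
 * address of this code position-independently; rldimi then forces the
 * top two bits so the address becomes its 0xc000000000000000 kernel
 * linear-map alias, and the SRR0/SRR1 + RFI_TO_KERNEL sequence turns
 * the MMU back on and resumes at label 9 in virtual mode.
 */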

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* Check if we are in transactional state.. */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
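
/*
 * Added sketch of the TS transition above: delivering an interrupt
 * turns a transactional guest into a suspended one, i.e.
 *
 *	ts = (old_msr >> MSR_TS_S_LG) & 3;	// 2 = transactional
 *	if (ts == 2)
 *		ts = 1;				// -> suspended
 *	new_msr = intr_msr | (ts << MSR_TS_S_LG);
 *
 * where intr_msr is the value loaded from VCPU_INTR_MSR.
 */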

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	4f
3:	std	r3, TAS_MIN(r5)
4:	cmpd	r3, r7
	ble	5f
	std	r3, TAS_MAX(r5)
5:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
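
/*
 * Added note: the two TAS_SEQCOUNT increments around the total/min/max
 * updates form a seqlock-style protocol:
 *
 *	seq++;			// odd: update in progress
 *	lwsync();
 *	total += delta; min/max updated;
 *	lwsync();
 *	seq++;			// even again: update complete
 *
 * A reader retries if it sees an odd count, or if the count changes
 * between its two reads, so the statistics stay consistent without a
 * lock.
 */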