KVM: PPC: Permit SRR1 flags in more injected interrupt types
author Nicholas Piggin <npiggin@gmail.com>
Thu, 30 Mar 2023 10:32:23 +0000 (20:32 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Mon, 3 Apr 2023 04:54:44 +0000 (14:54 +1000)
The prefixed instruction architecture in ISA v3.1 introduces a prefixed bit
in SRR1 for many types of synchronous interrupts, which is set when the
interrupt is caused by a prefixed instruction.

This requires KVM to be able to set the bit when injecting interrupts
into a guest. Plumb an SRR1 "flags" argument through the core_queue
APIs that are currently missing it. For now callers pass 0, so there is
no change in behaviour.
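
As an illustration (not part of this patch), a minimal sketch of how a
caller might use the reordered kvmppc_core_queue_data_storage() once
prefixed-instruction detection is wired up. The reflect_dsi() helper and
its "prefixed" parameter are hypothetical stand-ins for however the caller
determines that the guest instruction was prefixed; SRR1_PREFIXED is
assumed to be the existing SRR1 bit definition for this purpose.

	/*
	 * Illustrative sketch only: inject a DSI into the guest and record
	 * in SRR1 whether the faulting instruction was prefixed (ISA v3.1).
	 */
	static void reflect_dsi(struct kvm_vcpu *vcpu, ulong dar, ulong dsisr,
				bool prefixed)
	{
		ulong srr1_flags = 0;

		if (prefixed)		/* hypothetical caller-provided flag */
			srr1_flags |= SRR1_PREFIXED;

		/* New argument order: srr1_flags first, then DAR and DSISR */
		kvmppc_core_queue_data_storage(vcpu, srr1_flags, dar, dsisr);
	}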

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fixup kvmppc_core_queue_alignment() in booke.c]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230330103224.3589928-2-npiggin@gmail.com
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_nested.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/emulate_loadstore.c
arch/powerpc/kvm/powerpc.c

index 6bef23d6d0e3571907174f394834de09d81be345..23ea02b06ffa6f69b245172ffa9b05e3cc4a15f3 100644 (file)
@@ -126,25 +126,34 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
+                                           ulong srr1_flags);
 extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
-extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
+                                     ulong srr1_flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
+                                       ulong srr1_flags);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
+                                         ulong srr1_flags);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
+                                         ulong srr1_flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
+                                       ulong dear_flags,
                                        ulong esr_flags);
 extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
-                                          ulong dear_flags,
-                                          ulong esr_flags);
+                                          ulong srr1_flags,
+                                          ulong dar,
+                                          ulong dsisr);
 extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
-                                          ulong esr_flags);
+                                          ulong srr1_flags);
+
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
index 57f4e7896d671a5affe7dd01a431ba10d825a4c1..fa6ac24f328044c420b8bb27894d4e54d1b29dff 100644 (file)
@@ -188,10 +188,10 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
-void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
        /* might as well deliver this straight away */
-       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
 
@@ -201,29 +201,29 @@ void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL(kvmppc_core_queue_syscall);
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
        /* might as well deliver this straight away */
-       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
        /* might as well deliver this straight away */
-       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
 }
 
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
        /* might as well deliver this straight away */
-       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
 }
 
-void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
        /* might as well deliver this straight away */
-       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
 }
 
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -278,18 +278,18 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }
 
-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
-                                   ulong flags)
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
+                                   ulong dar, ulong dsisr)
 {
        kvmppc_set_dar(vcpu, dar);
-       kvmppc_set_dsisr(vcpu, flags);
-       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+       kvmppc_set_dsisr(vcpu, dsisr);
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
 
-void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
-       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
+       kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
 
index 9d3743ca16d53e6baf9083bee97cd58101acd695..215a6b5ba104cbf92560824bdb578de1a6210985 100644 (file)
@@ -954,7 +954,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
        if (dsisr & DSISR_BADACCESS) {
                /* Reflect to the guest as DSI */
                pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-               kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+               kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
                return RESUME_GUEST;
        }
 
@@ -979,7 +979,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
                         * Bad address in guest page table tree, or other
                         * unusual error - reflect it to the guest as DSI.
                         */
-                       kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+                       kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
                        return RESUME_GUEST;
                }
                return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
@@ -988,8 +988,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
        if (memslot->flags & KVM_MEM_READONLY) {
                if (writing) {
                        /* give the guest a DSI */
-                       kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
-                                                      DSISR_PROTFAULT);
+                       kvmppc_core_queue_data_storage(vcpu, 0, ea,
+                                       DSISR_ISSTORE | DSISR_PROTFAULT);
                        return RESUME_GUEST;
                }
                kvm_ro = true;
index 6ba68dd6190bd30a4ab31c417df2e909077d62a5..38c6b33d759e2cd571b7b6bf59e314221c542980 100644 (file)
@@ -1739,7 +1739,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                }
 
                if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
-                       kvmppc_core_queue_data_storage(vcpu,
+                       kvmppc_core_queue_data_storage(vcpu, 0,
                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
                        r = RESUME_GUEST;
                        break;
@@ -1757,7 +1757,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                } else if (err == -1 || err == -2) {
                        r = RESUME_PAGE_FAULT;
                } else {
-                       kvmppc_core_queue_data_storage(vcpu,
+                       kvmppc_core_queue_data_storage(vcpu, 0,
                                vcpu->arch.fault_dar, err);
                        r = RESUME_GUEST;
                }
index 5a64a1341e6f1de1e7505b4b54468697357faf19..2c9db6119d89180aed087a5b8676e18b8b5e480c 100644 (file)
@@ -1560,7 +1560,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
                        /* unusual error -> reflect to the guest as a DSI */
-                       kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+                       kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
                        return RESUME_GUEST;
                }
 
@@ -1570,7 +1570,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
        if (memslot->flags & KVM_MEM_READONLY) {
                if (writing) {
                        /* Give the guest a DSI */
-                       kvmppc_core_queue_data_storage(vcpu, ea,
+                       kvmppc_core_queue_data_storage(vcpu, 0, ea,
                                        DSISR_ISSTORE | DSISR_PROTFAULT);
                        return RESUME_GUEST;
                }
index 9fc4dd8f66ebc365b1e211c880f298d90266167e..fdbc88a4c0563bec3dc214bbe1854e8c7913a572 100644 (file)
@@ -759,7 +759,7 @@ static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
                        flags = DSISR_NOHPTE;
                if (data) {
                        flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
-                       kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
+                       kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags);
                } else {
                        kvmppc_core_queue_inst_storage(vcpu, flags);
                }
@@ -1236,7 +1236,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
                        r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
-                       kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
+                       kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr);
                        r = RESUME_GUEST;
                }
                break;
index a9c04073d27e0d7f216ed9182876cad2f7d07a96..8a9a0e112fc5aa6719afbe4566f3dcc1355bf733 100644 (file)
@@ -283,9 +283,10 @@ void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
 }
 
-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
                                    ulong dear_flags, ulong esr_flags)
 {
+       WARN_ON_ONCE(srr1_flags);
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
@@ -316,14 +317,16 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
 }
 
-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+       WARN_ON_ONCE(srr1_flags);
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }
 
 #ifdef CONFIG_ALTIVEC
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+       WARN_ON_ONCE(srr1_flags);
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
 }
 #endif
@@ -1225,7 +1228,7 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 #endif
 
        case BOOKE_INTERRUPT_DATA_STORAGE:
-               kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
+               kvmppc_core_queue_data_storage(vcpu, 0, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
index cfc9114b87d0d591e8e6ee5abd935ed1330bcffd..e324a174b5857456f29a324fae560f1c21c38e25 100644 (file)
@@ -28,7 +28,7 @@
 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
        if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-               kvmppc_core_queue_fpunavail(vcpu);
+               kvmppc_core_queue_fpunavail(vcpu, 0);
                return true;
        }
 
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
        if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-               kvmppc_core_queue_vsx_unavail(vcpu);
+               kvmppc_core_queue_vsx_unavail(vcpu, 0);
                return true;
        }
 
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
        if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-               kvmppc_core_queue_vec_unavail(vcpu);
+               kvmppc_core_queue_vec_unavail(vcpu, 0);
                return true;
        }
 
index 4c5405fc55387028e33e26f83fac20c99a2e5316..f9d9e0d1ab2368263a252be2232cd3f9fbaf5000 100644 (file)
@@ -321,7 +321,8 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
                        if (vcpu->mmio_is_write)
                                dsisr |= DSISR_ISSTORE;
 
-                       kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
+                       kvmppc_core_queue_data_storage(vcpu, 0,
+                                       vcpu->arch.vaddr_accessed, dsisr);
                } else {
                        /*
                         * BookE does not send a SIGBUS on a bad