KVM: PPC: Book3S HV: Use hypercalls for TLB invalidation when nested
author Paul Mackerras <paulus@ozlabs.org>
Mon, 8 Oct 2018 05:31:10 +0000 (16:31 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 9 Oct 2018 05:04:27 +0000 (16:04 +1100)
This adds code to call the H_TLB_INVALIDATE hypercall when running as
a guest, in the cases where we need to invalidate TLBs (or other MMU
caches) as part of managing the mappings for a nested guest.  Calling
H_TLB_INVALIDATE lets the nested hypervisor inform the parent
hypervisor about changes to partition-scoped page tables or the
partition table without needing to execute hypervisor-privileged tlbie
instructions.
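
As a sketch of the call shape, using the H_TLBIE_P1_ENC() helper this
patch adds: the first parameter carries the same RIC/PRS/R controls as
the tlbie instruction, followed by the LPID and an RB-like value.  For
example, flushing everything for one nested LPID looks like:

	/* RIC=2 (invalidate all), PRS=0 (partition-scoped), R=1 (radix) */
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);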

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_hv_nested.c

diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index c2a9146ee016a6e446212bd2ff1c44df1df523ce..719b317235671cc341e080f6f0a4b487c463e5dc 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -24,6 +24,7 @@
 #include <asm/bitops.h>
 #include <asm/book3s/64/mmu-hash.h>
 #include <asm/cpu_has_feature.h>
+#include <asm/ppc-opcode.h>
 
 #ifdef CONFIG_PPC_PSERIES
 static inline bool kvmhv_on_pseries(void)
@@ -117,6 +118,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
                                          bool create);
 void kvmhv_put_nested(struct kvm_nested_guest *gp);
 
+/* Encoding of first parameter for H_TLB_INVALIDATE */
+#define H_TLBIE_P1_ENC(ric, prs, r)    (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+                                        ___PPC_R(r))
+
 /* Power architecture requires HPT is at least 256kiB, at most 64TiB */
 #define PPC_MIN_HPT_ORDER      18
 #define PPC_MAX_HPT_ORDER      46
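
For reference, the three encodings used in the hunks below, all of them
partition-scoped (PRS=0) and radix (R=1); the RIC values carry the same
meanings as in the tlbie instruction:

	H_TLBIE_P1_ENC(0, 0, 1)	/* RIC=0: invalidate a single TLB entry */
	H_TLBIE_P1_ENC(1, 0, 1)	/* RIC=1: invalidate the page-walk cache */
	H_TLBIE_P1_ENC(2, 0, 1)	/* RIC=2: invalidate all caching for the LPID */
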
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 4c1eccb201908b497c588b9137537196a621107b..ae0e3edd94bc327b5a7b94e2b5f2a8c371e6dc62 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift, unsigned int lpid)
 {
        unsigned long psize = PAGE_SIZE;
+       int psi;
+       long rc;
+       unsigned long rb;
 
        if (pshift)
                psize = 1UL << pshift;
+       else
+               pshift = PAGE_SHIFT;
 
        addr &= ~(psize - 1);
-       radix__flush_tlb_lpid_page(lpid, addr, psize);
+
+       if (!kvmhv_on_pseries()) {
+               radix__flush_tlb_lpid_page(lpid, addr, psize);
+               return;
+       }
+
+       psi = shift_to_mmu_psize(pshift);
+       rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+       rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+                               lpid, rb);
+       if (rc)
+               pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
 }
 
 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
 {
-       radix__flush_pwc_lpid(lpid);
+       long rc;
+
+       if (!kvmhv_on_pseries()) {
+               radix__flush_pwc_lpid(lpid);
+               return;
+       }
+
+       rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+                               lpid, TLBIEL_INVAL_SET_LPID);
+       if (rc)
+               pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
 }
 
 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
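
A worked sketch of the RB value built in kvmppc_radix_tlbie_page()
above; the address and page size here are hypothetical:

	unsigned long addr = 0x40200000;	/* page-aligned address */
	unsigned int pshift = 21;		/* 2MB page */
	int psi = shift_to_mmu_psize(pshift);
	/* the 3-bit AP (actual page size) field lands at IBM bits 56:58
	 * of RB; PPC_BITLSHIFT(58) == 63 - 58 == 5 */
	unsigned long rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
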
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index c83c13d707e2d369c11c8e97aeba670d8e7810f9..486d9007c288c1b0f2bfa7f3989b5c4987cad8bc 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -299,14 +299,32 @@ void kvmhv_nested_exit(void)
        }
 }
 
+static void kvmhv_flush_lpid(unsigned int lpid)
+{
+       long rc;
+
+       if (!kvmhv_on_pseries()) {
+               radix__flush_tlb_lpid(lpid);
+               return;
+       }
+
+       rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+                               lpid, TLBIEL_INVAL_SET_LPID);
+       if (rc)
+               pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
+}
+
 void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
 {
-       if (cpu_has_feature(CPU_FTR_HVMODE)) {
+       if (!kvmhv_on_pseries()) {
                mmu_partition_table_set_entry(lpid, dw0, dw1);
-       } else {
-               pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
-               pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+               return;
        }
+
+       pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+       pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+       /* L0 will do the necessary barriers */
+       kvmhv_flush_lpid(lpid);
 }
 
 static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
@@ -493,7 +511,7 @@ static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
        spin_lock(&kvm->mmu_lock);
        kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
        spin_unlock(&kvm->mmu_lock);
-       radix__flush_tlb_lpid(gp->shadow_lpid);
+       kvmhv_flush_lpid(gp->shadow_lpid);
        kvmhv_update_ptbl_cache(gp);
        if (gp->l1_gr_to_hr == 0)
                kvmhv_remove_nested(gp);
@@ -777,7 +795,7 @@ static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
                spin_lock(&kvm->mmu_lock);
                kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
                                          gp->shadow_lpid);
-               radix__flush_tlb_lpid(gp->shadow_lpid);
+               kvmhv_flush_lpid(gp->shadow_lpid);
                spin_unlock(&kvm->mmu_lock);
                break;
        case 1: