KVM: PPC: Book3S HV: Fix race between kvm_unmap_hva_range and MMU mode switch
author	Paul Mackerras <paulus@ozlabs.org>
	Fri, 16 Nov 2018 10:28:18 +0000 (21:28 +1100)
committer	Paul Mackerras <paulus@ozlabs.org>
	Fri, 14 Dec 2018 04:33:15 +0000 (15:33 +1100)
Testing has revealed an occasional crash which appears to be caused
by a race between kvmppc_switch_mmu_to_hpt() and kvm_unmap_hva_range_hv().
The symptom is a NULL pointer dereference in __find_linux_pte() called
from kvm_unmap_radix() with kvm->arch.pgtable == NULL.

Looking at kvmppc_switch_mmu_to_hpt(), it does indeed clear
kvm->arch.pgtable (via kvmppc_free_radix()) before setting
kvm->arch.radix to 0, and there is nothing to prevent
kvm_unmap_hva_range_hv() and the other MMU callback functions from
being called concurrently with kvmppc_switch_mmu_to_hpt() or
kvmppc_switch_mmu_to_radix().
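
For illustration, the problematic interleaving looks roughly like this
(a condensed sketch of the pre-patch code paths, not the literal source):

        /* CPU 0: kvmppc_switch_mmu_to_hpt(), pre-patch ordering */
        kvmppc_free_radix(kvm);         /* frees the tree and sets
                                         * kvm->arch.pgtable = NULL */
        ...
        kvm->arch.radix = 0;            /* cleared only after the free */

        /* CPU 1: kvm_unmap_hva_range_hv(), running concurrently */
        if (kvm_is_radix(kvm))          /* still sees radix mode */
                kvm_unmap_radix(kvm, memslot, gfn);
                /* -> __find_linux_pte(kvm->arch.pgtable, ...) with
                 *    kvm->arch.pgtable == NULL: NULL pointer dereference */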

This patch therefore adds spin_lock/spin_unlock calls on kvm->mmu_lock
around the assignments to kvm->arch.radix, and makes sure that the
partition-scoped radix tree or HPT is only freed after changing
kvm->arch.radix.
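
Condensed from the book3s_hv.c hunk below (LPCR update omitted), the
fixed ordering for the radix-to-HPT direction is:

        kvmppc_rmap_reset(kvm);         /* itself now serialized by mmu_lock */
        kvm->arch.process_table = 0;
        /* Mutual exclusion with kvm_unmap_hva_range etc. */
        spin_lock(&kvm->mmu_lock);
        kvm->arch.radix = 0;
        spin_unlock(&kvm->mmu_lock);
        kvmppc_free_radix(kvm);         /* freed only once no MMU callback
                                         * can still observe radix mode */

The HPT-to-radix direction is symmetric: kvm->arch.radix is set to 1
under the lock before kvmppc_free_hpt() runs, so the callbacks stop
using the HPT before it goes away.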

This patch also takes kvm->mmu_lock in kvmppc_rmap_reset() to make sure
that the clearing of each rmap array (one per memslot) doesn't happen
concurrently with use of the array in kvm_unmap_hva_range_hv() or
the other MMU callbacks.
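
For context, the consumer side selects its per-gfn handler by reading
kvm->arch.radix. A simplified sketch of kvm_unmap_hva_range_hv() as it
looked in this era (details may differ between kernel versions):

        static int kvm_unmap_hva_range_hv(struct kvm *kvm,
                                          unsigned long start, unsigned long end)
        {
                hva_handler_fn handler;

                /* Reads kvm->arch.radix to pick the radix or HPT path */
                handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
                kvm_handle_hva_range(kvm, start, end, handler);
                return 0;
        }

The new mmu_lock critical sections exclude these callbacks' use of
kvm->arch.pgtable and of the rmap arrays, per the "Mutual exclusion
with kvm_unmap_hva_range etc." comments added in the patch.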

Fixes: 18c3640cefc7 ("KVM: PPC: Book3S HV: Add infrastructure for running HPT guests on radix host")
Cc: stable@vger.kernel.org # v4.15+
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c615617e78acc050d7221599c5311a319fabc62d..a18afda3d0f0bedeb9c74f7a749e88f2f5a6f87f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
        srcu_idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
+               /* Mutual exclusion with kvm_unmap_hva_range etc. */
+               spin_lock(&kvm->mmu_lock);
                /*
                 * This assumes it is acceptable to lose reference and
                 * change bits across a reset.
                 */
                memset(memslot->arch.rmap, 0,
                       memslot->npages * sizeof(*memslot->arch.rmap));
+               spin_unlock(&kvm->mmu_lock);
        }
        srcu_read_unlock(&kvm->srcu, srcu_idx);
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index a56f8413758ab1d796328093eb754d3b6715f4eb..ab43306c4ea13ef6c10ce1c950b19e84b8f3e83c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4532,12 +4532,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
        if (nesting_enabled(kvm))
                kvmhv_release_all_nested(kvm);
+       kvmppc_rmap_reset(kvm);
+       kvm->arch.process_table = 0;
+       /* Mutual exclusion with kvm_unmap_hva_range etc. */
+       spin_lock(&kvm->mmu_lock);
+       kvm->arch.radix = 0;
+       spin_unlock(&kvm->mmu_lock);
        kvmppc_free_radix(kvm);
        kvmppc_update_lpcr(kvm, LPCR_VPM1,
                           LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-       kvmppc_rmap_reset(kvm);
-       kvm->arch.radix = 0;
-       kvm->arch.process_table = 0;
        return 0;
 }
 
@@ -4549,12 +4552,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
        err = kvmppc_init_vm_radix(kvm);
        if (err)
                return err;
-
+       kvmppc_rmap_reset(kvm);
+       /* Mutual exclusion with kvm_unmap_hva_range etc. */
+       spin_lock(&kvm->mmu_lock);
+       kvm->arch.radix = 1;
+       spin_unlock(&kvm->mmu_lock);
        kvmppc_free_hpt(&kvm->arch.hpt);
        kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
                           LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
-       kvmppc_rmap_reset(kvm);
-       kvm->arch.radix = 1;
        return 0;
 }