Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
arch/x86/kvm/mmu/mmu.c
index 7c9fce512625b35fe606602412c9334ecdc51cbe..992e651540e8523aadbd15f2cb8dc748850c6a49 100644
@@ -3575,10 +3575,14 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
        if (WARN_ON_ONCE(!sp))
                return;
 
-       if (is_tdp_mmu_page(sp))
+       if (is_tdp_mmu_page(sp)) {
+               lockdep_assert_held_read(&kvm->mmu_lock);
                kvm_tdp_mmu_put_root(kvm, sp);
-       else if (!--sp->root_count && sp->role.invalid)
-               kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+       } else {
+               lockdep_assert_held_write(&kvm->mmu_lock);
+               if (!--sp->root_count && sp->role.invalid)
+                       kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+       }
 
        *root_hpa = INVALID_PAGE;
 }
@@ -3587,6 +3591,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
                        ulong roots_to_free)
 {
+       bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
        int i;
        LIST_HEAD(invalid_list);
        bool free_active_root;
@@ -3609,7 +3614,10 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
                        return;
        }
 
-       write_lock(&kvm->mmu_lock);
+       if (is_tdp_mmu)
+               read_lock(&kvm->mmu_lock);
+       else
+               write_lock(&kvm->mmu_lock);
 
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
@@ -3635,8 +3643,13 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
                mmu->root.pgd = 0;
        }
 
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
-       write_unlock(&kvm->mmu_lock);
+       if (is_tdp_mmu) {
+               read_unlock(&kvm->mmu_lock);
+               WARN_ON_ONCE(!list_empty(&invalid_list));
+       } else {
+               kvm_mmu_commit_zap_page(kvm, &invalid_list);
+               write_unlock(&kvm->mmu_lock);
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
 
@@ -3693,15 +3706,15 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
        unsigned i;
        int r;
 
+       if (tdp_mmu_enabled)
+               return kvm_tdp_mmu_alloc_root(vcpu);
+
        write_lock(&vcpu->kvm->mmu_lock);
        r = make_mmu_pages_available(vcpu);
        if (r < 0)
                goto out_unlock;
 
-       if (tdp_mmu_enabled) {
-               root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
-               mmu->root.hpa = root;
-       } else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
+       if (shadow_root_level >= PT64_ROOT_4LEVEL) {
                root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
                mmu->root.hpa = root;
        } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
@@ -7039,9 +7052,7 @@ int kvm_mmu_vendor_module_init(void)
 
        kvm_mmu_reset_all_pte_masks();
 
-       pte_list_desc_cache = kmem_cache_create("pte_list_desc",
-                                           sizeof(struct pte_list_desc),
-                                           0, SLAB_ACCOUNT, NULL);
+       pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
        if (!pte_list_desc_cache)
                goto out;