Revert "KVM: MMU: zap pages in batch"
author Sean Christopherson <sean.j.christopherson@intel.com>
Tue, 5 Feb 2019 21:01:28 +0000 (13:01 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 20 Feb 2019 21:48:43 +0000 (22:48 +0100)
Unwinding optimizations related to obsolete pages is a step towards
removing x86 KVM's fast invalidate mechanism, i.e. this is one part of
reverting all patches from the series that introduced the mechanism[1].

This reverts commit e7d11c7a894986a13817c1c001e1e7668c5c4eb4.

[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c

index e733262027eda71150b1e0b69da91e12226a0236..cb9fd69d26326c7a3ab8036ffbf7c077fc49054e 100644
@@ -5846,18 +5846,14 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
-#define BATCH_ZAP_PAGES        10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);
-       int batch = 0;
 
 restart:
        list_for_each_entry_safe_reverse(sp, node,
              &kvm->arch.active_mmu_pages, link) {
-               int ret;
-
                /*
                 * No obsolete page exists before a newly created page since
                 * active_mmu_pages is a FIFO list.
@@ -5866,6 +5862,28 @@ restart:
                        break;
 
                /*
+                * Do not repeatedly zap a root page in order to avoid an
+                * unnecessary KVM_REQ_MMU_RELOAD, otherwise we may be unable
+                * to make progress:
+                *    vcpu 0                        vcpu 1
+                *                         call vcpu_enter_guest():
+                *                            1): handle KVM_REQ_MMU_RELOAD
+                *                                and need mmu-lock to
+                *                                load the mmu
+                * repeat:
+                *    1): zap root page and
+                *        send KVM_REQ_MMU_RELOAD
+                *
+                *    2): if (cond_resched_lock(mmu-lock))
+                *
+                *                            2): hold mmu-lock and load mmu
+                *
+                *                            3): see that the
+                *                                KVM_REQ_MMU_RELOAD bit in
+                *                                vcpu->requests is set, then
+                *                                return 1 to call
+                *                                vcpu_enter_guest() again.
+                *            goto repeat;
+                *
                 * Since we are walking the list in reverse and invalid pages
                 * are moved to the head of the list, skipping invalid pages
                 * helps us avoid walking the list forever.
@@ -5873,18 +5891,13 @@ restart:
                if (sp->role.invalid)
                        continue;
 
-               if (batch >= BATCH_ZAP_PAGES &&
-                     (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
-                       batch = 0;
+               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                        kvm_mmu_commit_zap_page(kvm, &invalid_list);
                        cond_resched_lock(&kvm->mmu_lock);
                        goto restart;
                }
 
-               ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-               batch += ret;
-
-               if (ret)
+               if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
                        goto restart;
        }
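
For reference, here is a sketch of kvm_zap_obsolete_pages() as it reads once
this revert is applied, reassembled from the hunks above. The is_obsolete_sp()
check and the trailing kvm_mmu_commit_zap_page() call fall outside the hunk
context and are assumed from the surrounding source of this era, so treat them
as illustrative rather than verbatim.

static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);

restart:
        list_for_each_entry_safe_reverse(sp, node,
              &kvm->arch.active_mmu_pages, link) {
                /*
                 * active_mmu_pages is a FIFO list, so the first non-obsolete
                 * page terminates the walk.  (Check not shown in the hunks;
                 * assumed from the surrounding source.)
                 */
                if (!is_obsolete_sp(kvm, sp))
                        break;

                /* Skip invalid pages so the reverse walk cannot loop forever. */
                if (sp->role.invalid)
                        continue;

                /*
                 * With batching reverted, drop mmu_lock whenever it is
                 * contended or a reschedule is due, committing the pending
                 * zaps first.
                 */
                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                        kvm_mmu_commit_zap_page(kvm, &invalid_list);
                        cond_resched_lock(&kvm->mmu_lock);
                        goto restart;
                }

                if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
                        goto restart;
        }

        /* Final commit; outside the hunk context above (assumed). */
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
}

Compared to the batched version being removed, this checks for contention on
every iteration instead of only after BATCH_ZAP_PAGES zaps, trading a little
extra lock traffic for prompt release of mmu_lock.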