KVM: MMU: fix broken page accessed tracking with ept enabled
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Fri, 16 Jul 2010 03:23:04 +0000 (11:23 +0800)
committerAvi Kivity <avi@redhat.com>
Mon, 2 Aug 2010 03:40:57 +0000 (06:40 +0300)
In the current code, if EPT is enabled (shadow_accessed_mask == 0), page
accessed tracking is lost.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c

index d2ea9cabc066a2b4b2af7358fde2a105cca966f2..9b3b916ebeaed423659c4bfffe3a47f6f7f564a5 100644 (file)
@@ -687,7 +687,7 @@ static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
        if (!is_rmap_spte(old_spte))
                return;
        pfn = spte_to_pfn(old_spte);
-       if (old_spte & shadow_accessed_mask)
+       if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (is_writable_pte(old_spte))
                kvm_set_pfn_dirty(pfn);
@@ -815,7 +815,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                                kvm_set_pfn_dirty(spte_to_pfn(*spte));
                        old_spte = __xchg_spte(spte, new_spte);
                        if (is_shadow_present_pte(old_spte)
-                           && (old_spte & shadow_accessed_mask))
+                           && (!shadow_accessed_mask ||
+                           old_spte & shadow_accessed_mask))
                                mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
                        spte = rmap_next(kvm, rmapp, spte);
                }