KVM: MMU: do not write-protect large mappings
author Marcelo Tosatti <mtosatti@redhat.com>
Tue, 23 Sep 2008 16:18:32 +0000 (13:18 -0300)
committer Avi Kivity <avi@redhat.com>
Wed, 15 Oct 2008 12:25:18 +0000 (14:25 +0200)
There is not much point in write-protecting large mappings. This
can only happen when a page is shadowed during the window between
is_largepage_backed and mmu_lock acquisition. Zap the entry instead, so
the next page fault will find a shadowed page via is_largepage_backed and
fall back to 4k translations.
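
For context, a minimal sketch of how the fallback works, in the spirit of
that era's mmu.c (is_largepage_backed and has_wrprotected_page appear in
the source; host_largepage_backed is a hypothetical helper named here for
illustration, and the bodies are not the kernel's exact code):

	/*
	 * Illustrative sketch: when the zapped entry faults again, the
	 * mapping path asks whether a large mapping is still permitted.
	 * The gfn is now shadowed, hence write-protected, so
	 * has_wrprotected_page() returns nonzero, the check fails, and
	 * the fault is resolved with 4k sptes instead.
	 */
	static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
	{
		/* a write-protected (shadowed) page forbids a large mapping */
		if (has_wrprotected_page(vcpu->kvm, large_gfn))
			return 0;

		/* hypothetical helper: host must back the range with a huge page */
		if (!host_largepage_backed(vcpu->kvm, large_gfn))
			return 0;

		return 1;
	}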

Simplifies out-of-sync shadow handling.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 23752ef0839cdb123b3e82212587233792411be7..731e6fe9cb078bea1e2655d03f70c5ed81850b77 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1180,11 +1180,16 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
                struct kvm_mmu_page *shadow;
 
+               if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
+                       ret = 1;
+                       spte = shadow_trap_nonpresent_pte;
+                       goto set_pte;
+               }
+
                spte |= PT_WRITABLE_MASK;
 
                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-               if (shadow ||
-                  (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
+               if (shadow) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __func__, gfn);
                        ret = 1;
@@ -1197,6 +1202,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
        if (pte_access & ACC_WRITE_MASK)
                mark_page_dirty(vcpu->kvm, gfn);
 
+set_pte:
        set_shadow_pte(shadow_pte, spte);
        return ret;
 }