Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 6ecdd7a41a826758d39ee7c5c2403c59c4c9890a..f5958071220c9aef44e1c011487b6cce47209fb3 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -378,7 +378,7 @@ retry_walk:
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
-               if (unlikely(real_gpa == UNMAPPED_GVA))
+               if (unlikely(real_gpa == INVALID_GPA))
                        return 0;
 
                host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
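
For context: this hunk and the two matching ones below are a pure rename.
UNMAPPED_GVA and INVALID_GPA are the same all-ones sentinel, and every function
returning it here produces a GPA, so the GPA-flavored name is the accurate one.
A minimal sketch of the sentinel and the usual caller-side check, assuming the
usual x86 definition in kvm_host.h:

    #define INVALID_GPA	(~(gpa_t)0)

    gpa_t gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &fault);
    if (gpa == INVALID_GPA)
            return 0;	/* translation failed; fault info already filled in */

No behavior changes; only readers of the code benefit.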
@@ -431,7 +431,7 @@ retry_walk:
 #endif
 
        real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
-       if (real_gpa == UNMAPPED_GVA)
+       if (real_gpa == INVALID_GPA)
                return 0;
 
        walker->gfn = real_gpa >> PAGE_SHIFT;
@@ -595,7 +595,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);
 
-       i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
+       i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
        spte = sp->spt + i;
 
        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
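
spte_index() replaces the open-coded "sptep - sp->spt" pointer arithmetic: it
recovers the entry's index from the SPTE pointer alone, since a shadow page
table is a naturally aligned page of 512 u64 entries. Roughly, as a hedged
from-memory sketch of the helper in spte.h:

    static inline int spte_index(u64 *sptep)
    {
            /* An SPT page holds SPTE_ENT_PER_PAGE (512) u64 entries. */
            return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
    }

Masking the result with ~(PTE_PREFETCH_NUM - 1) then rounds down to the start
of the naturally aligned PTE_PREFETCH_NUM-entry window containing the faulting
SPTE, which is exactly the window the prefetch loop below scans.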
@@ -648,15 +648,13 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                gfn_t table_gfn;
 
                clear_sp_write_flooding_count(it.sptep);
-               drop_large_spte(vcpu, it.sptep);
 
-               sp = NULL;
-               if (!is_shadow_present_pte(*it.sptep)) {
-                       table_gfn = gw->table_gfn[it.level - 2];
-                       access = gw->pt_access[it.level - 2];
-                       sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
-                                                 false, access);
+               table_gfn = gw->table_gfn[it.level - 2];
+               access = gw->pt_access[it.level - 2];
+               sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
+                                         false, access);
 
+               if (sp != ERR_PTR(-EEXIST)) {
                        /*
                         * We must synchronize the pagetable before linking it
                         * because the guest doesn't need to flush tlb when
@@ -685,7 +683,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        goto out_gpte_changed;
 
-               if (sp)
+               if (sp != ERR_PTR(-EEXIST))
                        link_shadow_page(vcpu, it.sptep, sp);
        }
 
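The restructuring above hinges on kvm_mmu_get_child_sp() reporting "a child sp
is already linked here" via ERR_PTR(-EEXIST), so the caller no longer
open-codes the is_shadow_present_pte() check. A hedged reconstruction of the
helper's contract, not the verbatim source:

    static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
                                                     u64 *sptep, gfn_t gfn,
                                                     bool direct, unsigned int access)
    {
            union kvm_mmu_page_role role;

            /*
             * A present, non-large SPTE already points at a child sp:
             * nothing to create or link.  A large SPTE falls through and
             * is replaced when the new sp is linked over it.
             */
            if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
                    return ERR_PTR(-EEXIST);

            role = kvm_mmu_child_role(sptep, direct, access);
            return kvm_mmu_get_shadow_page(vcpu, gfn, role);
    }

The same contract drives the direct-map loop in the next hunk, where -EEXIST
simply means "already mapped, descend a level".
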
@@ -709,16 +707,15 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 
                validate_direct_spte(vcpu, it.sptep, direct_access);
 
-               drop_large_spte(vcpu, it.sptep);
+               sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
+                                         true, direct_access);
+               if (sp == ERR_PTR(-EEXIST))
+                       continue;
 
-               if (!is_shadow_present_pte(*it.sptep)) {
-                       sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
-                                                 true, direct_access);
-                       link_shadow_page(vcpu, it.sptep, sp);
-                       if (fault->huge_page_disallowed &&
-                           fault->req_level >= it.level)
-                               account_huge_nx_page(vcpu->kvm, sp);
-               }
+               link_shadow_page(vcpu, it.sptep, sp);
+               if (fault->huge_page_disallowed &&
+                   fault->req_level >= it.level)
+                       account_huge_nx_page(vcpu->kvm, sp);
        }
 
        if (WARN_ON_ONCE(it.level != fault->goal_level))
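
Also worth noting: both explicit drop_large_spte() calls disappear in this
patch. As of this series a present-but-large SPTE is dropped inside
link_shadow_page() when the new child sp is installed over it, flushing the
TLB as needed, so the fetch paths no longer pre-clear it; roughly (hedged
from memory):

    /* inside __link_shadow_page(): */
    if (is_shadow_present_pte(*sptep))
            drop_large_spte(kvm, sptep, flush);

The NX-hugepage accounting is unchanged in substance: when a huge mapping was
disallowed (fault->huge_page_disallowed) even though the fault wanted one at
this level or higher (fault->req_level >= it.level), the replacement child sp
is recorded via account_huge_nx_page() so the recovery thread can later zap it
and reinstate the huge page.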
@@ -936,7 +933,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
                                break;
 
                        pte_gpa = FNAME(get_level1_sp_gpa)(sp);
-                       pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+                       pte_gpa += spte_index(sptep) * sizeof(pt_element_t);
 
                        mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
                        if (is_shadow_present_pte(old_spte))
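
The pte_gpa arithmetic is unchanged, just expressed through spte_index():
get_level1_sp_gpa() yields the guest-physical base of the last-level guest
page table shadowed by sp, and each guest PTE is sizeof(pt_element_t) bytes
(8 for 64-bit and PAE paging, 4 for 32-bit paging). A worked example with
hypothetical numbers:

    pte_gpa  = FNAME(get_level1_sp_gpa)(sp);              /* say 0x12345000 */
    pte_gpa += spte_index(sptep) * sizeof(pt_element_t);
    /* index 0x1a3, 8-byte PTEs: 0x12345000 + 0x1a3 * 8 = 0x12345d18 */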
@@ -965,7 +962,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               struct x86_exception *exception)
 {
        struct guest_walker walker;
-       gpa_t gpa = UNMAPPED_GVA;
+       gpa_t gpa = INVALID_GPA;
        int r;
 
 #ifndef CONFIG_X86_64
@@ -985,7 +982,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 }
 
 /*
- * Using the cached information from sp->gfns is safe because:
+ * Using the information in sp->shadowed_translation (kvm_mmu_page_get_gfn()) is
+ * safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
  *
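sp->gfns used to be a standalone array of shadowed gfns; sp->shadowed_translation
replaces it, packing the gfn and the shadowed access bits into a single u64 per
entry, read back through kvm_mmu_page_get_gfn(). A hedged sketch of the accessor:

    static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
    {
            if (sp->role.passthrough)
                    return sp->gfn;

            /* Direct sps have no cache; the gfn is computable from the role. */
            if (!sp->shadowed_translation)
                    return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));

            return sp->shadowed_translation[index] >> PAGE_SHIFT;
    }
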
@@ -1067,12 +1065,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                 * "present" bit, as all other paging modes will create a
                 * read-only SPTE if pte_access is zero.
                 */
-               if ((!pte_access && !shadow_present_mask) || gfn != sp->gfns[i]) {
+               if ((!pte_access && !shadow_present_mask) ||
+                   gfn != kvm_mmu_page_get_gfn(sp, i)) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
                        flush = true;
                        continue;
                }
 
+               /* Update the shadowed access bits in case they changed. */
+               kvm_mmu_page_set_access(sp, i, pte_access);
+
                sptep = &sp->spt[i];
                spte = *sptep;
                host_writable = spte & shadow_host_writable_mask;
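
The added kvm_mmu_page_set_access() call is the point of caching access bits
at all: when the guest changes a PTE's permissions without changing the gfn,
sync_page() now refreshes the cached bits instead of leaving them stale (with
sp->gfns there was nothing to refresh). A hedged sketch of the setter and the
packing it relies on:

    static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
                                             gfn_t gfn, unsigned int access)
    {
            if (sp->shadowed_translation) {
                    /* gfn above PAGE_SHIFT, access bits below it */
                    sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
                    return;
            }
            /* sps without the cache have nothing to update */
    }

    static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
                                        unsigned int access)
    {
            kvm_mmu_page_set_translation(sp, index,
                                         kvm_mmu_page_get_gfn(sp, index), access);
    }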