drm/i915/gvt: move write protect handler out of mmio emulation function
authorZhenyu Wang <zhenyuw@linux.intel.com>
Mon, 18 Dec 2017 03:58:46 +0000 (11:58 +0800)
committerZhenyu Wang <zhenyuw@linux.intel.com>
Fri, 22 Dec 2017 08:33:50 +0000 (16:33 +0800)
It's a bit confusing that the page write protect handler lives in
the mmio emulation handler. This moves it to a standalone gvt op.

Also remove the unnecessary check of write-protected page access
in the mmio read handler and clean up handling of the failsafe case.

v2: rebase

Reviewed-by: Xiong Zhang <xiong.y.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c

index 8bfac4ed24e6207cbb8ad2f019e8cc8f7d19e473..c4f752eeadcc8e79453014e8c1829a66a456038c 100644 (file)
@@ -1968,6 +1968,39 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        return ret;
 }
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+                                    void *p_data, unsigned int bytes)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       int ret = 0;
+
+       if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+               struct intel_vgpu_page_track *t;
+
+               mutex_lock(&gvt->lock);
+
+               t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+               if (t) {
+                       if (unlikely(vgpu->failsafe)) {
+                               /* remove write protection to prevent future traps */
+                               intel_vgpu_clean_page_track(vgpu, t);
+                       } else {
+                               ret = t->handler(t, pa, p_data, bytes);
+                               if (ret) {
+                                       gvt_err("guest page write error %d, "
+                                               "gfn 0x%lx, pa 0x%llx, "
+                                               "var 0x%x, len %d\n",
+                                               ret, t->gfn, pa,
+                                               *(u32 *)p_data, bytes);
+                               }
+                       }
+               }
+               mutex_unlock(&gvt->lock);
+       }
+       return ret;
+}
+
+
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                intel_gvt_gtt_type_t type)
 {
index f98c1c19b4cb47324697cab8b51995979068e1ca..4cc13b5934f1f0eb1acd1a7bef41fce05eba880e 100644 (file)
@@ -308,4 +308,7 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
        unsigned int off, void *p_data, unsigned int bytes);
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+                                    void *p_data, unsigned int bytes);
+
 #endif /* _GVT_GTT_H_ */
index 643bb961d40dff4745614c640228a91bed0bfdbd..fac54f32d33fccd51672bcd5adfa25ecce96d616 100644 (file)
@@ -183,6 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .get_gvt_attrs = intel_get_gvt_attrs,
        .vgpu_query_plane = intel_vgpu_query_plane,
        .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+       .write_protect_handler = intel_vgpu_write_protect_handler,
 };
 
 /**
index 0822d0fd45dab0b7f3702ae2399adbf7ec288568..7dc7a80213a8afacd8bc4e219ec67880b0eaf51d 100644 (file)
@@ -546,6 +546,8 @@ struct intel_gvt_ops {
                        struct attribute_group ***intel_vgpu_type_groups);
        int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
        int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+       int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+                                    unsigned int);
 };
 
 
index f86983d6655be03ba692c2df9bbecef1d9382c1f..45bab5a6290bea1ecb8eb62bd481ec9d624535b1 100644 (file)
@@ -1360,8 +1360,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                        struct kvmgt_guest_info, track_node);
 
        if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
-               intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
-                                       (void *)val, len);
+               intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+                                                    (void *)val, len);
 }
 
 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
index b18a8bed6c182ab7f2ba52cef0fa920db1b9b34f..562b5ad857a4eeec834f11751b514c28669b208f 100644 (file)
@@ -117,25 +117,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                else
                        memcpy(pt, p_data, bytes);
 
-       } else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-               struct intel_vgpu_page_track *t;
-
-               /* Since we enter the failsafe mode early during guest boot,
-                * guest may not have chance to set up its ppgtt table, so
-                * there should not be any wp pages for guest. Keep the wp
-                * related code here in case we need to handle it in furture.
-                */
-               t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-               if (t) {
-                       /* remove write protection to prevent furture traps */
-                       intel_vgpu_clean_page_track(vgpu, t);
-                       if (read)
-                               intel_gvt_hypervisor_read_gpa(vgpu, pa,
-                                               p_data, bytes);
-                       else
-                               intel_gvt_hypervisor_write_gpa(vgpu, pa,
-                                               p_data, bytes);
-               }
        }
        mutex_unlock(&gvt->lock);
 }
@@ -168,23 +149,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                goto out;
        }
 
-       if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-               struct intel_vgpu_page_track *t;
-
-               t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-               if (t) {
-                       ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
-                                       p_data, bytes);
-                       if (ret) {
-                               gvt_vgpu_err("guest page read error %d, "
-                                       "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-                                       ret, t->gfn, pa, *(u32 *)p_data,
-                                       bytes);
-                       }
-                       goto out;
-               }
-       }
-
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
        if (WARN_ON(bytes > 8))
@@ -263,23 +227,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                goto out;
        }
 
-       if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-               struct intel_vgpu_page_track *t;
-
-               t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-               if (t) {
-                       ret = t->handler(t, pa, p_data, bytes);
-                       if (ret) {
-                               gvt_err("guest page write error %d, "
-                                       "gfn 0x%lx, pa 0x%llx, "
-                                       "var 0x%x, len %d\n",
-                                       ret, t->gfn, pa,
-                                       *(u32 *)p_data, bytes);
-                       }
-                       goto out;
-               }
-       }
-
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
        if (WARN_ON(bytes > 8))