Merge tag 'gvt-next-2017-12-14' of https://github.com/intel/gvt-linux into drm-intel...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / gvt / handlers.c
index 56ee588377f481be2bd7200f168a3e2304115bed..c982867e7c2b11924df8539a96f57e12d1cb5c6e 100644 (file)
@@ -174,8 +174,10 @@ void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
                break;
        case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
                pr_err("Graphics resource is not enough for the guest\n");
+               break;
        case GVT_FAILSAFE_GUEST_ERR:
                pr_err("GVT Internal error  for the guest\n");
+               break;
        default:
                break;
        }
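
Without the break statements added above, C switch cases fall through, so the insufficient-resource path would also print the guest-error message. A minimal standalone sketch of the failure mode (all names are illustrative, not the GVT ones):

    #include <stdio.h>

    enum failsafe_reason { INSUFFICIENT_RESOURCE, GUEST_ERR };

    static void report(enum failsafe_reason reason)
    {
            switch (reason) {
            case INSUFFICIENT_RESOURCE:
                    printf("insufficient resource\n");
                    /* no break: control falls through to the next case */
            case GUEST_ERR:
                    printf("guest error\n");
                    break;
            default:
                    break;
            }
    }

    int main(void)
    {
            report(INSUFFICIENT_RESOURCE);  /* prints both messages */
            return 0;
    }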
@@ -1396,7 +1398,7 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
         * update the VM CSB status correctly. Here listed registers can
         * support BDW, SKL or other platforms with same HWSP registers.
         */
-       if (unlikely(ring_id < 0 || ring_id > I915_NUM_ENGINES)) {
+       if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
                gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
                             vgpu->id, offset);
                return -EINVAL;
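
The corrected guard makes I915_NUM_ENGINES an exclusive upper bound: a C array of that length only has valid indices 0 through I915_NUM_ENGINES - 1, so the old '>' test still admitted the one-past-the-end index. A minimal sketch of the pattern, with hypothetical names:

    #define NUM_ENGINES 4   /* stand-in for I915_NUM_ENGINES */

    static int engine_state[NUM_ENGINES];  /* hypothetical per-engine table */

    static int lookup_state(int ring_id)
    {
            /* '>=' rejects ring_id == NUM_ENGINES, which '>' let through */
            if (ring_id < 0 || ring_id >= NUM_ENGINES)
                    return -1;
            return engine_state[ring_id];
    }

The elsp_mmio_write hunk further down applies the same change, there replacing the equivalent but harder-to-read 'ring_id > I915_NUM_ENGINES - 1' spelling.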
@@ -1420,40 +1422,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
        return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
 }
 
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
-{
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       u32 v = *(u32 *)p_data;
-
-       if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
-               return intel_vgpu_default_mmio_write(vgpu,
-                               offset, p_data, bytes);
-
-       switch (offset) {
-       case 0x4ddc:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
-               break;
-       case 0x42080:
-               /* bypass WaCompressedResourceDisplayNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
-               break;
-       case 0xe194:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
-               break;
-       case 0x7014:
-               /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
-               vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
@@ -1471,11 +1439,29 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       int ring_id;
+       u32 ring_base;
+
+       ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+       /*
+        * Read the HW register in the following cases:
+        * a. the offset isn't a ring mmio;
+        * b. the offset's ring is currently running this vGPU on the hw;
+        * c. the offset is a ring timestamp mmio.
+        */
+       if (ring_id >= 0)
+               ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+       if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
+           offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+           offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+               mmio_hw_access_pre(dev_priv);
+               vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+               mmio_hw_access_post(dev_priv);
+       }
 
-       mmio_hw_access_pre(dev_priv);
-       vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-       mmio_hw_access_post(dev_priv);
        return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
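
The three commented cases combine into the single if-condition above. As a hedged restatement only, the test could be factored into a predicate such as this sketch (the helper is hypothetical; the identifiers mirror the patch):

    /* Hypothetical helper, not part of the patch: true when the vreg
     * should be refreshed from hardware before the default read. */
    static bool need_hw_read(struct intel_vgpu *vgpu, int ring_id,
                             u32 ring_base, unsigned int offset)
    {
            if (ring_id < 0)        /* case a: not a ring mmio */
                    return true;
            if (vgpu == vgpu->gvt->scheduler.engine_owner[ring_id])
                    return true;    /* case b: this vGPU owns the ring */
            /* case c: ring timestamp registers, read regardless of owner */
            return offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
                   offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base));
    }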
 
@@ -1487,7 +1473,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        u32 data = *(u32 *)p_data;
        int ret = 0;
 
-       if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+       if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
                return -EINVAL;
 
        execlist = &vgpu->submission.execlist[ring_id];
@@ -1722,8 +1708,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
-       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
        MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2601,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(0x6e570, D_BDW_PLUS);
        MMIO_D(0x65f10, D_BDW_PLUS);
 
-       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
-                skl_misc_ctl_write);
+       MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2666,8 +2651,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
-       MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+       MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+       MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
        MMIO_D(0x45504, D_SKL_PLUS);
        MMIO_D(0x45520, D_SKL_PLUS);
        MMIO_D(0x46000, D_SKL_PLUS);
@@ -2971,6 +2956,40 @@ err:
        return ret;
 }
 
+/**
+ * intel_gvt_for_each_tracked_mmio - iterate over each tracked MMIO
+ * @gvt: a GVT device
+ * @handler: callback invoked for each tracked MMIO offset
+ * @data: private data passed through to the handler
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
+       int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
+       void *data)
+{
+       struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+       struct intel_gvt_mmio_info *e;
+       int i, j, ret;
+
+       hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
+               ret = handler(gvt, e->offset, data);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+               for (j = 0; j < block->size; j += 4) {
+                       ret = handler(gvt,
+                               INTEL_GVT_MMIO_OFFSET(block->offset) + j,
+                               data);
+                       if (ret)
+                               return ret;
+               }
+       }
+       return 0;
+}
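
A minimal usage sketch for the new iterator, assuming a caller that simply counts tracked offsets (the handler and caller below are hypothetical):

    /* Hypothetical handler: count every tracked MMIO offset. */
    static int count_tracked(struct intel_gvt *gvt, u32 offset, void *data)
    {
            (*(unsigned int *)data)++;
            return 0;       /* a non-zero return would abort the walk */
    }

    /* In a caller with a struct intel_gvt *gvt in scope: */
    unsigned int n = 0;
    int ret = intel_gvt_for_each_tracked_mmio(gvt, count_tracked, &n);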
 
 /**
  * intel_vgpu_default_mmio_read - default MMIO read handler