drm/i915/gvt: correct the emulation in TLB control handler
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 970804aed381d597d22ebb8b5cc934bedd509991..2d97fb78343edd676520916f0b3e7b0c3bbf9d7b 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -37,6 +37,8 @@
  */
 
 #include "i915_drv.h"
+#include "gvt.h"
+#include "i915_pvinfo.h"
 
 /* XXX FIXME i915 has changed PP_XXX definition */
 #define PCH_PP_STATUS  _MMIO(0xc7200)
@@ -130,12 +132,13 @@ static int new_mmio_info(struct intel_gvt *gvt,
 
 static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 {
-       int i;
+       enum intel_engine_id id;
+       struct intel_engine_cs *engine;
 
        reg &= ~GENMASK(11, 0);
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               if (gvt->dev_priv->engine[i].mmio_base == reg)
-                       return i;
+       for_each_engine(engine, gvt->dev_priv, id) {
+               if (engine->mmio_base == reg)
+                       return id;
        }
        return -1;
 }
@@ -227,12 +230,49 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
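+/*
+ * Emulate a device/engine reset request: stop vGPU scheduling, wait for
+ * the vGPU to become idle if necessary, reset the virtual execlist state
+ * of the engines selected in @bitmap, and on a full GPU reset also
+ * rebuild the vGPU MMIO, PVINFO page and GTT state.
+ */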
+static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes, unsigned long bitmap)
+{
+       struct intel_gvt_workload_scheduler *scheduler =
+               &vgpu->gvt->scheduler;
+
+       vgpu->resetting = true;
+
+       intel_vgpu_stop_schedule(vgpu);
+       /*
+        * current_vgpu will be set to NULL after the scheduler is
+        * stopped when the reset is triggered by the current vgpu.
+        */
+       if (scheduler->current_vgpu == NULL) {
+               mutex_unlock(&vgpu->gvt->lock);
+               intel_gvt_wait_vgpu_idle(vgpu);
+               mutex_lock(&vgpu->gvt->lock);
+       }
+
+       intel_vgpu_reset_execlist(vgpu, bitmap);
+
+       /* full GPU reset */
+       if (bitmap == 0xff) {
+               mutex_unlock(&vgpu->gvt->lock);
+               intel_vgpu_clean_gtt(vgpu);
+               mutex_lock(&vgpu->gvt->lock);
+               setup_vgpu_mmio(vgpu);
+               populate_pvinfo_page(vgpu);
+               intel_vgpu_init_gtt(vgpu);
+       }
+
+       vgpu->resetting = false;
+
+       return 0;
+}
+
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        u32 data;
-       u32 bitmap = 0;
+       u64 bitmap = 0;
 
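+       /* Latch the guest write before decoding the reset domains. */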
+       write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
        if (data & GEN6_GRDOM_FULL) {
@@ -260,7 +300,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                if (HAS_BSD2(vgpu->gvt->dev_priv))
                        bitmap |= (1 << VCS2);
        }
-       return 0;
+       return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
 }
 
 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -1118,7 +1158,10 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
-       u32 mode = *(u32 *)p_data;
+       u32 mode;
+
+       write_vreg(vgpu, offset, p_data, bytes);
+       mode = vgpu_vreg(vgpu, offset);
 
        if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
                WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
@@ -1280,22 +1323,80 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
        struct intel_vgpu_execlist *execlist;
        u32 data = *(u32 *)p_data;
-       int ret;
+       int ret = 0;
 
-       if (WARN_ON(ring_id < 0))
+       if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
                return -EINVAL;
 
        execlist = &vgpu->execlist[ring_id];
 
        execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
-       if (execlist->elsp_dwords.index == 3)
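+       /* An ELSP write is four dwords; submit on the last one. */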
+       if (execlist->elsp_dwords.index == 3) {
                ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+               if (ret)
+                       gvt_err("fail to submit workload on ring %d\n", ring_id);
+       }
 
        ++execlist->elsp_dwords.index;
        execlist->elsp_dwords.index &= 0x3;
+       return ret;
+}
+
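+/*
+ * RING_MODE write handler: latch the masked write and start the vGPU
+ * scheduler when the guest enables execlist submission on a ring.
+ */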
+static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes)
+{
+       u32 data = *(u32 *)p_data;
+       int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+       bool enable_execlist;
+
+       write_vreg(vgpu, offset, p_data, bytes);
+       if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
+                       || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+               enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
+
+               gvt_dbg_core("EXECLIST %s on ring %d\n",
+                               (enable_execlist ? "enabling" : "disabling"),
+                               ring_id);
+
+               if (enable_execlist)
+                       intel_vgpu_start_schedule(vgpu);
+       }
        return 0;
 }
 
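+/*
+ * Handler for the per-engine TLB invalidation control registers: map the
+ * MMIO offset to an engine and mark a TLB flush as pending for it;
+ * writes to any other offset are rejected.
+ */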
+static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       unsigned int id = 0;
+
+       write_vreg(vgpu, offset, p_data, bytes);
+
+       switch (offset) {
+       case 0x4260:
+               id = RCS;
+               break;
+       case 0x4264:
+               id = VCS;
+               break;
+       case 0x4268:
+               id = VCS2;
+               break;
+       case 0x426c:
+               id = BCS;
+               break;
+       case 0x4270:
+               id = VECS;
+               break;
+       default:
+               return -EINVAL;
+       }
+       set_bit(id, (void *)vgpu->tlb_handle_pending);
+
+       return 0;
+}
+
 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
        ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
                f, s, am, rm, d, r, w); \
@@ -1377,7 +1478,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
        /* RING MODE */
 #define RING_REG(base) (base + 0x29c)
-       MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, NULL);
+       MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
 #undef RING_REG
 
        MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
@@ -2103,11 +2204,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
        MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
        MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-       MMIO_DH(0x4260, D_BDW_PLUS, NULL, NULL);
-       MMIO_DH(0x4264, D_BDW_PLUS, NULL, NULL);
-       MMIO_DH(0x4268, D_BDW_PLUS, NULL, NULL);
-       MMIO_DH(0x426c, D_BDW_PLUS, NULL, NULL);
-       MMIO_DH(0x4270, D_BDW_PLUS, NULL, NULL);
+       MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+       MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+       MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+       MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+       MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
        MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
        return 0;
@@ -2192,7 +2293,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
        MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
        MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
        MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-       MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
+       MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
        MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
                        NULL, NULL);
        MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,