Merge tag 'gvt-next-2017-06-08' of https://github.com/01org/gvt-linux into drm-intel...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / gvt / render.c
index a5e11d89df2f86d13dc546f7b3e1a9e9c8ba7d97..504e57c3bc23c824036dff1bf51cd1168f5b15f3 100644 (file)
@@ -35,6 +35,7 @@
 
 #include "i915_drv.h"
 #include "gvt.h"
+#include "trace.h"
 
 struct render_mmio {
        int ring_id;
@@ -260,7 +261,8 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 
 #define CTX_CONTEXT_CONTROL_VAL        0x03
 
-void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from host to a vgpu. */
+static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct render_mmio *mmio;
@@ -305,14 +307,15 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
                I915_WRITE(mmio->reg, v);
                POSTING_READ(mmio->reg);
 
-               gvt_dbg_render("load reg %x old %x new %x\n",
-                               i915_mmio_reg_offset(mmio->reg),
-                               mmio->value, v);
+               trace_render_mmio(vgpu->id, "load",
+                                 i915_mmio_reg_offset(mmio->reg),
+                                 mmio->value, v);
        }
        handle_tlb_pending_event(vgpu, ring_id);
 }
 
-void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from vgpu to host. */
+static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct render_mmio *mmio;
@@ -346,8 +349,37 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
                I915_WRITE(mmio->reg, v);
                POSTING_READ(mmio->reg);
 
-               gvt_dbg_render("restore reg %x old %x new %x\n",
-                               i915_mmio_reg_offset(mmio->reg),
-                               mmio->value, v);
+               trace_render_mmio(vgpu->id, "restore",
+                                 i915_mmio_reg_offset(mmio->reg),
+                                 mmio->value, v);
        }
 }
+
+/**
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
+ * @pre: the last vGPU that owned the engine (NULL means the host owned it)
+ * @next: the vGPU to switch to (NULL means switching to a host workload)
+ * @ring_id: specify the engine
+ *
+ * If @pre is NULL, the host currently owns the engine. If @next is NULL,
+ * we are switching the engine back to a host workload.
+ */
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+                          struct intel_vgpu *next, int ring_id)
+{
+       /* At least one side of the switch must be a vGPU. */
+       if (WARN_ON(!pre && !next))
+               return;
+
+       gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+                      pre ? "vGPU" : "host", next ? "vGPU" : "host");
+
+       /*
+        * TODO: Optimize for vGPU to vGPU switch by merging
+        * switch_mmio_to_host() and switch_mmio_to_vgpu().
+        */
+       if (pre)
+               switch_mmio_to_host(pre, ring_id);
+
+       if (next)
+               switch_mmio_to_vgpu(next, ring_id);
+}