BackMerge v5.1-rc5 into drm-next
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 3faf2438b9bcf6ad9f05849ffb4f8bd9e316f96e..2f91281ac0002c0f25a603d5c061e4e8b061ea0b 100644
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
        int i = 0;
 
        if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-               return -1;
+               return -EINVAL;
 
        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        if (workload->shadow)
                return 0;
 
-       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-       if (ret < 0) {
-               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-               return ret;
-       }
-
        /* pin shadow context by gvt even the shadow context will be pinned
         * when i915 alloc request. That is because gvt will update the guest
         * context from shadow context when workload is completed, and at that
@@ -677,6 +671,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+       struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;
 
@@ -686,6 +683,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+       if (ret < 0) {
+               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+               goto err_req;
+       }
+
        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;
@@ -702,6 +705,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        ret = prepare_workload(workload);
 out:
+       if (ret) {
+               /* We might still need to add a request with a
+                * clean ctx to retire it properly.
+                */
+               rq = fetch_and_zero(&workload->req);
+               i915_request_put(rq);
+       }
+
        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                                ring_id, workload->req);
@@ -738,7 +749,8 @@ static struct intel_vgpu_workload *pick_next_workload(
                goto out;
        }
 
-       if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+       if (!scheduler->current_vgpu->active ||
+           list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;
 
        /*
@@ -1473,8 +1485,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                intel_runtime_pm_put_unchecked(dev_priv);
        }
 
-       if (ret && (vgpu_is_vm_unhealthy(ret))) {
-               enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+       if (ret) {
+               if (vgpu_is_vm_unhealthy(ret))
+                       enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }