drm/i915/gvt: Wean off struct_mutex
author: Chris Wilson <chris@chris-wilson.co.uk>
Wed, 16 Oct 2019 18:39:01 +0000 (19:39 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 21 Oct 2019 07:57:27 +0000 (08:57 +0100)
Use the local vgpu_lock while preparing workloads to avoid taking the
obsolete i915->drm.struct_mutex

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191016183902.13614-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gvt/scheduler.c

index 9ebb2534558b8be323ff2d1e902abfc3dc17067f..36bb7639e82fbb3afaeae1ad1f9f20c8ed1a47f8 100644 (file)
@@ -415,10 +415,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ret;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       lockdep_assert_held(&vgpu->vgpu_lock);
 
        if (workload->shadow)
                return 0;
@@ -580,8 +579,6 @@ static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
 
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-       struct intel_vgpu *vgpu = workload->vgpu;
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_shadow_bb *bb, *pos;
 
        if (list_empty(&workload->shadow_bb))
@@ -590,8 +587,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
        bb = list_first_entry(&workload->shadow_bb,
                        struct intel_vgpu_shadow_bb, list);
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-
        list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
                if (bb->obj) {
                        if (bb->accessing)
@@ -609,8 +604,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                list_del(&bb->list);
                kfree(bb);
        }
-
-       mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -685,7 +678,6 @@ err_unpin_mm:
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;
@@ -694,7 +686,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
                ring_id, workload);
 
        mutex_lock(&vgpu->vgpu_lock);
-       mutex_lock(&dev_priv->drm.struct_mutex);
 
        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
@@ -729,7 +720,6 @@ out:
 err_req:
        if (ret)
                workload->status = ret;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
        mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
@@ -1594,9 +1584,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
         */
        if (list_empty(workload_q_head(vgpu, ring_id))) {
                intel_runtime_pm_get(&dev_priv->runtime_pm);
-               mutex_lock(&dev_priv->drm.struct_mutex);
+               mutex_lock(&vgpu->vgpu_lock);
                ret = intel_gvt_scan_and_shadow_workload(workload);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
+               mutex_unlock(&vgpu->vgpu_lock);
                intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
        }