*
*/
-#include "i915_drv.h"
-
#include <linux/kthread.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
-void set_context_pdp_root_pointer(struct execlist_ring_context *ring_context,
+static void set_context_pdp_root_pointer(
+ struct execlist_ring_context *ring_context,
u32 pdp[8])
{
struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
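For context, the rest of this helper (unchanged by the hunk, so shown here only as a sketch of the usual implementation) copies the four PDP entries into the descending mmio pairs starting at pdp3_UDW:

	int i;

	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];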
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
- &gvt->dev_priv->engine[ring_id]);
+ gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
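This hunk drops the address-of operator because dev_priv->engine[] changed from an array of engine structures to an array of pointers, so intel_lr_context_size() now receives the element directly. A minimal sketch of the type change (field shape assumed from the i915 tree of this era):

	/* before: struct intel_engine_cs engine[I915_NUM_ENGINES]; */
	/* after:  struct intel_engine_cs *engine[I915_NUM_ENGINES]; */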
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_gem_request *rq;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- workload->req = i915_gem_request_alloc(&dev_priv->engine[ring_id],
- shadow_ctx);
- if (IS_ERR_OR_NULL(workload->req)) {
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
gvt_err("fail to allocate gem request\n");
- workload->status = PTR_ERR(workload->req);
- workload->req = NULL;
+ workload->status = PTR_ERR(rq);
return workload->status;
}
- gvt_dbg_sched("ring id %d get i915 gem request %p\n",
- ring_id, workload->req);
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
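+	/*
+	 * Keep a reference on the request for the wait/complete path in
+	 * the scheduler thread; it is dropped there with
+	 * i915_gem_request_put() once the workload has completed.
+	 */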
+ workload->req = i915_gem_request_get(rq);
mutex_lock(&gvt->lock);
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
- i915_add_request_no_flush(workload->req);
-
+ i915_add_request_no_flush(rq);
workload->dispatched = true;
return 0;
err:
workload->status = ret;
- if (workload->req)
- workload->req = NULL;
mutex_unlock(&gvt->lock);
+
+ i915_add_request_no_flush(rq);
return ret;
}
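Note the contract the error path now honours: once i915_gem_request_alloc() has succeeded, the request has reserved ring space and must still be submitted even if command emission fails. Reduced to a sketch (assumed shape; the emission step is elided):

	rq = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	/* ... emit commands; on failure, fall through to err ... */
	i915_add_request_no_flush(rq);
	return 0;
err:
	i915_add_request_no_flush(rq);	/* still add it to release the reservation */
	return ret;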
workload->ctx_desc.lrca);
context_page_num = intel_lr_context_size(
- &gvt->dev_priv->engine[ring_id]);
+ gvt->dev_priv->engine[ring_id]);
context_page_num = context_page_num >> PAGE_SHIFT;
int ring_id;
};
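+/*
+ * Global serialization of the per-ring workload threads: only one vGPU
+ * workload is dispatched, waited on and completed at a time.
+ */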
+static DEFINE_MUTEX(scheduler_mutex);
+
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
+ long lret;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(scheduler->waitq[ring_id],
- kthread_should_stop() ||
- (workload = pick_next_workload(gvt, ring_id)));
-
- WARN_ON_ONCE(ret);
-
- if (kthread_should_stop())
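+		/*
+		 * Open-code the wait with wait_woken() instead of
+		 * wait_event_interruptible(): pick_next_workload() can take
+		 * sleeping locks, which must not happen while the task is
+		 * already in TASK_INTERRUPTIBLE (nested-sleep debugging
+		 * would complain).
+		 */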
+ add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ do {
+ workload = pick_next_workload(gvt, ring_id);
+ if (workload)
+ break;
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ } while (!kthread_should_stop());
+ remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+ if (!workload)
break;
+ mutex_lock(&scheduler_mutex);
+
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
intel_runtime_pm_get(gvt->dev_priv);
- /*
- * Always take i915 big lock first
- */
- ret = i915_mutex_lock_interruptible(&gvt->dev_priv->drm);
- if (ret < 0) {
- gvt_err("i915 submission is not available, retry\n");
- schedule_timeout(1);
- continue;
- }
-
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
intel_uncore_forcewake_get(gvt->dev_priv,
FORCEWAKE_ALL);
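+		/*
+		 * Take struct_mutex only around the dispatch itself; the
+		 * request wait below runs unlocked so other i915 users are
+		 * not stalled behind the vGPU workload.
+		 */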
+ mutex_lock(&gvt->dev_priv->drm.struct_mutex);
ret = dispatch_workload(workload);
+ mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+
if (ret) {
gvt_err("fail to dispatch workload, skip\n");
goto complete;
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
- workload->status = i915_wait_request(workload->req,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- NULL, NULL);
- if (workload->status != 0)
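+		/*
+		 * flags == 0 selects an unlocked, uninterruptible wait;
+		 * i915_wait_request() returns the remaining timeout (>= 0)
+		 * on success or a negative error code.
+		 */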
+ lret = i915_wait_request(workload->req,
+ 0, MAX_SCHEDULE_TIMEOUT);
+ if (lret < 0) {
+ workload->status = lret;
gvt_err("fail to wait workload, skip\n");
+ } else {
+ workload->status = 0;
+ }
complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
+ mutex_lock(&gvt->dev_priv->drm.struct_mutex);
complete_current_workload(gvt, ring_id);
+ mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
+
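+		/*
+		 * Drop the reference taken at dispatch; fetch_and_zero()
+		 * also clears workload->req so it cannot be reused.
+		 */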
+ i915_gem_request_put(fetch_and_zero(&workload->req));
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
- mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
-
intel_runtime_pm_put(gvt->dev_priv);
+
+ mutex_unlock(&scheduler_mutex);
+
}
return 0;
}
init_waitqueue_head(&scheduler->workload_complete_wq);
for (i = 0; i < I915_NUM_ENGINES; i++) {
+ /* check ring mask at init time */
+ if (!HAS_ENGINE(gvt->dev_priv, i))
+ continue;
+
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);