drm/i915: Make for_each_engine_masked work on intel_gt
author Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Thu, 17 Oct 2019 16:18:52 +0000 (17:18 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Thu, 17 Oct 2019 23:06:25 +0000 (00:06 +0100)
Medium-term goal is to eliminate the i915->engine[] array and to get there
we have recently introduced equivalent array in intel_gt. Now we need to
migrate the code further towards this state.

This next step is to eliminate usage of i915->engine[] from the
for_each_engine_masked iterator.

For this to work we also need to use engine->id as index when populating
the gt->engine[] array and adjust the default engine set indexing to use
engine->legacy_idx instead of assuming gt->engine[] indexing.

v2:
  * Populate gt->engine[] earlier.
  * Check that we don't duplicate engine->legacy_idx

v3:
  * Work around the initialization order issue between default_engines()
    and intel_engines_driver_register() which sets engine->legacy_idx for
    now. It will be fixed properly later.

v4:
  * Merge with forgotten v2.5.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191017161852.8836-1-tvrtko.ursulin@linux.intel.com
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_engine_user.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_hangcheck.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_drv.h

index 5d8221c7ba83d79bb71c0b3940b2a5f7947480d3..7b01f4605f217bd9740eea929be79fe11cc04194 100644 (file)
@@ -203,15 +203,22 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
+               if (engine->legacy_idx == INVALID_ENGINE)
+                       continue;
+
+               GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+               GEM_BUG_ON(e->engines[engine->legacy_idx]);
+
                ce = intel_context_create(ctx, engine);
                if (IS_ERR(ce)) {
-                       __free_engines(e, id);
+                       __free_engines(e, e->num_engines + 1);
                        return ERR_CAST(ce);
                }
 
-               e->engines[id] = ce;
-               e->num_engines = id + 1;
+               e->engines[engine->legacy_idx] = ce;
+               e->num_engines = max(e->num_engines, engine->legacy_idx);
        }
+       e->num_engines++;
 
        return e;
 }
index 5051a1fd256525f19eae4d03800a58d9b6e0f699..e514c68b07132a1f2c490df5f1e8f66fd7de16da 100644 (file)
@@ -277,6 +277,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
        BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
        BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
 
+       if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
+               return -EINVAL;
+
        if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
                return -EINVAL;
 
@@ -293,6 +296,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
        BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
 
        engine->id = id;
+       engine->legacy_idx = INVALID_ENGINE;
        engine->mask = BIT(id);
        engine->i915 = gt->i915;
        engine->gt = gt;
@@ -328,6 +332,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
        intel_engine_sanitize_mmio(engine);
 
        gt->engine_class[info->class][info->instance] = engine;
+       gt->engine[id] = engine;
 
        intel_engine_add_user(engine);
        gt->i915->engine[id] = engine;
index 6199064f332bddd9fa7d40df4d1ee754d56c2beb..3451be034caf4c3b6396dea6249c1c0f577ac249 100644 (file)
@@ -148,6 +148,7 @@ enum intel_engine_id {
        VECS1,
 #define _VECS(n) (VECS0 + (n))
        I915_NUM_ENGINES
+#define INVALID_ENGINE ((enum intel_engine_id)-1)
 };
 
 struct st_preempt_hang {
index 77cd5de8393001a2c77b69cdd95b7b758932764e..7f7150a733f44069558ea9546e83d8fe7f6438c9 100644 (file)
@@ -160,10 +160,10 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
        };
 
        if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
-               return -1;
+               return INVALID_ENGINE;
 
        if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
-               return -1;
+               return INVALID_ENGINE;
 
        return map[ring->class].base + ring->instance;
 }
@@ -171,23 +171,15 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
 static void add_legacy_ring(struct legacy_ring *ring,
                            struct intel_engine_cs *engine)
 {
-       int idx;
-
        if (engine->gt != ring->gt || engine->class != ring->class) {
                ring->gt = engine->gt;
                ring->class = engine->class;
                ring->instance = 0;
        }
 
-       idx = legacy_ring_idx(ring);
-       if (unlikely(idx == -1))
-               return;
-
-       GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
-       ring->gt->engine[idx] = engine;
-       ring->instance++;
-
-       engine->legacy_idx = idx;
+       engine->legacy_idx = legacy_ring_idx(ring);
+       if (engine->legacy_idx != INVALID_ENGINE)
+               ring->instance++;
 }
 
 void intel_engines_driver_register(struct drm_i915_private *i915)
index b3619a2a5d0e02cdf0ec120ce7cf3914a3e665aa..c99b6b2f38c2260c80c742df29b06f521d9fe61a 100644 (file)
@@ -186,7 +186,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
                struct intel_engine_cs *engine;
                enum intel_engine_id id;
 
-               for_each_engine_masked(engine, i915, engine_mask, id)
+               for_each_engine_masked(engine, gt, engine_mask, id)
                        gen8_clear_engine_error_register(engine);
        }
 }
index c14dbeb3ccc30c0e136e4b823d8d4a7bb6f05c8e..b2af73984f93dd0511ea3d6bb28a3b9958711d27 100644 (file)
@@ -237,7 +237,7 @@ static void hangcheck_declare_hang(struct intel_gt *gt,
                hung &= ~stuck;
        len = scnprintf(msg, sizeof(msg),
                        "%s on ", stuck == hung ? "no progress" : "hang");
-       for_each_engine_masked(engine, gt->i915, hung, tmp)
+       for_each_engine_masked(engine, gt, hung, tmp)
                len += scnprintf(msg + len, sizeof(msg) - len,
                                 "%s, ", engine->name);
        msg[len-2] = '\0';
index 477bfafdb103c3c5ee65ed791380de0c19196939..b191b07457035f0f1750c9bbf976eb9614dcb33c 100644 (file)
@@ -298,7 +298,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
                intel_engine_mask_t tmp;
 
                hw_mask = 0;
-               for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+               for_each_engine_masked(engine, gt, engine_mask, tmp) {
                        GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
                        hw_mask |= hw_engine_mask[engine->id];
                }
@@ -432,7 +432,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
                hw_mask = GEN11_GRDOM_FULL;
        } else {
                hw_mask = 0;
-               for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+               for_each_engine_masked(engine, gt, engine_mask, tmp) {
                        GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
                        hw_mask |= hw_engine_mask[engine->id];
                        ret = gen11_lock_sfc(engine, &hw_mask);
@@ -451,7 +451,7 @@ sfc_unlock:
         * expiration).
         */
        if (engine_mask != ALL_ENGINES)
-               for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+               for_each_engine_masked(engine, gt, engine_mask, tmp)
                        gen11_unlock_sfc(engine);
 
        return ret;
@@ -510,7 +510,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
        intel_engine_mask_t tmp;
        int ret;
 
-       for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+       for_each_engine_masked(engine, gt, engine_mask, tmp) {
                ret = gen8_engine_reset_prepare(engine);
                if (ret && !reset_non_ready)
                        goto skip_reset;
@@ -536,7 +536,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
                ret = gen6_reset_engines(gt, engine_mask, retry);
 
 skip_reset:
-       for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+       for_each_engine_masked(engine, gt, engine_mask, tmp)
                gen8_engine_reset_cancel(engine);
 
        return ret;
@@ -1206,7 +1206,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
         * single reset fails.
         */
        if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
-               for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+               for_each_engine_masked(engine, gt, engine_mask, tmp) {
                        BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
                        if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
                                             &gt->reset.flags))
index f21b8fb5b37e11a4cfb071d3adff9fe19245422f..d6e7a1189bad786efab7c812255f0f380a4fffc4 100644 (file)
@@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
        struct intel_vgpu_submission *s = &vgpu->submission;
        intel_engine_mask_t tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+       for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
                kfree(s->ring_scan_buffer[engine->id]);
                s->ring_scan_buffer[engine->id] = NULL;
                s->ring_scan_buffer_size[engine->id] = 0;
@@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
        struct intel_engine_cs *engine;
        intel_engine_mask_t tmp;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+       for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
                init_vgpu_execlist(vgpu, engine->id);
 }
 
index 6850f1f4024106a84567a9a09f17b99e4ffca86e..9ebb2534558b8be323ff2d1e902abfc3dc17067f 100644 (file)
@@ -887,7 +887,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
        intel_engine_mask_t tmp;
 
        /* free the unsubmited workloads in the queues. */
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+       for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
                list_for_each_entry_safe(pos, n,
                        &s->workload_q_head[engine->id], list) {
                        list_del_init(&pos->list);
index aa37c07004b966e97d24b0870b71258d789ec6e8..7927b1a0c7a6cb112c7b4706abad32dbbbf6d906 100644 (file)
@@ -590,8 +590,8 @@ match:
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
                                            struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *i915 = engine->i915;
        intel_engine_mask_t tmp, mask = engine->mask;
+       struct intel_gt *gt = engine->gt;
        struct llist_node *pos, *next;
        int err;
 
@@ -603,7 +603,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
         * We can then use the preallocated nodes in
         * i915_active_acquire_barrier()
         */
-       for_each_engine_masked(engine, i915, mask, tmp) {
+       for_each_engine_masked(engine, gt, mask, tmp) {
                u64 idx = engine->kernel_context->timeline->fence_context;
                struct active_node *node;
 
index 88956f37d96c59ee74b697994589aba8ccaabcff..40e923b0c2c8285df892b5313972f9fc43f5335c 100644 (file)
@@ -1415,10 +1415,10 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
                for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
 
 /* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
-       for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+       for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
             (tmp__) ? \
-            ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+            ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
             0;)
 
 #define rb_to_uabi_engine(rb) \