drm/i915/gt: Mark context->active_count as protected by timeline->mutex
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 16 Aug 2019 12:09:59 +0000 (13:09 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Fri, 16 Aug 2019 17:02:06 +0000 (18:02 +0100)
We use timeline->mutex to protect modifications to
context->active_count, and the associated enable/disable callbacks.
Due to complications with the engine-pm barrier, there is a path where we
used a "superlock" to provide serialised protection and so could not
unconditionally assert with lockdep that it was always held. However,
we can mark the mutex as taken (noting that we may be nested underneath
ourselves) which means we can be reassured the right timeline->mutex is
always treated as held and let lockdep roam free.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190816121000.8507-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_context.h
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/i915_request.c

index 053a1307ecb4b2ebda3ce54d197bec958d0532a9..dd742ac2fbdb743db6ef5b0ad776bc0b2137ca6d 100644 (file)
@@ -89,17 +89,20 @@ void intel_context_exit_engine(struct intel_context *ce);
 
 static inline void intel_context_enter(struct intel_context *ce)
 {
+       lockdep_assert_held(&ce->timeline->mutex);
        if (!ce->active_count++)
                ce->ops->enter(ce);
 }
 
 static inline void intel_context_mark_active(struct intel_context *ce)
 {
+       lockdep_assert_held(&ce->timeline->mutex);
        ++ce->active_count;
 }
 
 static inline void intel_context_exit(struct intel_context *ce)
 {
+       lockdep_assert_held(&ce->timeline->mutex);
        GEM_BUG_ON(!ce->active_count);
        if (!--ce->active_count)
                ce->ops->exit(ce);
index a632b20ec4d87e6418685532d702020e04c5535d..616f3f9a682531a7b69f63bfde1551e4afe1ab18 100644 (file)
@@ -61,7 +61,7 @@ struct intel_context {
        u32 *lrc_reg_state;
        u64 lrc_desc;
 
-       unsigned int active_count; /* notionally protected by timeline->mutex */
+       unsigned int active_count; /* protected by timeline->mutex */
 
        atomic_t pin_count;
        struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
index f3f0109f9e228a9cb538523d93bb7d931b660b22..5f03f7dcad72f4fd03979a666ace50f5d3d3910a 100644 (file)
@@ -37,6 +37,16 @@ static int __engine_unpark(struct intel_wakeref *wf)
        return 0;
 }
 
+static inline void __timeline_mark_lock(struct intel_context *ce)
+{
+       mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
+}
+
+static inline void __timeline_mark_unlock(struct intel_context *ce)
+{
+       mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+}
+
 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 {
        struct i915_request *rq;
@@ -61,6 +71,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
         * retiring the last request, thus all rings should be empty and
         * all timelines idle.
         */
+       __timeline_mark_lock(engine->kernel_context);
+
        rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
        if (IS_ERR(rq))
                /* Context switch failed, hope for the best! Maybe reset? */
@@ -80,6 +92,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
        __intel_wakeref_defer_park(&engine->wakeref);
        __i915_request_queue(rq, NULL);
 
+       __timeline_mark_unlock(engine->kernel_context);
+
        return false;
 }
 
index 7b476cd55dac9a63462580002655c11a36c58da9..eafd94d5e211092c5d99f206a5e5d6d5815385a7 100644 (file)
@@ -338,6 +338,8 @@ void intel_timeline_enter(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
 
+       lockdep_assert_held(&tl->mutex);
+
        GEM_BUG_ON(!atomic_read(&tl->pin_count));
        if (tl->active_count++)
                return;
@@ -352,6 +354,8 @@ void intel_timeline_exit(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
 
+       lockdep_assert_held(&tl->mutex);
+
        GEM_BUG_ON(!tl->active_count);
        if (--tl->active_count)
                return;
index 7cefff8b2c5e8a84a6dc62ec91509574378b822a..91147a6b68b5f7a11bb14e09e428755e99f954ab 100644 (file)
@@ -1087,7 +1087,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
         * precludes optimising to use semaphores serialisation of a single
         * timeline across engines.
         */
-       prev = rcu_dereference_protected(timeline->last_request.request, 1);
+       prev = rcu_dereference_protected(timeline->last_request.request,
+                                        lockdep_is_held(&timeline->mutex));
        if (prev && !i915_request_completed(prev)) {
                if (is_power_of_2(prev->engine->mask | rq->engine->mask))
                        i915_sw_fence_await_sw_fence(&rq->submit,