/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
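
/*
 * A task only counts as asleep if it is in a normal sleep state
 * (TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE) and has actually been
 * taken off the runqueue; a runnable-but-preempted waiter is not treated
 * as having missed its wakeup.
 */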
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)

static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		/*
		 * N.B. Since task_asleep() and ttwu are not atomic, the
		 * waiter may actually go to sleep after the check, causing
		 * us to suppress a valid wakeup. We prefer to reduce the
		 * number of false positive missed_breadcrumb() warnings
		 * at the expense of a few false negatives, as it is easy
		 * to trigger a false positive under heavy load. Enough
		 * signal should remain from genuine missed_breadcrumb()
		 * for us to detect in CI.
		 */
		bool was_asleep = task_asleep(wait->tsk);

		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk) && was_asleep)
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}

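/*
 * round_jiffies_up() aligns the expiry to a whole second so that the
 * hangcheck timers of different engines can be coalesced into a single
 * wakeup.
 */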
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s missed breadcrumb at %pS\n",
				  engine->name, __builtin_return_address(0));
	}

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.hangcheck);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned int irq_count;

	if (!b->irq_armed)
		return;

	irq_count = READ_ONCE(b->irq_count);
	if (b->hangcheck_interrupts != irq_count) {
		b->hangcheck_interrupts = irq_count;
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g., low priority task on a loaded
	 * system) and wait until it sleeps before declaring a missed interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.fake_irq);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	/* If the user has disabled the fake-irq, restore the hangchecking */
	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	mod_timer(&b->fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/*
	 * FIXME: Ideally we want this on the API boundary, but for the
	 * sake of testing with mock breadcrumbs (no HW so unable to
	 * enable irqs) we place it deep within the bowels, at the point
	 * of no return.
	 */
	GEM_BUG_ON(!intel_irqs_enabled(engine->i915));

	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	if (engine->irq_enable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_enable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	if (engine->irq_disable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_disable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

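/*
 * Caller holds b->irq_lock and has already cleared any waiter
 * (b->irq_wait == NULL); drop the irq reference taken when the
 * breadcrumbs were armed and mark them disarmed.
 */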
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);

	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(engine);

	b->irq_armed = false;
}

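/*
 * The pin/unpin pair below keeps the user interrupt enabled via the
 * b->irq_enabled refcount even when no waiter is queued, allowing a
 * caller to hold the interrupt open independently of the waiter tree.
 */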
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)
		irq_enable(engine);
	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
	if (!--b->irq_enabled)
		irq_disable(engine);
	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n;

	if (!b->irq_armed)
		return;

	/*
	 * We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
		missed_breadcrumb(engine);

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	b->irq_wait = NULL;
	if (b->irq_armed)
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
		RB_CLEAR_NODE(&wait->node);
		wake_up_process(wait->tsk);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
}

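/*
 * Fallback for missing user interrupts: if interrupts are unavailable or
 * have previously been seen to go missing on this engine, a self-rearming
 * one-jiffy timer (the "fake irq") kicks the oldest waiter to perform the
 * coherent seqno check instead.
 */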
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/*
	 * Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

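/*
 * Arm the breadcrumb interrupt (or the fake-irq fallback). The return
 * value tells the caller whether the irq was armed by this call, so it
 * can re-check the seqno for a wakeup that may have been missed while
 * enabling.
 */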
static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;
	bool enabled;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return false;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		return true;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	enabled = false;
	if (!b->irq_enabled++ &&
	    !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
		irq_enable(engine);
		enabled = true;
	}

	enable_fake_irq(b);

	return enabled;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/*
	 * This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	if (wait->tsk->state != TASK_RUNNING)
		wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first, armed;
	u32 seqno;

	GEM_BUG_ON(!wait->seqno);

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches; since we hold the
	 * spinlock we know that the first_waiter must be delayed and we can
	 * reduce some of the sequential wake up latency if we take action
	 * ourselves and wake up the completed tasks in parallel. Also, by
	 * removing stale elements in the tree, we may be able to reduce the
	 * ping-pong between the old bottom-half and ourselves as first-waiter.
	 */
	armed = false;
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		armed = __intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);
			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return armed;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
	if (armed)
		return armed;

	/* Make the caller recheck if its request has already started. */
	return intel_engine_has_started(engine, wait->seqno);
}

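/*
 * Lower task->prio means higher scheduling priority. A chain of completed
 * waiters is only woken on behalf of the departing bottom-half if the next
 * waiter is of equal or lower priority, and wakeup_priority() treats the
 * signaler thread as infinitely high priority (INT_MIN) so that no chain
 * wakeup is performed on its behalf.
 */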
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in parallel
			 * rather than sequentially, which should reduce
			 * the overall latency in waking all the completed
			 * clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}

static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct i915_request *rq, *n;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;
		LIST_HEAD(list);
		u32 seqno;

		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&b->signals))
			goto sleep;

		/*
		 * We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		seqno = intel_engine_get_seqno(engine);

		spin_lock_irq(&b->rb_lock);
		list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
			u32 this = rq->signaling.wait.seqno;

			GEM_BUG_ON(!rq->signaling.wait.seqno);

			if (!i915_seqno_passed(seqno, this))
				break;

			if (likely(this == i915_request_global_seqno(rq))) {
				__intel_engine_remove_wait(engine,
							   &rq->signaling.wait);

				rq->signaling.wait.seqno = 0;
				__list_del_entry(&rq->signaling.link);

				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
					      &rq->fence.flags)) {
					list_add_tail(&rq->signaling.link,
						      &list);
					i915_request_get(rq);
				}
			}
		}
		spin_unlock_irq(&b->rb_lock);

		if (!list_empty(&list)) {
			local_bh_disable();
			list_for_each_entry_safe(rq, n, &list, signaling.link) {
				dma_fence_signal(&rq->fence);
				GEM_BUG_ON(!i915_request_completed(rq));
				i915_request_put(rq);
			}
			local_bh_enable(); /* kick start the tasklets */

			/*
			 * If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
			/* Before we sleep, check for a missed seqno */
			if (current->state & TASK_NORMAL &&
			    !list_empty(&b->signals) &&
			    engine->irq_seqno_barrier &&
			    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
					       &engine->irq_posted)) {
				engine->irq_seqno_barrier(engine);
				intel_engine_wakeup(engine);
			}

sleep:
			if (kthread_should_park())
				kthread_parkme();

			if (unlikely(kthread_should_stop()))
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

static void insert_signal(struct intel_breadcrumbs *b,
			  struct i915_request *request,
			  const u32 seqno)
{
	struct i915_request *iter;

	lockdep_assert_held(&b->rb_lock);

	/*
	 * A reasonable assumption is that we are called to add signals
	 * in sequence, as the requests are submitted for execution and
	 * assigned a global_seqno. This will be the case for the majority
	 * of internally generated signals (inter-engine signaling).
	 *
	 * Out of order waiters triggering random signaling enabling will
	 * be more problematic, but hopefully rare enough and the list
	 * small enough that the O(N) insertion sort is not an issue.
	 */
	list_for_each_entry_reverse(iter, &b->signals, signaling.link)
		if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
			break;

	list_add(&request->signaling.link, &iter->signaling.link);
}

bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait = &request->signaling.wait;
	u32 seqno;

	/*
	 * Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_request_global_seqno(request);
	if (!seqno) /* will be enabled later upon execution */
		return true;

	GEM_BUG_ON(wait->seqno);
	wait->tsk = b->signaler;
	wait->request = request;
	wait->seqno = seqno;

	/*
	 * Add ourselves into the list of waiters, but registering our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	spin_lock(&b->rb_lock);
	insert_signal(b, request, seqno);
	wakeup &= __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->rb_lock);

	if (wakeup) {
		wake_up_process(b->signaler);
		return !intel_wait_complete(wait);
	}

	return false;
}

void intel_engine_cancel_signaling(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	if (!READ_ONCE(request->signaling.wait.seqno))
		return;

	spin_lock(&b->rb_lock);
	__intel_engine_remove_wait(engine, &request->signaling.wait);
	if (fetch_and_zero(&request->signaling.wait.seqno))
		__list_del_entry(&request->signaling.link);
	spin_unlock(&b->rb_lock);
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

	INIT_LIST_HEAD(&b->signals);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
	del_timer_sync(&b->hangcheck);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	spin_lock_irqsave(&b->irq_lock, flags);

	/*
	 * Leave the fake_irq timer enabled (if it is running), but clear the
	 * bit so that it turns itself off on its next wake up and goes back
	 * to the long hangcheck interval if still required.
	 */
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/*
	 * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), there is no possibility for a
	 * missed interrupt as we enabled the irq and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_unlock_irqrestore(&b->irq_lock, flags);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(!list_empty(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif