1 // SPDX-License-Identifier: MIT
3 * Copyright © 2014 Intel Corporation
6 #include <linux/circ_buf.h>
8 #include "gem/i915_gem_context.h"
9 #include "gt/gen8_engine_cs.h"
10 #include "gt/intel_breadcrumbs.h"
11 #include "gt/intel_context.h"
12 #include "gt/intel_engine_pm.h"
13 #include "gt/intel_engine_heartbeat.h"
14 #include "gt/intel_gt.h"
15 #include "gt/intel_gt_irq.h"
16 #include "gt/intel_gt_pm.h"
17 #include "gt/intel_gt_requests.h"
18 #include "gt/intel_lrc.h"
19 #include "gt/intel_lrc_reg.h"
20 #include "gt/intel_mocs.h"
21 #include "gt/intel_ring.h"
23 #include "intel_guc_submission.h"
26 #include "i915_trace.h"
29 * DOC: GuC-based command submission
31 * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
32 * firmware is moving to an updated submission interface and we plan to
33 * turn submission back on when that lands. The below documentation (and related
34 * code) matches the old submission model and will be updated as part of the
35 * upgrade to the new flow.
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC. Currently, we only use one
 * descriptor. This stage descriptor lets the GuC know about the workqueue and
 * process descriptor. Theoretically, it also lets the GuC know about our HW
 * contexts (context ID, etc...), but we actually employ a kind of submission
 * where the GuC uses the LRCA sent via the work item instead. This is called
 * a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver writes
 * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
 * triggers an interrupt on the GuC via another register write (0xC4C8).
 * Firmware writes a success/fail code back to the action register after
 * processing the request. The kernel driver polls waiting for this update and
 * then proceeds.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs a ring tail pointer
 * and an ELSP context descriptor dword into the work item.
 * See guc_add_request().
 */
64 /* GuC Virtual Engine */
65 struct guc_virtual_engine {
66 struct intel_engine_cs base;
67 struct intel_context context;
70 static struct intel_context *
71 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
73 #define GUC_REQUEST_SIZE 64 /* bytes */
/*
 * Below is a set of functions which control the GuC scheduling state which
 * require the ce->guc_state.lock to be held.
 */
79 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
80 #define SCHED_STATE_DESTROYED BIT(1)
81 #define SCHED_STATE_PENDING_DISABLE BIT(2)
82 #define SCHED_STATE_BANNED BIT(3)
83 #define SCHED_STATE_ENABLED BIT(4)
84 #define SCHED_STATE_PENDING_ENABLE BIT(5)
85 #define SCHED_STATE_REGISTERED BIT(6)
86 #define SCHED_STATE_BLOCKED_SHIFT 7
87 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
88 #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
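/*
 * SCHED_STATE_BLOCKED is a counter rather than a single flag: each call to
 * incr_context_blocked() adds one to the field starting at
 * SCHED_STATE_BLOCKED_SHIFT, so a context may be blocked several times and
 * is only considered unblocked again once the count drops back to zero.
 */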
90 static inline void init_sched_state(struct intel_context *ce)
92 lockdep_assert_held(&ce->guc_state.lock);
93 ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
static bool sched_state_is_init(struct intel_context *ce)
{
	/*
	 * XXX: Kernel contexts can have SCHED_STATE_REGISTERED after suspend.
	 */
	return !(ce->guc_state.sched_state &
		 ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
}
108 context_wait_for_deregister_to_register(struct intel_context *ce)
110 return ce->guc_state.sched_state &
111 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
115 set_context_wait_for_deregister_to_register(struct intel_context *ce)
117 lockdep_assert_held(&ce->guc_state.lock);
118 ce->guc_state.sched_state |=
119 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
123 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
125 lockdep_assert_held(&ce->guc_state.lock);
126 ce->guc_state.sched_state &=
127 ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
131 context_destroyed(struct intel_context *ce)
133 return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
137 set_context_destroyed(struct intel_context *ce)
139 lockdep_assert_held(&ce->guc_state.lock);
140 ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
143 static inline bool context_pending_disable(struct intel_context *ce)
145 return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
148 static inline void set_context_pending_disable(struct intel_context *ce)
150 lockdep_assert_held(&ce->guc_state.lock);
151 ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
154 static inline void clr_context_pending_disable(struct intel_context *ce)
156 lockdep_assert_held(&ce->guc_state.lock);
157 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
160 static inline bool context_banned(struct intel_context *ce)
162 return ce->guc_state.sched_state & SCHED_STATE_BANNED;
165 static inline void set_context_banned(struct intel_context *ce)
167 lockdep_assert_held(&ce->guc_state.lock);
168 ce->guc_state.sched_state |= SCHED_STATE_BANNED;
171 static inline void clr_context_banned(struct intel_context *ce)
173 lockdep_assert_held(&ce->guc_state.lock);
174 ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
177 static inline bool context_enabled(struct intel_context *ce)
179 return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
182 static inline void set_context_enabled(struct intel_context *ce)
184 lockdep_assert_held(&ce->guc_state.lock);
185 ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
188 static inline void clr_context_enabled(struct intel_context *ce)
190 lockdep_assert_held(&ce->guc_state.lock);
191 ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
194 static inline bool context_pending_enable(struct intel_context *ce)
196 return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
199 static inline void set_context_pending_enable(struct intel_context *ce)
201 lockdep_assert_held(&ce->guc_state.lock);
202 ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
205 static inline void clr_context_pending_enable(struct intel_context *ce)
207 lockdep_assert_held(&ce->guc_state.lock);
208 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
211 static inline bool context_registered(struct intel_context *ce)
213 return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
216 static inline void set_context_registered(struct intel_context *ce)
218 lockdep_assert_held(&ce->guc_state.lock);
219 ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
222 static inline void clr_context_registered(struct intel_context *ce)
224 lockdep_assert_held(&ce->guc_state.lock);
225 ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
228 static inline u32 context_blocked(struct intel_context *ce)
230 return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
231 SCHED_STATE_BLOCKED_SHIFT;
234 static inline void incr_context_blocked(struct intel_context *ce)
236 lockdep_assert_held(&ce->guc_state.lock);
238 ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
240 GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
243 static inline void decr_context_blocked(struct intel_context *ce)
245 lockdep_assert_held(&ce->guc_state.lock);
247 GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
249 ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
252 static inline bool context_has_committed_requests(struct intel_context *ce)
254 return !!ce->guc_state.number_committed_requests;
257 static inline void incr_context_committed_requests(struct intel_context *ce)
259 lockdep_assert_held(&ce->guc_state.lock);
260 ++ce->guc_state.number_committed_requests;
261 GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
264 static inline void decr_context_committed_requests(struct intel_context *ce)
266 lockdep_assert_held(&ce->guc_state.lock);
267 --ce->guc_state.number_committed_requests;
268 GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
271 static inline bool context_guc_id_invalid(struct intel_context *ce)
273 return ce->guc_id.id == GUC_INVALID_LRC_ID;
276 static inline void set_context_guc_id_invalid(struct intel_context *ce)
278 ce->guc_id.id = GUC_INVALID_LRC_ID;
281 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
283 return &ce->engine->gt->uc.guc;
286 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
288 return rb_entry(rb, struct i915_priolist, node);
291 static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
293 struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
295 GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
300 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
302 struct intel_context *ce = xa_load(&guc->context_lookup, id);
304 GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
309 static int guc_lrc_desc_pool_create(struct intel_guc *guc)
314 size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
315 GUC_MAX_LRC_DESCRIPTORS);
316 ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
317 (void **)&guc->lrc_desc_pool_vaddr);
324 static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
326 guc->lrc_desc_pool_vaddr = NULL;
327 i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
330 static inline bool guc_submission_initialized(struct intel_guc *guc)
332 return !!guc->lrc_desc_pool_vaddr;
335 static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
337 if (likely(guc_submission_initialized(guc))) {
338 struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
341 memset(desc, 0, sizeof(*desc));
344 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
	 * the lower level functions directly.
	 */
347 xa_lock_irqsave(&guc->context_lookup, flags);
348 __xa_erase(&guc->context_lookup, id);
349 xa_unlock_irqrestore(&guc->context_lookup, flags);
353 static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
355 return __get_context(guc, id);
358 static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
359 struct intel_context *ce)
	 * xarray API doesn't have an xa_store_irqsave wrapper, so calling the
	 * lower level functions directly.
	 */
367 xa_lock_irqsave(&guc->context_lookup, flags);
368 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
369 xa_unlock_irqrestore(&guc->context_lookup, flags);
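/*
 * Each H2G that expects a G2H reply bumps guc->outstanding_submission_g2h
 * (see guc_submission_send_busy_loop() and guc_add_request()); this helper
 * drops one count and wakes any waiter in intel_guc_wait_for_pending_msg()
 * once all outstanding replies have been received.
 */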
372 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
374 if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
375 wake_up_all(&guc->ct.wq);
378 static int guc_submission_send_busy_loop(struct intel_guc *guc,
385 * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
386 * so we don't handle the case where we don't get a reply because we
387 * aborted the send due to the channel being busy.
389 GEM_BUG_ON(g2h_len_dw && !loop);
392 atomic_inc(&guc->outstanding_submission_g2h);
394 return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
397 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
402 const int state = interruptible ?
403 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
407 GEM_BUG_ON(timeout < 0);
409 if (!atomic_read(wait_var))
416 prepare_to_wait(&guc->ct.wq, &wait, state);
418 if (!atomic_read(wait_var))
421 if (signal_pending_state(state, current)) {
431 timeout = io_schedule_timeout(timeout);
433 finish_wait(&guc->ct.wq, &wait);
435 return (timeout < 0) ? timeout : 0;
438 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
440 if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
443 return intel_guc_wait_for_pending_msg(guc,
444 &guc->outstanding_submission_g2h,
448 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
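/*
 * guc_add_request() builds the submission H2G for a single request:
 *   action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET (scheduling not yet
 *               enabled on the context) or INTEL_GUC_ACTION_SCHED_CONTEXT
 *   action[1] = ce->guc_id.id
 *   action[2] = GUC_CONTEXT_ENABLE (only for the MODE_SET form)
 * Only the enable path expects a G2H reply, so only it sets g2h_len_dw and
 * takes an extra context reference.
 */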
450 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
453 struct intel_context *ce = rq->context;
459 lockdep_assert_held(&rq->engine->sched_engine->lock);
462 * Corner case where requests were sitting in the priority list or a
	 * request was resubmitted after the context was banned.
	 */
465 if (unlikely(intel_context_is_banned(ce))) {
466 i915_request_put(i915_request_mark_eio(rq));
467 intel_engine_signal_breadcrumbs(ce->engine);
471 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
472 GEM_BUG_ON(context_guc_id_invalid(ce));
475 * Corner case where the GuC firmware was blown away and reloaded while
476 * this context was pinned.
478 if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id))) {
479 err = guc_lrc_desc_pin(ce, false);
484 spin_lock(&ce->guc_state.lock);
487 * The request / context will be run on the hardware when scheduling
488 * gets enabled in the unblock.
490 if (unlikely(context_blocked(ce)))
493 enabled = context_enabled(ce);
496 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
497 action[len++] = ce->guc_id.id;
498 action[len++] = GUC_CONTEXT_ENABLE;
499 set_context_pending_enable(ce);
500 intel_context_get(ce);
501 g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
503 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
504 action[len++] = ce->guc_id.id;
507 err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
508 if (!enabled && !err) {
509 trace_intel_context_sched_enable(ce);
510 atomic_inc(&guc->outstanding_submission_g2h);
511 set_context_enabled(ce);
512 } else if (!enabled) {
513 clr_context_pending_enable(ce);
514 intel_context_put(ce);
517 trace_i915_request_guc_submit(rq);
520 spin_unlock(&ce->guc_state.lock);
524 static inline void guc_set_lrc_tail(struct i915_request *rq)
526 rq->context->lrc_reg_state[CTX_RING_TAIL] =
527 intel_ring_set_tail(rq->ring, rq->tail);
530 static inline int rq_prio(const struct i915_request *rq)
532 return rq->sched.attr.priority;
535 static int guc_dequeue_one_context(struct intel_guc *guc)
537 struct i915_sched_engine * const sched_engine = guc->sched_engine;
538 struct i915_request *last = NULL;
543 lockdep_assert_held(&sched_engine->lock);
545 if (guc->stalled_request) {
547 last = guc->stalled_request;
551 while ((rb = rb_first_cached(&sched_engine->queue))) {
552 struct i915_priolist *p = to_priolist(rb);
553 struct i915_request *rq, *rn;
555 priolist_for_each_request_consume(rq, rn, p) {
556 if (last && rq->context != last->context)
559 list_del_init(&rq->sched.link);
561 __i915_request_submit(rq);
563 trace_i915_request_in(rq, 0);
568 rb_erase_cached(&p->node, &sched_engine->queue);
569 i915_priolist_free(p);
573 guc_set_lrc_tail(last);
575 ret = guc_add_request(guc, last);
576 if (unlikely(ret == -EPIPE))
578 else if (ret == -EBUSY) {
579 tasklet_schedule(&sched_engine->tasklet);
580 guc->stalled_request = last;
585 guc->stalled_request = NULL;
589 sched_engine->tasklet.callback = NULL;
590 tasklet_disable_nosync(&sched_engine->tasklet);
594 static void guc_submission_tasklet(struct tasklet_struct *t)
596 struct i915_sched_engine *sched_engine =
597 from_tasklet(sched_engine, t, tasklet);
601 spin_lock_irqsave(&sched_engine->lock, flags);
604 loop = guc_dequeue_one_context(sched_engine->private_data);
607 i915_sched_engine_reset_on_empty(sched_engine);
609 spin_unlock_irqrestore(&sched_engine->lock, flags);
612 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
614 if (iir & GT_RENDER_USER_INTERRUPT)
615 intel_engine_signal_breadcrumbs(engine);
618 static void __guc_context_destroy(struct intel_context *ce);
619 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
620 static void guc_signal_context_fence(struct intel_context *ce);
621 static void guc_cancel_context_requests(struct intel_context *ce);
622 static void guc_blocked_fence_complete(struct intel_context *ce);
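/*
 * After a GuC reset, any G2H replies that were still outstanding will never
 * arrive. Walk the context_lookup xarray and manually complete the pending
 * enable / disable / deregister / destroy transitions, releasing the G2H
 * credits and references those operations were holding.
 */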
624 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
626 struct intel_context *ce;
627 unsigned long index, flags;
628 bool pending_disable, pending_enable, deregister, destroyed, banned;
630 xa_lock_irqsave(&guc->context_lookup, flags);
631 xa_for_each(&guc->context_lookup, index, ce) {
		 * Corner case where the ref count on the object is zero but a
634 * deregister G2H was lost. In this case we don't touch the ref
635 * count and finish the destroy of the context.
637 bool do_put = kref_get_unless_zero(&ce->ref);
639 xa_unlock(&guc->context_lookup);
641 spin_lock(&ce->guc_state.lock);
644 * Once we are at this point submission_disabled() is guaranteed
645 * to be visible to all callers who set the below flags (see above
646 * flush and flushes in reset_prepare). If submission_disabled()
647 * is set, the caller shouldn't set these flags.
650 destroyed = context_destroyed(ce);
651 pending_enable = context_pending_enable(ce);
652 pending_disable = context_pending_disable(ce);
653 deregister = context_wait_for_deregister_to_register(ce);
654 banned = context_banned(ce);
655 init_sched_state(ce);
657 spin_unlock(&ce->guc_state.lock);
659 GEM_BUG_ON(!do_put && !destroyed);
661 if (pending_enable || destroyed || deregister) {
662 decr_outstanding_submission_g2h(guc);
664 guc_signal_context_fence(ce);
666 release_guc_id(guc, ce);
667 __guc_context_destroy(ce);
669 if (pending_enable || deregister)
670 intel_context_put(ce);
		/* Not mutually exclusive with the above if statement. */
674 if (pending_disable) {
675 guc_signal_context_fence(ce);
677 guc_cancel_context_requests(ce);
678 intel_engine_signal_breadcrumbs(ce->engine);
680 intel_context_sched_disable_unpin(ce);
681 decr_outstanding_submission_g2h(guc);
683 spin_lock(&ce->guc_state.lock);
684 guc_blocked_fence_complete(ce);
685 spin_unlock(&ce->guc_state.lock);
687 intel_context_put(ce);
691 intel_context_put(ce);
692 xa_lock(&guc->context_lookup);
694 xa_unlock_irqrestore(&guc->context_lookup, flags);
698 submission_disabled(struct intel_guc *guc)
700 struct i915_sched_engine * const sched_engine = guc->sched_engine;
702 return unlikely(!sched_engine ||
703 !__tasklet_is_enabled(&sched_engine->tasklet));
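/*
 * The sched_engine tasklet enable state doubles as the "submission disabled"
 * flag: reset_prepare disables the tasklet and clears its callback via
 * disable_submission(), and enable_submission() restores both once the GuC is
 * back up.
 */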
706 static void disable_submission(struct intel_guc *guc)
708 struct i915_sched_engine * const sched_engine = guc->sched_engine;
710 if (__tasklet_is_enabled(&sched_engine->tasklet)) {
711 GEM_BUG_ON(!guc->ct.enabled);
712 __tasklet_disable_sync_once(&sched_engine->tasklet);
713 sched_engine->tasklet.callback = NULL;
717 static void enable_submission(struct intel_guc *guc)
719 struct i915_sched_engine * const sched_engine = guc->sched_engine;
722 spin_lock_irqsave(&guc->sched_engine->lock, flags);
723 sched_engine->tasklet.callback = guc_submission_tasklet;
724 wmb(); /* Make sure callback visible */
725 if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
726 __tasklet_enable(&sched_engine->tasklet)) {
727 GEM_BUG_ON(!guc->ct.enabled);
729 /* And kick in case we missed a new request submission. */
730 tasklet_hi_schedule(&sched_engine->tasklet);
732 spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
735 static void guc_flush_submissions(struct intel_guc *guc)
737 struct i915_sched_engine * const sched_engine = guc->sched_engine;
740 spin_lock_irqsave(&sched_engine->lock, flags);
741 spin_unlock_irqrestore(&sched_engine->lock, flags);
744 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
748 if (unlikely(!guc_submission_initialized(guc))) {
749 /* Reset called during driver load? GuC not yet initialised! */
753 intel_gt_park_heartbeats(guc_to_gt(guc));
754 disable_submission(guc);
755 guc->interrupts.disable(guc);
757 /* Flush IRQ handler */
758 spin_lock_irq(&guc_to_gt(guc)->irq_lock);
759 spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
761 guc_flush_submissions(guc);
764 * Handle any outstanding G2Hs before reset. Call IRQ handler directly
	 * each pass as interrupts have been disabled. We always scrub for
766 * outstanding G2H as it is possible for outstanding_submission_g2h to
767 * be incremented after the context state update.
769 for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) {
770 intel_guc_to_host_event_handler(guc);
771 #define wait_for_reset(guc, wait_var) \
772 intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20))
		do {
			wait_for_reset(guc, &guc->outstanding_submission_g2h);
775 } while (!list_empty(&guc->ct.requests.incoming));
778 scrub_guc_desc_for_outstanding_g2h(guc);
781 static struct intel_engine_cs *
782 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
784 struct intel_engine_cs *engine;
785 intel_engine_mask_t tmp, mask = ve->mask;
786 unsigned int num_siblings = 0;
788 for_each_engine_masked(engine, ve->gt, mask, tmp)
789 if (num_siblings++ == sibling)
795 static inline struct intel_engine_cs *
796 __context_to_physical_engine(struct intel_context *ce)
798 struct intel_engine_cs *engine = ce->engine;
800 if (intel_engine_is_virtual(engine))
801 engine = guc_virtual_get_sibling(engine, 0);
806 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
808 struct intel_engine_cs *engine = __context_to_physical_engine(ce);
810 if (intel_context_is_banned(ce))
813 GEM_BUG_ON(!intel_context_is_pinned(ce));
816 * We want a simple context + ring to execute the breadcrumb update.
817 * We cannot rely on the context being intact across the GPU hang,
818 * so clear it and rebuild just what we need for the breadcrumb.
819 * All pending requests for this context will be zapped, and any
820 * future request will be after userspace has had the opportunity
821 * to recreate its own state.
824 lrc_init_regs(ce, engine, true);
826 /* Rerun the request; its payload has been neutered (if guilty). */
827 lrc_update_regs(ce, engine, head);
830 static void guc_reset_nop(struct intel_engine_cs *engine)
834 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
839 __unwind_incomplete_requests(struct intel_context *ce)
841 struct i915_request *rq, *rn;
842 struct list_head *pl;
843 int prio = I915_PRIORITY_INVALID;
844 struct i915_sched_engine * const sched_engine =
845 ce->engine->sched_engine;
848 spin_lock_irqsave(&sched_engine->lock, flags);
849 spin_lock(&ce->guc_active.lock);
850 list_for_each_entry_safe_reverse(rq, rn,
851 &ce->guc_active.requests,
853 if (i915_request_completed(rq))
856 list_del_init(&rq->sched.link);
857 __i915_request_unsubmit(rq);
859 /* Push the request back into the queue for later resubmission. */
860 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
861 if (rq_prio(rq) != prio) {
863 pl = i915_sched_lookup_priolist(sched_engine, prio);
865 GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
867 list_add(&rq->sched.link, pl);
868 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
870 spin_unlock(&ce->guc_active.lock);
871 spin_unlock_irqrestore(&sched_engine->lock, flags);
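/*
 * Reset an individual context: find the request that was executing, rewind
 * the ring head to it, rebuild the context registers and push the incomplete
 * requests back onto the priority lists for resubmission.
 */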
874 static void __guc_reset_context(struct intel_context *ce, bool stalled)
876 struct i915_request *rq;
881 intel_context_get(ce);
884 * GuC will implicitly mark the context as non-schedulable when it sends
885 * the reset notification. Make sure our state reflects this change. The
886 * context will be marked enabled on resubmission.
888 * XXX: If the context is reset as a result of the request cancellation
889 * this G2H is received after the schedule disable complete G2H which is
890 * wrong as this creates a race between the request cancellation code
891 * re-submitting the context and this G2H handler. This is a bug in the
	 * GuC but can be worked around in the meantime by converting this to a
893 * NOP if a pending enable is in flight as this indicates that a request
894 * cancellation has occurred.
896 spin_lock_irqsave(&ce->guc_state.lock, flags);
897 if (likely(!context_pending_enable(ce)))
898 clr_context_enabled(ce);
901 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
905 rq = intel_context_find_active_request(ce);
907 head = ce->ring->tail;
912 if (!i915_request_started(rq))
915 GEM_BUG_ON(i915_active_is_idle(&ce->active));
916 head = intel_ring_wrap(ce->ring, rq->head);
917 __i915_request_reset(rq, stalled);
920 guc_reset_state(ce, head, stalled);
921 __unwind_incomplete_requests(ce);
923 intel_context_put(ce);
926 void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
928 struct intel_context *ce;
932 if (unlikely(!guc_submission_initialized(guc))) {
933 /* Reset called during driver load? GuC not yet initialised! */
937 xa_lock_irqsave(&guc->context_lookup, flags);
938 xa_for_each(&guc->context_lookup, index, ce) {
939 if (!kref_get_unless_zero(&ce->ref))
942 xa_unlock(&guc->context_lookup);
944 if (intel_context_is_pinned(ce))
945 __guc_reset_context(ce, stalled);
947 intel_context_put(ce);
949 xa_lock(&guc->context_lookup);
951 xa_unlock_irqrestore(&guc->context_lookup, flags);
953 /* GuC is blown away, drop all references to contexts */
954 xa_destroy(&guc->context_lookup);
957 static void guc_cancel_context_requests(struct intel_context *ce)
959 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
960 struct i915_request *rq;
963 /* Mark all executing requests as skipped. */
964 spin_lock_irqsave(&sched_engine->lock, flags);
965 spin_lock(&ce->guc_active.lock);
966 list_for_each_entry(rq, &ce->guc_active.requests, sched.link)
967 i915_request_put(i915_request_mark_eio(rq));
968 spin_unlock(&ce->guc_active.lock);
969 spin_unlock_irqrestore(&sched_engine->lock, flags);
973 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
975 struct i915_request *rq, *rn;
979 /* Can be called during boot if GuC fails to load */
984 * Before we call engine->cancel_requests(), we should have exclusive
985 * access to the submission state. This is arranged for us by the
986 * caller disabling the interrupt generation, the tasklet and other
987 * threads that may then access the same state, giving us a free hand
988 * to reset state. However, we still need to let lockdep be aware that
989 * we know this state may be accessed in hardirq context, so we
990 * disable the irq around this manipulation and we want to keep
991 * the spinlock focused on its duties and not accidentally conflate
992 * coverage to the submission's irq state. (Similarly, although we
993 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
997 spin_lock_irqsave(&sched_engine->lock, flags);
999 /* Flush the queued requests to the timeline list (for retiring). */
1000 while ((rb = rb_first_cached(&sched_engine->queue))) {
1001 struct i915_priolist *p = to_priolist(rb);
1003 priolist_for_each_request_consume(rq, rn, p) {
1004 list_del_init(&rq->sched.link);
1006 __i915_request_submit(rq);
1008 i915_request_put(i915_request_mark_eio(rq));
1011 rb_erase_cached(&p->node, &sched_engine->queue);
1012 i915_priolist_free(p);
1015 /* Remaining _unready_ requests will be nop'ed when submitted */
1017 sched_engine->queue_priority_hint = INT_MIN;
1018 sched_engine->queue = RB_ROOT_CACHED;
1020 spin_unlock_irqrestore(&sched_engine->lock, flags);
1023 void intel_guc_submission_cancel_requests(struct intel_guc *guc)
1025 struct intel_context *ce;
1026 unsigned long index;
1027 unsigned long flags;
1029 xa_lock_irqsave(&guc->context_lookup, flags);
1030 xa_for_each(&guc->context_lookup, index, ce) {
1031 if (!kref_get_unless_zero(&ce->ref))
1034 xa_unlock(&guc->context_lookup);
1036 if (intel_context_is_pinned(ce))
1037 guc_cancel_context_requests(ce);
1039 intel_context_put(ce);
1041 xa_lock(&guc->context_lookup);
1043 xa_unlock_irqrestore(&guc->context_lookup, flags);
1045 guc_cancel_sched_engine_requests(guc->sched_engine);
1047 /* GuC is blown away, drop all references to contexts */
1048 xa_destroy(&guc->context_lookup);
1051 void intel_guc_submission_reset_finish(struct intel_guc *guc)
1053 /* Reset called during driver load or during wedge? */
1054 if (unlikely(!guc_submission_initialized(guc) ||
1055 test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) {
1060 * Technically possible for either of these values to be non-zero here,
1061 * but very unlikely + harmless. Regardless let's add a warn so we can
	 * see in CI if this happens frequently / a precursor to taking down the
	 * machine.
	 */
1065 GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
1066 atomic_set(&guc->outstanding_submission_g2h, 0);
1068 intel_guc_global_policies_update(guc);
1069 enable_submission(guc);
1070 intel_gt_unpark_heartbeats(guc_to_gt(guc));
1074 * Set up the memory resources to be shared with the GuC (via the GGTT)
1075 * at firmware loading time.
1077 int intel_guc_submission_init(struct intel_guc *guc)
1081 if (guc->lrc_desc_pool)
1084 ret = guc_lrc_desc_pool_create(guc);
1088 * Keep static analysers happy, let them know that we allocated the
1089 * vma after testing that it didn't exist earlier.
1091 GEM_BUG_ON(!guc->lrc_desc_pool);
1093 xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
1095 spin_lock_init(&guc->contexts_lock);
1096 INIT_LIST_HEAD(&guc->guc_id_list);
1097 ida_init(&guc->guc_ids);
1102 void intel_guc_submission_fini(struct intel_guc *guc)
1104 if (!guc->lrc_desc_pool)
1107 guc_lrc_desc_pool_destroy(guc);
1108 i915_sched_engine_put(guc->sched_engine);
1111 static inline void queue_request(struct i915_sched_engine *sched_engine,
1112 struct i915_request *rq,
1115 GEM_BUG_ON(!list_empty(&rq->sched.link));
1116 list_add_tail(&rq->sched.link,
1117 i915_sched_lookup_priolist(sched_engine, prio));
1118 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1119 tasklet_hi_schedule(&sched_engine->tasklet);
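/*
 * Fast path used by guc_submit_request(): when nothing is queued and no
 * request is stalled, submit directly to the GuC and skip the tasklet.
 */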
1122 static int guc_bypass_tasklet_submit(struct intel_guc *guc,
1123 struct i915_request *rq)
1127 __i915_request_submit(rq);
1129 trace_i915_request_in(rq, 0);
1131 guc_set_lrc_tail(rq);
1132 ret = guc_add_request(guc, rq);
1134 guc->stalled_request = rq;
1136 if (unlikely(ret == -EPIPE))
1137 disable_submission(guc);
1142 static void guc_submit_request(struct i915_request *rq)
1144 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1145 struct intel_guc *guc = &rq->engine->gt->uc.guc;
1146 unsigned long flags;
1148 /* Will be called from irq-context when using foreign fences. */
1149 spin_lock_irqsave(&sched_engine->lock, flags);
1151 if (submission_disabled(guc) || guc->stalled_request ||
1152 !i915_sched_engine_is_empty(sched_engine))
1153 queue_request(sched_engine, rq, rq_prio(rq));
1154 else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
1155 tasklet_hi_schedule(&sched_engine->tasklet);
1157 spin_unlock_irqrestore(&sched_engine->lock, flags);
1160 static int new_guc_id(struct intel_guc *guc)
1162 return ida_simple_get(&guc->guc_ids, 0,
1163 GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL |
1164 __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
1167 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1169 if (!context_guc_id_invalid(ce)) {
1170 ida_simple_remove(&guc->guc_ids, ce->guc_id.id);
1171 reset_lrc_desc(guc, ce->guc_id.id);
1172 set_context_guc_id_invalid(ce);
1174 if (!list_empty(&ce->guc_id.link))
1175 list_del_init(&ce->guc_id.link);
1178 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1180 unsigned long flags;
1182 spin_lock_irqsave(&guc->contexts_lock, flags);
1183 __release_guc_id(guc, ce);
1184 spin_unlock_irqrestore(&guc->contexts_lock, flags);
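/*
 * No free guc_ids left: steal one from an idle context on guc_id_list (zero
 * guc_id.ref, i.e. no requests in flight). The victim loses its registration
 * and will pick up a new guc_id the next time it is pinned.
 */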
1187 static int steal_guc_id(struct intel_guc *guc)
1189 struct intel_context *ce;
1192 lockdep_assert_held(&guc->contexts_lock);
1194 if (!list_empty(&guc->guc_id_list)) {
1195 ce = list_first_entry(&guc->guc_id_list,
1196 struct intel_context,
1199 GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
1200 GEM_BUG_ON(context_guc_id_invalid(ce));
1202 list_del_init(&ce->guc_id.link);
1203 guc_id = ce->guc_id.id;
1205 spin_lock(&ce->guc_state.lock);
1206 clr_context_registered(ce);
1207 spin_unlock(&ce->guc_state.lock);
1209 set_context_guc_id_invalid(ce);
1216 static int assign_guc_id(struct intel_guc *guc, u16 *out)
1220 lockdep_assert_held(&guc->contexts_lock);
1222 ret = new_guc_id(guc);
1223 if (unlikely(ret < 0)) {
1224 ret = steal_guc_id(guc);
1233 #define PIN_GUC_ID_TRIES 4
1234 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
1237 unsigned long flags, tries = PIN_GUC_ID_TRIES;
1239 GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
1242 spin_lock_irqsave(&guc->contexts_lock, flags);
1244 might_lock(&ce->guc_state.lock);
1246 if (context_guc_id_invalid(ce)) {
1247 ret = assign_guc_id(guc, &ce->guc_id.id);
		ret = 1;	/* Indicates newly assigned guc_id */
1252 if (!list_empty(&ce->guc_id.link))
1253 list_del_init(&ce->guc_id.link);
1254 atomic_inc(&ce->guc_id.ref);
1257 spin_unlock_irqrestore(&guc->contexts_lock, flags);
	 * -EAGAIN indicates no guc_ids are available, so retire any
	 * outstanding requests to see if that frees up a guc_id. If the first
	 * retire didn't help, insert a sleep with the timeslice duration before
	 * attempting to retire more requests. Double the sleep period each
	 * subsequent pass before finally giving up. The sleep period has a
	 * maximum of 100ms and a minimum of 1ms.
	 */
1267 if (ret == -EAGAIN && --tries) {
1268 if (PIN_GUC_ID_TRIES - tries > 1) {
1269 unsigned int timeslice_shifted =
1270 ce->engine->props.timeslice_duration_ms <<
1271 (PIN_GUC_ID_TRIES - tries - 2);
1272 unsigned int max = min_t(unsigned int, 100,
1275 msleep(max_t(unsigned int, max, 1));
1277 intel_gt_retire_requests(guc_to_gt(guc));
1284 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
1286 unsigned long flags;
1288 GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
1290 if (unlikely(context_guc_id_invalid(ce)))
1293 spin_lock_irqsave(&guc->contexts_lock, flags);
1294 if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
1295 !atomic_read(&ce->guc_id.ref))
1296 list_add_tail(&ce->guc_id.link, &guc->guc_id_list);
1297 spin_unlock_irqrestore(&guc->contexts_lock, flags);
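/*
 * Context registration: the H2G carries the guc_id and the GGTT offset of
 * this context's guc_lrc_desc in the shared pool (see register_context()),
 * which the GuC reads to learn about the HW context.
 */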
1300 static int __guc_action_register_context(struct intel_guc *guc,
1306 INTEL_GUC_ACTION_REGISTER_CONTEXT,
1311 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
1315 static int register_context(struct intel_context *ce, bool loop)
1317 struct intel_guc *guc = ce_to_guc(ce);
1318 u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
1319 ce->guc_id.id * sizeof(struct guc_lrc_desc);
1322 trace_intel_context_register(ce);
1324 ret = __guc_action_register_context(guc, ce->guc_id.id, offset, loop);
1326 unsigned long flags;
1328 spin_lock_irqsave(&ce->guc_state.lock, flags);
1329 set_context_registered(ce);
1330 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1336 static int __guc_action_deregister_context(struct intel_guc *guc,
1340 INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
1344 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
1345 G2H_LEN_DW_DEREGISTER_CONTEXT,
1349 static int deregister_context(struct intel_context *ce, u32 guc_id)
1351 struct intel_guc *guc = ce_to_guc(ce);
1353 trace_intel_context_deregister(ce);
1355 return __guc_action_deregister_context(guc, guc_id);
1358 static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask)
{
	switch (class) {
	case RENDER_CLASS:
		return mask >> RCS0;
1363 case VIDEO_ENHANCEMENT_CLASS:
1364 return mask >> VECS0;
1365 case VIDEO_DECODE_CLASS:
1366 return mask >> VCS0;
1367 case COPY_ENGINE_CLASS:
1368 return mask >> BCS0;
1370 MISSING_CASE(class);
1375 static void guc_context_policy_init(struct intel_engine_cs *engine,
1376 struct guc_lrc_desc *desc)
1378 desc->policy_flags = 0;
1380 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
1381 desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE;
1383 /* NB: For both of these, zero means disabled. */
1384 desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
1385 desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
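/*
 * Register (or re-register) the context with the GuC. The descriptor in the
 * shared pool is filled in from the engine and context state; if this guc_id
 * or LRC address is already registered, the old entry must be deregistered
 * first and the registration completes only after the deregister G2H arrives
 * (SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER).
 */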
1388 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
1390 struct intel_engine_cs *engine = ce->engine;
1391 struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
1392 struct intel_guc *guc = &engine->gt->uc.guc;
1393 u32 desc_idx = ce->guc_id.id;
1394 struct guc_lrc_desc *desc;
1395 bool context_registered;
1396 intel_wakeref_t wakeref;
1399 GEM_BUG_ON(!engine->mask);
1400 GEM_BUG_ON(!sched_state_is_init(ce));
	 * Ensure the LRC + CT vmas are in the same region, as the write barrier
	 * is done based on the CT vma region.
	 */
1406 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
1407 i915_gem_object_is_lmem(ce->ring->vma->obj));
1409 context_registered = lrc_desc_registered(guc, desc_idx);
1411 reset_lrc_desc(guc, desc_idx);
1412 set_lrc_desc_registered(guc, desc_idx, ce);
1414 desc = __get_lrc_desc(guc, desc_idx);
1415 desc->engine_class = engine_class_to_guc_class(engine->class);
1416 desc->engine_submit_mask = adjust_engine_mask(engine->class,
1418 desc->hw_context_desc = ce->lrc.lrca;
1419 desc->priority = ce->guc_active.prio;
1420 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
1421 guc_context_policy_init(engine, desc);
1424 * The context_lookup xarray is used to determine if the hardware
1425 * context is currently registered. There are two cases in which it
	 * could be registered: either the guc_id has been stolen from another
1427 * context or the lrc descriptor address of this context has changed. In
1428 * either case the context needs to be deregistered with the GuC before
1429 * registering this context.
1431 if (context_registered) {
1433 unsigned long flags;
1435 trace_intel_context_steal_guc_id(ce);
1438 /* Seal race with Reset */
1439 spin_lock_irqsave(&ce->guc_state.lock, flags);
1440 disabled = submission_disabled(guc);
1441 if (likely(!disabled)) {
1442 set_context_wait_for_deregister_to_register(ce);
1443 intel_context_get(ce);
1445 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1446 if (unlikely(disabled)) {
1447 reset_lrc_desc(guc, desc_idx);
1448 return 0; /* Will get registered later */
1452 * If stealing the guc_id, this ce has the same guc_id as the
1453 * context whose guc_id was stolen.
1455 with_intel_runtime_pm(runtime_pm, wakeref)
1456 ret = deregister_context(ce, ce->guc_id.id);
1457 if (unlikely(ret == -ENODEV))
1458 ret = 0; /* Will get registered later */
1460 with_intel_runtime_pm(runtime_pm, wakeref)
1461 ret = register_context(ce, loop);
1462 if (unlikely(ret == -EBUSY)) {
1463 reset_lrc_desc(guc, desc_idx);
1464 } else if (unlikely(ret == -ENODEV)) {
1465 reset_lrc_desc(guc, desc_idx);
1466 ret = 0; /* Will get registered later */
1473 static int __guc_context_pre_pin(struct intel_context *ce,
1474 struct intel_engine_cs *engine,
1475 struct i915_gem_ww_ctx *ww,
1478 return lrc_pre_pin(ce, engine, ww, vaddr);
1481 static int __guc_context_pin(struct intel_context *ce,
1482 struct intel_engine_cs *engine,
1485 if (i915_ggtt_offset(ce->state) !=
1486 (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
1487 set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
1490 * GuC context gets pinned in guc_request_alloc. See that function for
	 * an explanation of why.
	 */
1494 return lrc_pin(ce, engine, vaddr);
1497 static int guc_context_pre_pin(struct intel_context *ce,
1498 struct i915_gem_ww_ctx *ww,
1501 return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
1504 static int guc_context_pin(struct intel_context *ce, void *vaddr)
1506 return __guc_context_pin(ce, ce->engine, vaddr);
1509 static void guc_context_unpin(struct intel_context *ce)
1511 struct intel_guc *guc = ce_to_guc(ce);
1513 unpin_guc_id(guc, ce);
1517 static void guc_context_post_unpin(struct intel_context *ce)
1522 static void __guc_context_sched_enable(struct intel_guc *guc,
1523 struct intel_context *ce)
1526 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
1531 trace_intel_context_sched_enable(ce);
1533 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
1534 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
1537 static void __guc_context_sched_disable(struct intel_guc *guc,
1538 struct intel_context *ce,
1542 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
1543 guc_id, /* ce->guc_id.id not stable */
1547 GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
1549 trace_intel_context_sched_disable(ce);
1551 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
1552 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
1555 static void guc_blocked_fence_complete(struct intel_context *ce)
1557 lockdep_assert_held(&ce->guc_state.lock);
1559 if (!i915_sw_fence_done(&ce->guc_state.blocked))
1560 i915_sw_fence_complete(&ce->guc_state.blocked);
1563 static void guc_blocked_fence_reinit(struct intel_context *ce)
1565 lockdep_assert_held(&ce->guc_state.lock);
1566 GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
1569 * This fence is always complete unless a pending schedule disable is
1570 * outstanding. We arm the fence here and complete it when we receive
1571 * the pending schedule disable complete message.
1573 i915_sw_fence_fini(&ce->guc_state.blocked);
1574 i915_sw_fence_reinit(&ce->guc_state.blocked);
1575 i915_sw_fence_await(&ce->guc_state.blocked);
1576 i915_sw_fence_commit(&ce->guc_state.blocked);
1579 static u16 prep_context_pending_disable(struct intel_context *ce)
1581 lockdep_assert_held(&ce->guc_state.lock);
1583 set_context_pending_disable(ce);
1584 clr_context_enabled(ce);
1585 guc_blocked_fence_reinit(ce);
1586 intel_context_get(ce);
1588 return ce->guc_id.id;
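/*
 * Block/unblock are used by request cancellation: guc_context_block() sends a
 * schedule disable and returns the guc_state.blocked fence, which is signalled
 * once the disable completes; guc_context_unblock() re-enables scheduling once
 * the blocked count drops back to zero.
 */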
1591 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
1593 struct intel_guc *guc = ce_to_guc(ce);
1594 unsigned long flags;
1595 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
1596 intel_wakeref_t wakeref;
1600 spin_lock_irqsave(&ce->guc_state.lock, flags);
1602 incr_context_blocked(ce);
1604 enabled = context_enabled(ce);
1605 if (unlikely(!enabled || submission_disabled(guc))) {
1607 clr_context_enabled(ce);
1608 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1609 return &ce->guc_state.blocked;
1613 * We add +2 here as the schedule disable complete CTB handler calls
1614 * intel_context_sched_disable_unpin (-2 to pin_count).
1616 atomic_add(2, &ce->pin_count);
1618 guc_id = prep_context_pending_disable(ce);
1620 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1622 with_intel_runtime_pm(runtime_pm, wakeref)
1623 __guc_context_sched_disable(guc, ce, guc_id);
1625 return &ce->guc_state.blocked;
1628 #define SCHED_STATE_MULTI_BLOCKED_MASK \
1629 (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
1630 #define SCHED_STATE_NO_UNBLOCK \
1631 (SCHED_STATE_MULTI_BLOCKED_MASK | \
1632 SCHED_STATE_PENDING_DISABLE | \
1635 static bool context_cant_unblock(struct intel_context *ce)
1637 lockdep_assert_held(&ce->guc_state.lock);
1639 return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
1640 context_guc_id_invalid(ce) ||
1641 !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
1642 !intel_context_is_pinned(ce);
1645 static void guc_context_unblock(struct intel_context *ce)
1647 struct intel_guc *guc = ce_to_guc(ce);
1648 unsigned long flags;
1649 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
1650 intel_wakeref_t wakeref;
1653 GEM_BUG_ON(context_enabled(ce));
1655 spin_lock_irqsave(&ce->guc_state.lock, flags);
1657 if (unlikely(submission_disabled(guc) ||
1658 context_cant_unblock(ce))) {
1662 set_context_pending_enable(ce);
1663 set_context_enabled(ce);
1664 intel_context_get(ce);
1667 decr_context_blocked(ce);
1669 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1672 with_intel_runtime_pm(runtime_pm, wakeref)
1673 __guc_context_sched_enable(guc, ce);
1677 static void guc_context_cancel_request(struct intel_context *ce,
1678 struct i915_request *rq)
1680 if (i915_sw_fence_signaled(&rq->submit)) {
1681 struct i915_sw_fence *fence;
1683 intel_context_get(ce);
1684 fence = guc_context_block(ce);
1685 i915_sw_fence_wait(fence);
1686 if (!i915_request_completed(rq)) {
1687 __i915_request_skip(rq);
1688 guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
1693 * XXX: Racey if context is reset, see comment in
1694 * __guc_reset_context().
1696 flush_work(&ce_to_guc(ce)->ct.requests.worker);
1698 guc_context_unblock(ce);
1699 intel_context_put(ce);
1703 static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
1705 u32 preemption_timeout)
1708 INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
1713 intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
1716 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
1718 struct intel_guc *guc = ce_to_guc(ce);
1719 struct intel_runtime_pm *runtime_pm =
1720 &ce->engine->gt->i915->runtime_pm;
1721 intel_wakeref_t wakeref;
1722 unsigned long flags;
1724 guc_flush_submissions(guc);
1726 spin_lock_irqsave(&ce->guc_state.lock, flags);
1727 set_context_banned(ce);
1729 if (submission_disabled(guc) ||
1730 (!context_enabled(ce) && !context_pending_disable(ce))) {
1731 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1733 guc_cancel_context_requests(ce);
1734 intel_engine_signal_breadcrumbs(ce->engine);
1735 } else if (!context_pending_disable(ce)) {
1739 * We add +2 here as the schedule disable complete CTB handler
1740 * calls intel_context_sched_disable_unpin (-2 to pin_count).
1742 atomic_add(2, &ce->pin_count);
1744 guc_id = prep_context_pending_disable(ce);
1745 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1748 * In addition to disabling scheduling, set the preemption
1749 * timeout to the minimum value (1 us) so the banned context
1750 * gets kicked off the HW ASAP.
1752 with_intel_runtime_pm(runtime_pm, wakeref) {
1753 __guc_context_set_preemption_timeout(guc, guc_id, 1);
1754 __guc_context_sched_disable(guc, ce, guc_id);
1757 if (!context_guc_id_invalid(ce))
1758 with_intel_runtime_pm(runtime_pm, wakeref)
1759 __guc_context_set_preemption_timeout(guc,
1762 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1766 static void guc_context_sched_disable(struct intel_context *ce)
1768 struct intel_guc *guc = ce_to_guc(ce);
1769 unsigned long flags;
1770 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
1771 intel_wakeref_t wakeref;
1774 spin_lock_irqsave(&ce->guc_state.lock, flags);
1777 * We have to check if the context has been disabled by another thread,
	 * check if submission has been disabled to seal a race with reset and
	 * finally check if any more requests have been committed to the
	 * context, ensuring that a request doesn't slip through the
1781 * 'context_pending_disable' fence.
1783 if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
1784 context_has_committed_requests(ce))) {
1785 clr_context_enabled(ce);
1786 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1789 guc_id = prep_context_pending_disable(ce);
1791 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1793 with_intel_runtime_pm(runtime_pm, wakeref)
1794 __guc_context_sched_disable(guc, ce, guc_id);
1798 intel_context_sched_disable_unpin(ce);
1801 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
1803 struct intel_guc *guc = ce_to_guc(ce);
1805 GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
1806 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
1807 GEM_BUG_ON(context_enabled(ce));
1809 deregister_context(ce, ce->guc_id.id);
1812 static void __guc_context_destroy(struct intel_context *ce)
1814 GEM_BUG_ON(ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
1815 ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
1816 ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
1817 ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
1818 GEM_BUG_ON(ce->guc_state.number_committed_requests);
1821 intel_context_fini(ce);
1823 if (intel_engine_is_virtual(ce->engine)) {
1824 struct guc_virtual_engine *ve =
1825 container_of(ce, typeof(*ve), context);
1827 if (ve->base.breadcrumbs)
1828 intel_breadcrumbs_put(ve->base.breadcrumbs);
1832 intel_context_free(ce);
1836 static void guc_context_destroy(struct kref *kref)
1838 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
1839 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
1840 struct intel_guc *guc = ce_to_guc(ce);
1841 intel_wakeref_t wakeref;
1842 unsigned long flags;
1846 * If the guc_id is invalid this context has been stolen and we can free
1847 * it immediately. Also can be freed immediately if the context is not
1848 * registered with the GuC or the GuC is in the middle of a reset.
1850 if (context_guc_id_invalid(ce)) {
1851 __guc_context_destroy(ce);
1853 } else if (submission_disabled(guc) ||
1854 !lrc_desc_registered(guc, ce->guc_id.id)) {
1855 release_guc_id(guc, ce);
1856 __guc_context_destroy(ce);
1861 * We have to acquire the context spinlock and check guc_id again, if it
1862 * is valid it hasn't been stolen and needs to be deregistered. We
1863 * delete this context from the list of unpinned guc_id available to
1864 * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB
1865 * returns indicating this context has been deregistered the guc_id is
1866 * returned to the pool of available guc_id.
1868 spin_lock_irqsave(&guc->contexts_lock, flags);
1869 if (context_guc_id_invalid(ce)) {
1870 spin_unlock_irqrestore(&guc->contexts_lock, flags);
1871 __guc_context_destroy(ce);
1875 if (!list_empty(&ce->guc_id.link))
1876 list_del_init(&ce->guc_id.link);
1877 spin_unlock_irqrestore(&guc->contexts_lock, flags);
1879 /* Seal race with Reset */
1880 spin_lock_irqsave(&ce->guc_state.lock, flags);
1881 disabled = submission_disabled(guc);
1882 if (likely(!disabled)) {
1883 set_context_destroyed(ce);
1884 clr_context_registered(ce);
1886 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1887 if (unlikely(disabled)) {
1888 release_guc_id(guc, ce);
1889 __guc_context_destroy(ce);
1894 * We defer GuC context deregistration until the context is destroyed
1895 * in order to save on CTBs. With this optimization ideally we only need
1896 * 1 CTB to register the context during the first pin and 1 CTB to
1897 * deregister the context when the context is destroyed. Without this
1898 * optimization, a CTB would be needed every pin & unpin.
	 * XXX: Need to acquire the runtime wakeref as this can be triggered
1901 * from context_free_worker when runtime wakeref is not held.
1902 * guc_lrc_desc_unpin requires the runtime as a GuC register is written
1903 * in H2G CTB to deregister the context. A future patch may defer this
1904 * H2G CTB if the runtime wakeref is zero.
1906 with_intel_runtime_pm(runtime_pm, wakeref)
1907 guc_lrc_desc_unpin(ce);
1910 static int guc_context_alloc(struct intel_context *ce)
1912 return lrc_alloc(ce, ce->engine);
1915 static void guc_context_set_prio(struct intel_guc *guc,
1916 struct intel_context *ce,
1920 INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
1925 GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
1926 prio > GUC_CLIENT_PRIORITY_NORMAL);
1927 lockdep_assert_held(&ce->guc_active.lock);
1929 if (ce->guc_active.prio == prio || submission_disabled(guc) ||
1930 !context_registered(ce)) {
1931 ce->guc_active.prio = prio;
1935 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
1937 ce->guc_active.prio = prio;
1938 trace_intel_context_set_prio(ce);
1941 static inline u8 map_i915_prio_to_guc_prio(int prio)
1943 if (prio == I915_PRIORITY_NORMAL)
1944 return GUC_CLIENT_PRIORITY_KMD_NORMAL;
1945 else if (prio < I915_PRIORITY_NORMAL)
1946 return GUC_CLIENT_PRIORITY_NORMAL;
1947 else if (prio < I915_PRIORITY_DISPLAY)
1948 return GUC_CLIENT_PRIORITY_HIGH;
1950 return GUC_CLIENT_PRIORITY_KMD_HIGH;
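/*
 * The context's GuC priority tracks the highest priority of its in-flight
 * requests: prio_count[] counts requests at each GuC priority level and
 * update_context_prio() picks the lowest-numbered (highest priority) level
 * with a non-zero count.
 */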
1953 static inline void add_context_inflight_prio(struct intel_context *ce,
1956 lockdep_assert_held(&ce->guc_active.lock);
1957 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
1959 ++ce->guc_active.prio_count[guc_prio];
1961 /* Overflow protection */
1962 GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]);
1965 static inline void sub_context_inflight_prio(struct intel_context *ce,
1968 lockdep_assert_held(&ce->guc_active.lock);
1969 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
1971 /* Underflow protection */
1972 GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]);
1974 --ce->guc_active.prio_count[guc_prio];
1977 static inline void update_context_prio(struct intel_context *ce)
1979 struct intel_guc *guc = &ce->engine->gt->uc.guc;
1982 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
1983 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
1985 lockdep_assert_held(&ce->guc_active.lock);
1987 for (i = 0; i < ARRAY_SIZE(ce->guc_active.prio_count); ++i) {
1988 if (ce->guc_active.prio_count[i]) {
1989 guc_context_set_prio(guc, ce, i);
1995 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
1997 /* Lower value is higher priority */
1998 return new_guc_prio < old_guc_prio;
2001 static void add_to_context(struct i915_request *rq)
2003 struct intel_context *ce = rq->context;
2004 u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
2006 GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
2008 spin_lock(&ce->guc_active.lock);
2009 list_move_tail(&rq->sched.link, &ce->guc_active.requests);
2011 if (rq->guc_prio == GUC_PRIO_INIT) {
2012 rq->guc_prio = new_guc_prio;
2013 add_context_inflight_prio(ce, rq->guc_prio);
2014 } else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
2015 sub_context_inflight_prio(ce, rq->guc_prio);
2016 rq->guc_prio = new_guc_prio;
2017 add_context_inflight_prio(ce, rq->guc_prio);
2019 update_context_prio(ce);
2021 spin_unlock(&ce->guc_active.lock);
2024 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
2026 lockdep_assert_held(&ce->guc_active.lock);
2028 if (rq->guc_prio != GUC_PRIO_INIT &&
2029 rq->guc_prio != GUC_PRIO_FINI) {
2030 sub_context_inflight_prio(ce, rq->guc_prio);
2031 update_context_prio(ce);
2033 rq->guc_prio = GUC_PRIO_FINI;
2036 static void remove_from_context(struct i915_request *rq)
2038 struct intel_context *ce = rq->context;
2040 spin_lock_irq(&ce->guc_active.lock);
2042 list_del_init(&rq->sched.link);
2043 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
2045 /* Prevent further __await_execution() registering a cb, then flush */
2046 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
2048 guc_prio_fini(rq, ce);
2050 spin_unlock_irq(&ce->guc_active.lock);
2052 spin_lock_irq(&ce->guc_state.lock);
2053 decr_context_committed_requests(ce);
2054 spin_unlock_irq(&ce->guc_state.lock);
2056 atomic_dec(&ce->guc_id.ref);
2057 i915_request_notify_execute_cb_imm(rq);
2060 static const struct intel_context_ops guc_context_ops = {
2061 .alloc = guc_context_alloc,
2063 .pre_pin = guc_context_pre_pin,
2064 .pin = guc_context_pin,
2065 .unpin = guc_context_unpin,
2066 .post_unpin = guc_context_post_unpin,
2068 .ban = guc_context_ban,
2070 .cancel_request = guc_context_cancel_request,
2072 .enter = intel_context_enter_engine,
2073 .exit = intel_context_exit_engine,
2075 .sched_disable = guc_context_sched_disable,
2078 .destroy = guc_context_destroy,
2080 .create_virtual = guc_create_virtual,
2083 static void submit_work_cb(struct irq_work *wrk)
2085 struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
2087 might_lock(&rq->engine->sched_engine->lock);
2088 i915_sw_fence_complete(&rq->submit);
2091 static void __guc_signal_context_fence(struct intel_context *ce)
2093 struct i915_request *rq, *rn;
2095 lockdep_assert_held(&ce->guc_state.lock);
2097 if (!list_empty(&ce->guc_state.fences))
2098 trace_intel_context_fence_release(ce);
2101 * Use an IRQ to ensure locking order of sched_engine->lock ->
2102 * ce->guc_state.lock is preserved.
2104 list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
2106 list_del(&rq->guc_fence_link);
2107 irq_work_queue(&rq->submit_work);
2110 INIT_LIST_HEAD(&ce->guc_state.fences);
2113 static void guc_signal_context_fence(struct intel_context *ce)
2115 unsigned long flags;
2117 spin_lock_irqsave(&ce->guc_state.lock, flags);
2118 clr_context_wait_for_deregister_to_register(ce);
2119 __guc_signal_context_fence(ce);
2120 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2123 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
2125 return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
2126 !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
2127 !submission_disabled(ce_to_guc(ce));
2130 static void guc_context_init(struct intel_context *ce)
2132 const struct i915_gem_context *ctx;
2133 int prio = I915_CONTEXT_DEFAULT_PRIORITY;
2136 ctx = rcu_dereference(ce->gem_context);
2138 prio = ctx->sched.priority;
2141 ce->guc_active.prio = map_i915_prio_to_guc_prio(prio);
2142 set_bit(CONTEXT_GUC_INIT, &ce->flags);
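/*
 * guc_request_alloc() runs once per request before it is built: it reserves
 * ring space (GUC_REQUEST_SIZE), pins a guc_id for the context, registers the
 * context with the GuC if needed and, if a schedule disable or deregister G2H
 * is still pending, parks the request on ce->guc_state.fences until the reply
 * arrives.
 */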
2145 static int guc_request_alloc(struct i915_request *rq)
2147 struct intel_context *ce = rq->context;
2148 struct intel_guc *guc = ce_to_guc(ce);
2149 unsigned long flags;
2152 GEM_BUG_ON(!intel_context_is_pinned(rq->context));
2155 * Flush enough space to reduce the likelihood of waiting after
2156 * we start building the request - in which case we will just
2157 * have to repeat work.
2159 rq->reserved_space += GUC_REQUEST_SIZE;
2162 * Note that after this point, we have committed to using
2163 * this request as it is being used to both track the
2164 * state of engine initialisation and liveness of the
2165 * golden renderstate above. Think twice before you try
2166 * to cancel/unwind this request now.
2169 /* Unconditionally invalidate GPU caches and TLBs. */
2170 ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
2174 rq->reserved_space -= GUC_REQUEST_SIZE;
2176 if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
2177 guc_context_init(ce);
2180 * Call pin_guc_id here rather than in the pinning step as with
2181 dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
2182 guc_id and creating horrible race conditions. This is especially bad
2183 when guc_ids are being stolen due to oversubscription. By the time
2184 this function is reached, it is guaranteed that the guc_id will be
2185 persistent until the generated request is retired, thus sealing these
2186 race conditions. It is still safe to fail here if guc_ids are
2187 exhausted; -EAGAIN is returned to the user, indicating that they can try
2188 again in the future.
2190 * There is no need for a lock here as the timeline mutex ensures at
2191 * most one context can be executing this code path at once. The
2192 * guc_id_ref is incremented once for every request in flight and
2193 * decremented on each retire. When it is zero, a lock around the
2194 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
2196 if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
2199 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
2200 if (unlikely(ret < 0))
2202 if (context_needs_register(ce, !!ret)) {
2203 ret = guc_lrc_desc_pin(ce, true);
2204 if (unlikely(ret)) { /* unwind */
2205 if (ret == -EPIPE) {
2206 disable_submission(guc);
2207 goto out; /* GPU will be reset */
2209 atomic_dec(&ce->guc_id.ref);
2210 unpin_guc_id(guc, ce);
2215 clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2219 * We block all requests on this context if a G2H is pending for a
2220 * schedule disable or context deregistration as the GuC will fail a
2221 * schedule enable or context registration if either G2H is pending
2222 respectively. Once a G2H returns, the fence that is blocking
2223 these requests is released (see guc_signal_context_fence).
2225 spin_lock_irqsave(&ce->guc_state.lock, flags);
2226 if (context_wait_for_deregister_to_register(ce) ||
2227 context_pending_disable(ce)) {
2228 init_irq_work(&rq->submit_work, submit_work_cb);
2229 i915_sw_fence_await(&rq->submit);
2231 list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
2233 incr_context_committed_requests(ce);
2234 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
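/*
 * For virtual engines the context state is allocated and pinned against the
 * first sibling's physical engine; with GuC submission the firmware selects
 * the actual engine at execution time, so any sibling of the class is equally
 * suitable for the purposes of LRC setup.
 */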
2239 static int guc_virtual_context_pre_pin(struct intel_context *ce,
2240 struct i915_gem_ww_ctx *ww,
2243 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
2245 return __guc_context_pre_pin(ce, engine, ww, vaddr);
2248 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
2250 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
2252 return __guc_context_pin(ce, engine, vaddr);
2255 static void guc_virtual_context_enter(struct intel_context *ce)
2257 intel_engine_mask_t tmp, mask = ce->engine->mask;
2258 struct intel_engine_cs *engine;
2260 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
2261 intel_engine_pm_get(engine);
2263 intel_timeline_enter(ce->timeline);
2266 static void guc_virtual_context_exit(struct intel_context *ce)
2268 intel_engine_mask_t tmp, mask = ce->engine->mask;
2269 struct intel_engine_cs *engine;
2271 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
2272 intel_engine_pm_put(engine);
2274 intel_timeline_exit(ce->timeline);
2277 static int guc_virtual_context_alloc(struct intel_context *ce)
2279 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
2281 return lrc_alloc(ce, engine);
2284 static const struct intel_context_ops virtual_guc_context_ops = {
2285 .alloc = guc_virtual_context_alloc,
2287 .pre_pin = guc_virtual_context_pre_pin,
2288 .pin = guc_virtual_context_pin,
2289 .unpin = guc_context_unpin,
2290 .post_unpin = guc_context_post_unpin,
2292 .ban = guc_context_ban,
2294 .cancel_request = guc_context_cancel_request,
2296 .enter = guc_virtual_context_enter,
2297 .exit = guc_virtual_context_exit,
2299 .sched_disable = guc_context_sched_disable,
2301 .destroy = guc_context_destroy,
2303 .get_sibling = guc_virtual_get_sibling,
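/*
 * Breadcrumb interrupt enable/disable is fanned out to every engine covered
 * by the shared breadcrumbs object (see guc_init_breadcrumbs() below, which
 * points all engines of a class at the first instance's breadcrumbs).
 */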
2307 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
2309 struct intel_engine_cs *sibling;
2310 intel_engine_mask_t tmp, mask = b->engine_mask;
2311 bool result = false;
2313 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
2314 result |= intel_engine_irq_enable(sibling);
2320 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
2322 struct intel_engine_cs *sibling;
2323 intel_engine_mask_t tmp, mask = b->engine_mask;
2325 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
2326 intel_engine_irq_disable(sibling);
2329 static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
2334 * In GuC submission mode we do not know which physical engine a request
2335 will be scheduled on; this creates a problem because the breadcrumb
2336 * interrupt is per physical engine. To work around this we attach
2337 * requests and direct all breadcrumb interrupts to the first instance
2338 * of an engine per class. In addition all breadcrumb interrupts are
2339 * enabled / disabled across an engine class in unison.
2341 for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
2342 struct intel_engine_cs *sibling =
2343 engine->gt->engine_class[engine->class][i];
2346 if (engine->breadcrumbs != sibling->breadcrumbs) {
2347 intel_breadcrumbs_put(engine->breadcrumbs);
2348 engine->breadcrumbs =
2349 intel_breadcrumbs_get(sibling->breadcrumbs);
2355 if (engine->breadcrumbs) {
2356 engine->breadcrumbs->engine_mask |= engine->mask;
2357 engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
2358 engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
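/*
 * GuC priority handling for in-flight requests: the i915 priority is mapped
 * to one of the GuC priority bands, and a request's band is only ever bumped
 * upwards while it is in flight. Per-band counts on the context feed
 * update_context_prio(), which derives the priority communicated to the GuC.
 */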
2362 static void guc_bump_inflight_request_prio(struct i915_request *rq,
2365 struct intel_context *ce = rq->context;
2366 u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
2368 /* Short circuit function */
2369 if (prio < I915_PRIORITY_NORMAL ||
2370 rq->guc_prio == GUC_PRIO_FINI ||
2371 (rq->guc_prio != GUC_PRIO_INIT &&
2372 !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
2375 spin_lock(&ce->guc_active.lock);
2376 if (rq->guc_prio != GUC_PRIO_FINI) {
2377 if (rq->guc_prio != GUC_PRIO_INIT)
2378 sub_context_inflight_prio(ce, rq->guc_prio);
2379 rq->guc_prio = new_guc_prio;
2380 add_context_inflight_prio(ce, rq->guc_prio);
2381 update_context_prio(ce);
2383 spin_unlock(&ce->guc_active.lock);
2386 static void guc_retire_inflight_request_prio(struct i915_request *rq)
2388 struct intel_context *ce = rq->context;
2390 spin_lock(&ce->guc_active.lock);
2391 guc_prio_fini(rq, ce);
2392 spin_unlock(&ce->guc_active.lock);
2395 static void sanitize_hwsp(struct intel_engine_cs *engine)
2397 struct intel_timeline *tl;
2399 list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
2400 intel_timeline_reset_seqno(tl);
2403 static void guc_sanitize(struct intel_engine_cs *engine)
2406 * Poison residual state on resume, in case the suspend didn't!
2408 * We have to assume that across suspend/resume (or other loss
2409 of control) the contents of our pinned buffers have been
2410 * lost, replaced by garbage. Since this doesn't always happen,
2411 * let's poison such state so that we more quickly spot when
2412 * we falsely assume it has been preserved.
2414 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
2415 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
2418 * The kernel_context HWSP is stored in the status_page. As above,
2419 * that may be lost on resume/initialisation, and so we need to
2420 * reset the value in the HWSP.
2422 sanitize_hwsp(engine);
2424 /* And scrub the dirty cachelines for the HWSP */
2425 clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
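/*
 * setup_hwsp() points the hardware at the engine's status page (HWSP) in the
 * GGTT and sets the HWSTAM write mask; start_engine() then takes the engine
 * out of legacy submission mode (GEN11_GFX_DISABLE_LEGACY_MODE) and clears
 * STOP_RING so requests can execute once the GuC starts submitting.
 */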
2428 static void setup_hwsp(struct intel_engine_cs *engine)
2430 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
2432 ENGINE_WRITE_FW(engine,
2434 i915_ggtt_offset(engine->status_page.vma));
2437 static void start_engine(struct intel_engine_cs *engine)
2439 ENGINE_WRITE_FW(engine,
2441 _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
2443 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
2444 ENGINE_POSTING_READ(engine, RING_MI_MODE);
2447 static int guc_resume(struct intel_engine_cs *engine)
2449 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
2451 intel_mocs_init_engine(engine);
2453 intel_breadcrumbs_reset(engine->breadcrumbs);
2456 start_engine(engine);
2461 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
2463 return !sched_engine->tasklet.callback;
2466 static void guc_set_default_submission(struct intel_engine_cs *engine)
2468 engine->submit_request = guc_submit_request;
2471 static inline void guc_kernel_context_pin(struct intel_guc *guc,
2472 struct intel_context *ce)
2474 if (context_guc_id_invalid(ce))
2475 pin_guc_id(guc, ce);
2476 guc_lrc_desc_pin(ce, true);
2479 static inline void guc_init_lrc_mapping(struct intel_guc *guc)
2481 struct intel_gt *gt = guc_to_gt(guc);
2482 struct intel_engine_cs *engine;
2483 enum intel_engine_id id;
2485 /* make sure all descriptors are clean... */
2486 xa_destroy(&guc->context_lookup);
2489 * Some contexts might have been pinned before we enabled GuC
2490 submission, so we need to add them to the GuC bookkeeping.
2491 Also, after a reset of the GuC we want to make sure that the
2492 * information shared with GuC is properly reset. The kernel LRCs are
2493 * not attached to the gem_context, so they need to be added separately.
2495 * Note: we purposefully do not check the return of guc_lrc_desc_pin,
2496 * because that function can only fail if a reset is just starting. This
2497 * is at the end of reset so presumably another reset isn't happening
2498 and even if it did, this code would be run again.
2501 for_each_engine(engine, gt, id)
2502 if (engine->kernel_context)
2503 guc_kernel_context_pin(guc, engine->kernel_context);
2506 static void guc_release(struct intel_engine_cs *engine)
2508 engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
2510 intel_engine_cleanup_common(engine);
2511 lrc_fini_wa_ctx(engine);
2514 static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
2516 struct intel_engine_cs *e;
2517 intel_engine_mask_t tmp, mask = engine->mask;
2519 for_each_engine_masked(e, engine->gt, mask, tmp)
2523 static void guc_default_vfuncs(struct intel_engine_cs *engine)
2525 /* Default vfuncs which can be overridden by each engine. */
2527 engine->resume = guc_resume;
2529 engine->cops = &guc_context_ops;
2530 engine->request_alloc = guc_request_alloc;
2531 engine->add_active_request = add_to_context;
2532 engine->remove_active_request = remove_from_context;
2534 engine->sched_engine->schedule = i915_schedule;
2536 engine->reset.prepare = guc_reset_nop;
2537 engine->reset.rewind = guc_rewind_nop;
2538 engine->reset.cancel = guc_reset_nop;
2539 engine->reset.finish = guc_reset_nop;
2541 engine->emit_flush = gen8_emit_flush_xcs;
2542 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
2543 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
2544 if (GRAPHICS_VER(engine->i915) >= 12) {
2545 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
2546 engine->emit_flush = gen12_emit_flush_xcs;
2548 engine->set_default_submission = guc_set_default_submission;
2550 engine->flags |= I915_ENGINE_HAS_PREEMPTION;
2551 engine->flags |= I915_ENGINE_HAS_TIMESLICES;
2554 * TODO: GuC supports timeslicing and semaphores as well, but they're
2555 handled by the firmware, so some minor tweaks are required before enabling them.
2558 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
2561 engine->emit_bb_start = gen8_emit_bb_start;
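/*
 * The render class needs different flush and fini-breadcrumb emitters per
 * graphics version; rcs_submission_override() below replaces the xcs defaults
 * installed by guc_default_vfuncs() accordingly.
 */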
2564 static void rcs_submission_override(struct intel_engine_cs *engine)
2566 switch (GRAPHICS_VER(engine->i915)) {
2568 engine->emit_flush = gen12_emit_flush_rcs;
2569 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
2572 engine->emit_flush = gen11_emit_flush_rcs;
2573 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
2576 engine->emit_flush = gen8_emit_flush_rcs;
2577 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
2582 static inline void guc_default_irqs(struct intel_engine_cs *engine)
2584 engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
2585 intel_engine_set_irq_handler(engine, cs_irq_handler);
2588 static void guc_sched_engine_destroy(struct kref *kref)
2590 struct i915_sched_engine *sched_engine =
2591 container_of(kref, typeof(*sched_engine), ref);
2592 struct intel_guc *guc = sched_engine->private_data;
2594 guc->sched_engine = NULL;
2595 tasklet_kill(&sched_engine->tasklet); /* flush the callback */
2596 kfree(sched_engine);
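/*
 * With GuC submission there is a single i915_sched_engine shared by all
 * engines (including virtual ones): the firmware, not i915, decides which
 * physical engine runs a request, so one submit tasklet and one priority
 * queue suffice. The first engine to be set up creates it; subsequent engines
 * simply take a reference.
 */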
2599 int intel_guc_submission_setup(struct intel_engine_cs *engine)
2601 struct drm_i915_private *i915 = engine->i915;
2602 struct intel_guc *guc = &engine->gt->uc.guc;
2605 * The setup relies on several assumptions (e.g. irqs always enabled)
2606 * that are only valid on gen11+
2608 GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
2610 if (!guc->sched_engine) {
2611 guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
2612 if (!guc->sched_engine)
2615 guc->sched_engine->schedule = i915_schedule;
2616 guc->sched_engine->disabled = guc_sched_engine_disabled;
2617 guc->sched_engine->private_data = guc;
2618 guc->sched_engine->destroy = guc_sched_engine_destroy;
2619 guc->sched_engine->bump_inflight_request_prio =
2620 guc_bump_inflight_request_prio;
2621 guc->sched_engine->retire_inflight_request_prio =
2622 guc_retire_inflight_request_prio;
2623 tasklet_setup(&guc->sched_engine->tasklet,
2624 guc_submission_tasklet);
2626 i915_sched_engine_put(engine->sched_engine);
2627 engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
2629 guc_default_vfuncs(engine);
2630 guc_default_irqs(engine);
2631 guc_init_breadcrumbs(engine);
2633 if (engine->class == RENDER_CLASS)
2634 rcs_submission_override(engine);
2636 lrc_init_wa_ctx(engine);
2638 /* Finally, take ownership and responsibility for cleanup! */
2639 engine->sanitize = guc_sanitize;
2640 engine->release = guc_release;
2645 void intel_guc_submission_enable(struct intel_guc *guc)
2647 guc_init_lrc_mapping(guc);
2650 void intel_guc_submission_disable(struct intel_guc *guc)
2652 /* Note: By the time we're here, GuC may have already been reset */
2655 static bool __guc_submission_supported(struct intel_guc *guc)
2657 /* GuC submission is unavailable for pre-Gen11 */
2658 return intel_guc_is_supported(guc) &&
2659 GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
2662 static bool __guc_submission_selected(struct intel_guc *guc)
2664 struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
2666 if (!intel_guc_submission_is_supported(guc))
2669 return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
2672 void intel_guc_submission_init_early(struct intel_guc *guc)
2674 guc->submission_supported = __guc_submission_supported(guc);
2675 guc->submission_selected = __guc_submission_selected(guc);
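/*
 * The handlers below process G2H (GuC-to-host) messages. The first dword of
 * each message is the LRC descriptor index (the context's guc_id), which is
 * translated back to an intel_context via the guc->context_lookup xarray.
 */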
2678 static inline struct intel_context *
2679 g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
2681 struct intel_context *ce;
2683 if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) {
2684 drm_err(&guc_to_gt(guc)->i915->drm,
2685 "Invalid desc_idx %u", desc_idx);
2689 ce = __get_context(guc, desc_idx);
2690 if (unlikely(!ce)) {
2691 drm_err(&guc_to_gt(guc)->i915->drm,
2692 "Context is NULL, desc_idx %u", desc_idx);
2699 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
2703 struct intel_context *ce;
2704 u32 desc_idx = msg[0];
2706 if (unlikely(len < 1)) {
2707 drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
2711 ce = g2h_context_lookup(guc, desc_idx);
2715 trace_intel_context_deregister_done(ce);
2717 #ifdef CONFIG_DRM_I915_SELFTEST
2718 if (unlikely(ce->drop_deregister)) {
2719 ce->drop_deregister = false;
2724 if (context_wait_for_deregister_to_register(ce)) {
2725 struct intel_runtime_pm *runtime_pm =
2726 &ce->engine->gt->i915->runtime_pm;
2727 intel_wakeref_t wakeref;
2730 * Previous owner of this guc_id has been deregistered, now it is safe to
2731 register this context.
2733 with_intel_runtime_pm(runtime_pm, wakeref)
2734 register_context(ce, true);
2735 guc_signal_context_fence(ce);
2736 intel_context_put(ce);
2737 } else if (context_destroyed(ce)) {
2738 /* Context has been destroyed */
2739 release_guc_id(guc, ce);
2740 __guc_context_destroy(ce);
2743 decr_outstanding_submission_g2h(guc);
2748 int intel_guc_sched_done_process_msg(struct intel_guc *guc,
2752 struct intel_context *ce;
2753 unsigned long flags;
2754 u32 desc_idx = msg[0];
2756 if (unlikely(len < 2)) {
2757 drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
2761 ce = g2h_context_lookup(guc, desc_idx);
2765 if (unlikely(context_destroyed(ce) ||
2766 (!context_pending_enable(ce) &&
2767 !context_pending_disable(ce)))) {
2768 drm_err(&guc_to_gt(guc)->i915->drm,
2769 "Bad context sched_state 0x%x, desc_idx %u",
2770 ce->guc_state.sched_state, desc_idx);
2774 trace_intel_context_sched_done(ce);
2776 if (context_pending_enable(ce)) {
2777 #ifdef CONFIG_DRM_I915_SELFTEST
2778 if (unlikely(ce->drop_schedule_enable)) {
2779 ce->drop_schedule_enable = false;
2784 spin_lock_irqsave(&ce->guc_state.lock, flags);
2785 clr_context_pending_enable(ce);
2786 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2787 } else if (context_pending_disable(ce)) {
2790 #ifdef CONFIG_DRM_I915_SELFTEST
2791 if (unlikely(ce->drop_schedule_disable)) {
2792 ce->drop_schedule_disable = false;
2798 * Unpin must be done before __guc_signal_context_fence,
2799 * otherwise a race exists between the requests getting
2800 * submitted + retired before this unpin completes resulting in
2801 the pin_count going to zero and the context still being enabled.
2804 intel_context_sched_disable_unpin(ce);
2806 spin_lock_irqsave(&ce->guc_state.lock, flags);
2807 banned = context_banned(ce);
2808 clr_context_banned(ce);
2809 clr_context_pending_disable(ce);
2810 __guc_signal_context_fence(ce);
2811 guc_blocked_fence_complete(ce);
2812 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2815 guc_cancel_context_requests(ce);
2816 intel_engine_signal_breadcrumbs(ce->engine);
2820 decr_outstanding_submission_g2h(guc);
2821 intel_context_put(ce);
2826 static void capture_error_state(struct intel_guc *guc,
2827 struct intel_context *ce)
2829 struct intel_gt *gt = guc_to_gt(guc);
2830 struct drm_i915_private *i915 = gt->i915;
2831 struct intel_engine_cs *engine = __context_to_physical_engine(ce);
2832 intel_wakeref_t wakeref;
2834 intel_engine_set_hung_context(engine, ce);
2835 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2836 i915_capture_error_state(gt, engine->mask);
2837 atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
2840 static void guc_context_replay(struct intel_context *ce)
2842 struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
2844 __guc_reset_context(ce, true);
2845 tasklet_hi_schedule(&sched_engine->tasklet);
2848 static void guc_handle_context_reset(struct intel_guc *guc,
2849 struct intel_context *ce)
2851 trace_intel_context_reset(ce);
2854 * XXX: Racy if request cancellation has occurred, see comment in
2855 * __guc_reset_context().
2857 if (likely(!intel_context_is_banned(ce) &&
2858 !context_blocked(ce))) {
2859 capture_error_state(guc, ce);
2860 guc_context_replay(ce);
2864 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
2865 const u32 *msg, u32 len)
2867 struct intel_context *ce;
2870 if (unlikely(len != 1)) {
2871 drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
2876 ce = g2h_context_lookup(guc, desc_idx);
2880 guc_handle_context_reset(guc, ce);
2885 static struct intel_engine_cs *
2886 guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
2888 struct intel_gt *gt = guc_to_gt(guc);
2889 u8 engine_class = guc_class_to_engine_class(guc_class);
2891 /* Class index is checked in class converter */
2892 GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
2894 return gt->engine_class[engine_class][instance];
2897 int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
2898 const u32 *msg, u32 len)
2900 struct intel_engine_cs *engine;
2901 u8 guc_class, instance;
2904 if (unlikely(len != 3)) {
2905 drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
2913 engine = guc_lookup_engine(guc, guc_class, instance);
2914 if (unlikely(!engine)) {
2915 drm_err(&guc_to_gt(guc)->i915->drm,
2916 "Invalid engine %d:%d", guc_class, instance);
2920 intel_gt_handle_error(guc_to_gt(guc), engine->mask,
2922 "GuC failed to reset %s (reason=0x%08x)\n",
2923 engine->name, reason);
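/*
 * Unlike execlists, with GuC submission i915 does not know which context was
 * running when an engine hung, so finding the hung context means walking
 * every registered context and looking for one with an active request whose
 * engine matches (or, for virtual contexts, whose engine mask overlaps) the
 * hung engine.
 */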
2928 void intel_guc_find_hung_context(struct intel_engine_cs *engine)
2930 struct intel_guc *guc = &engine->gt->uc.guc;
2931 struct intel_context *ce;
2932 struct i915_request *rq;
2933 unsigned long index;
2934 unsigned long flags;
2936 /* Reset called during driver load? GuC not yet initialised! */
2937 if (unlikely(!guc_submission_initialized(guc)))
2940 xa_lock_irqsave(&guc->context_lookup, flags);
2941 xa_for_each(&guc->context_lookup, index, ce) {
2942 if (!kref_get_unless_zero(&ce->ref))
2945 xa_unlock(&guc->context_lookup);
2947 if (!intel_context_is_pinned(ce))
2950 if (intel_engine_is_virtual(ce->engine)) {
2951 if (!(ce->engine->mask & engine->mask))
2954 if (ce->engine != engine)
2958 list_for_each_entry(rq, &ce->guc_active.requests, sched.link) {
2959 if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
2962 intel_engine_set_hung_context(engine, ce);
2964 /* Can only cope with one hang at a time... */
2965 intel_context_put(ce);
2966 xa_lock(&guc->context_lookup);
2970 intel_context_put(ce);
2971 xa_lock(&guc->context_lookup);
2974 xa_unlock_irqrestore(&guc->context_lookup, flags);
2977 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
2978 struct i915_request *hung_rq,
2979 struct drm_printer *m)
2981 struct intel_guc *guc = &engine->gt->uc.guc;
2982 struct intel_context *ce;
2983 unsigned long index;
2984 unsigned long flags;
2986 /* Reset called during driver load? GuC not yet initialised! */
2987 if (unlikely(!guc_submission_initialized(guc)))
2990 xa_lock_irqsave(&guc->context_lookup, flags);
2991 xa_for_each(&guc->context_lookup, index, ce) {
2992 if (!kref_get_unless_zero(&ce->ref))
2995 xa_unlock(&guc->context_lookup);
2997 if (!intel_context_is_pinned(ce))
3000 if (intel_engine_is_virtual(ce->engine)) {
3001 if (!(ce->engine->mask & engine->mask))
3004 if (ce->engine != engine)
3008 spin_lock(&ce->guc_active.lock);
3009 intel_engine_dump_active_requests(&ce->guc_active.requests,
3011 spin_unlock(&ce->guc_active.lock);
3014 intel_context_put(ce);
3015 xa_lock(&guc->context_lookup);
3017 xa_unlock_irqrestore(&guc->context_lookup, flags);
3020 void intel_guc_submission_print_info(struct intel_guc *guc,
3021 struct drm_printer *p)
3023 struct i915_sched_engine *sched_engine = guc->sched_engine;
3025 unsigned long flags;
3030 drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
3031 atomic_read(&guc->outstanding_submission_g2h));
3032 drm_printf(p, "GuC tasklet count: %u\n\n",
3033 atomic_read(&sched_engine->tasklet.count));
3035 spin_lock_irqsave(&sched_engine->lock, flags);
3036 drm_printf(p, "Requests in GuC submit tasklet:\n");
3037 for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
3038 struct i915_priolist *pl = to_priolist(rb);
3039 struct i915_request *rq;
3041 priolist_for_each_request(rq, pl)
3042 drm_printf(p, "guc_id=%u, seqno=%llu\n",
3043 rq->context->guc_id.id,
3046 spin_unlock_irqrestore(&sched_engine->lock, flags);
3047 drm_printf(p, "\n");
3050 static inline void guc_log_context_priority(struct drm_printer *p,
3051 struct intel_context *ce)
3055 drm_printf(p, "\t\tPriority: %d\n", ce->guc_active.prio);
3056 drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
3057 for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
3058 i < GUC_CLIENT_PRIORITY_NUM; ++i) {
3059 drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
3060 i, ce->guc_active.prio_count[i]);
3062 drm_printf(p, "\n");
3065 void intel_guc_submission_print_context_info(struct intel_guc *guc,
3066 struct drm_printer *p)
3068 struct intel_context *ce;
3069 unsigned long index;
3070 unsigned long flags;
3072 xa_lock_irqsave(&guc->context_lookup, flags);
3073 xa_for_each(&guc->context_lookup, index, ce) {
3074 drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
3075 drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
3076 drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
3078 ce->lrc_reg_state[CTX_RING_HEAD]);
3079 drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
3081 ce->lrc_reg_state[CTX_RING_TAIL]);
3082 drm_printf(p, "\t\tContext Pin Count: %u\n",
3083 atomic_read(&ce->pin_count));
3084 drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
3085 atomic_read(&ce->guc_id.ref));
3086 drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
3087 ce->guc_state.sched_state);
3089 guc_log_context_priority(p, ce);
3091 xa_unlock_irqrestore(&guc->context_lookup, flags);
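/*
 * Virtual engine creation for GuC submission: unlike the execlists backend
 * there is no load-balancing logic here, since the GuC does the balancing;
 * the virtual engine is mostly a container for the sibling mask plus vfuncs
 * and properties copied from the first sibling. A minimal usage sketch
 * (engine names are placeholders, not taken from this file):
 *
 *	struct intel_engine_cs *siblings[] = { vcs0, vcs1 };
 *	struct intel_context *ce;
 *
 *	ce = guc_create_virtual(siblings, ARRAY_SIZE(siblings));
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 */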
3094 static struct intel_context *
3095 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
3097 struct guc_virtual_engine *ve;
3098 struct intel_guc *guc;
3102 ve = kzalloc(sizeof(*ve), GFP_KERNEL);
3104 return ERR_PTR(-ENOMEM);
3106 guc = &siblings[0]->gt->uc.guc;
3108 ve->base.i915 = siblings[0]->i915;
3109 ve->base.gt = siblings[0]->gt;
3110 ve->base.uncore = siblings[0]->uncore;
3113 ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
3114 ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
3115 ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
3116 ve->base.saturated = ALL_ENGINES;
3118 snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
3120 ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
3122 ve->base.cops = &virtual_guc_context_ops;
3123 ve->base.request_alloc = guc_request_alloc;
3124 ve->base.bump_serial = virtual_guc_bump_serial;
3126 ve->base.submit_request = guc_submit_request;
3128 ve->base.flags = I915_ENGINE_IS_VIRTUAL;
3130 intel_context_init(&ve->context, &ve->base);
3132 for (n = 0; n < count; n++) {
3133 struct intel_engine_cs *sibling = siblings[n];
3135 GEM_BUG_ON(!is_power_of_2(sibling->mask));
3136 if (sibling->mask & ve->base.mask) {
3137 DRM_DEBUG("duplicate %s entry in load balancer\n",
3143 ve->base.mask |= sibling->mask;
3145 if (n != 0 && ve->base.class != sibling->class) {
3146 DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
3147 sibling->class, ve->base.class);
3150 } else if (n == 0) {
3151 ve->base.class = sibling->class;
3152 ve->base.uabi_class = sibling->uabi_class;
3153 snprintf(ve->base.name, sizeof(ve->base.name),
3154 "v%dx%d", ve->base.class, count);
3155 ve->base.context_size = sibling->context_size;
3157 ve->base.add_active_request =
3158 sibling->add_active_request;
3159 ve->base.remove_active_request =
3160 sibling->remove_active_request;
3161 ve->base.emit_bb_start = sibling->emit_bb_start;
3162 ve->base.emit_flush = sibling->emit_flush;
3163 ve->base.emit_init_breadcrumb =
3164 sibling->emit_init_breadcrumb;
3165 ve->base.emit_fini_breadcrumb =
3166 sibling->emit_fini_breadcrumb;
3167 ve->base.emit_fini_breadcrumb_dw =
3168 sibling->emit_fini_breadcrumb_dw;
3169 ve->base.breadcrumbs =
3170 intel_breadcrumbs_get(sibling->breadcrumbs);
3172 ve->base.flags |= sibling->flags;
3174 ve->base.props.timeslice_duration_ms =
3175 sibling->props.timeslice_duration_ms;
3176 ve->base.props.preempt_timeout_ms =
3177 sibling->props.preempt_timeout_ms;
3181 return &ve->context;
3184 intel_context_put(&ve->context);
3185 return ERR_PTR(err);
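/*
 * A virtual engine is considered to have a heartbeat as long as any of its
 * physical siblings has a non-zero heartbeat interval configured. Sketch of a
 * typical check (the caller shown is illustrative, not from this file):
 *
 *	if (intel_engine_is_virtual(engine) &&
 *	    !intel_guc_virtual_engine_has_heartbeat(engine))
 *		... the context cannot rely on heartbeats for cleanup ...
 */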
3188 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
3190 struct intel_engine_cs *engine;
3191 intel_engine_mask_t tmp, mask = ve->mask;
3193 for_each_engine_masked(engine, ve->gt, mask, tmp)
3194 if (READ_ONCE(engine->props.heartbeat_interval_ms))
3200 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3201 #include "selftest_guc.c"