1 // SPDX-License-Identifier: MIT
3 * Copyright © 2014 Intel Corporation
6 #include <linux/circ_buf.h>
8 #include "gem/i915_gem_context.h"
9 #include "gem/i915_gem_lmem.h"
10 #include "gt/gen8_engine_cs.h"
11 #include "gt/intel_breadcrumbs.h"
12 #include "gt/intel_context.h"
13 #include "gt/intel_engine_heartbeat.h"
14 #include "gt/intel_engine_pm.h"
15 #include "gt/intel_engine_regs.h"
16 #include "gt/intel_gpu_commands.h"
17 #include "gt/intel_gt.h"
18 #include "gt/intel_gt_clock_utils.h"
19 #include "gt/intel_gt_irq.h"
20 #include "gt/intel_gt_pm.h"
21 #include "gt/intel_gt_regs.h"
22 #include "gt/intel_gt_requests.h"
23 #include "gt/intel_lrc.h"
24 #include "gt/intel_lrc_reg.h"
25 #include "gt/intel_mocs.h"
26 #include "gt/intel_ring.h"
28 #include "intel_guc_ads.h"
29 #include "intel_guc_capture.h"
30 #include "intel_guc_print.h"
31 #include "intel_guc_submission.h"
35 #include "i915_trace.h"
38 * DOC: GuC-based command submission
40 * The Scratch registers:
41 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
42 * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
43 * triggers an interrupt on the GuC via another register write (0xC4C8).
44 * Firmware writes a success/fail code back to the action register after it
45 * processes the request. The kernel driver polls waiting for this update and
46 * then processes the response.
48 * Command Transport buffers (CTBs):
49 * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
50 * - G2H) are a message interface between the i915 and GuC.
52 * Context registration:
53 * Before a context can be submitted it must be registered with the GuC via a
54 * H2G. A unique guc_id is associated with each context. The context is either
55 * registered at request creation time (normal operation) or at submission time
56 * (abnormal operation, e.g. after a reset).
58 * Context submission:
59 * The i915 updates the LRC tail value in memory. The i915 must enable the
60 * scheduling of the context within the GuC for the GuC to actually consider it.
61 * Therefore, the first time a disabled context is submitted we use a schedule
62 * enable H2G, while follow up submissions are done via the context submit H2G,
63 * which informs the GuC that a previously enabled context has new work available.
66 * Context unpin:
67 * To unpin a context a H2G is used to disable scheduling. When the
68 * corresponding G2H returns indicating the scheduling disable operation has
69 * completed it is safe to unpin the context. While a disable is in flight it
70 * isn't safe to resubmit the context so a fence is used to stall all future
71 * requests of that context until the G2H is returned. Because this interaction
72 * with the GuC takes a non-zero amount of time we delay the disabling of
73 * scheduling after the pin count goes to zero by a configurable period of time
74 * (see SCHED_DISABLE_DELAY_MS). The thought is this gives the user a window of
75 * time to resubmit something on the context before doing this costly operation.
76 * This delay is only done if the context isn't closed and the guc_id usage is
77 * less than a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD).
79 * Context deregistration:
80 * Before a context can be destroyed or if we steal its guc_id we must
81 * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
82 * safe to submit anything to this guc_id until the deregister completes so a
83 * fence is used to stall all requests associated with this guc_id until the
84 * corresponding G2H returns indicating the guc_id has been deregistered.
86 * submission_state.guc_ids:
87 * Unique number associated with private GuC context data passed in during
88 * context registration / submission / deregistration. 64k available. Simple ida
89 * is used for allocation.
91 * Stealing guc_ids:
92 * If no guc_ids are available they can be stolen from another context at
93 * request creation time if that context is unpinned. If a guc_id can't be found
94 * we punt this problem to the user as we believe this is near impossible to hit
95 * during normal use cases.
97 * Locking:
98 * In the GuC submission code we have 3 basic spin locks which protect
99 * everything. Details about each below.
101 * sched_engine->lock
102 * This is the submission lock for all contexts that share an i915 schedule
103 * engine (sched_engine), thus only one of the contexts which share a
104 * sched_engine can be submitting at a time. Currently only one sched_engine is
105 * used for all of GuC submission but that could change in the future.
107 * guc->submission_state.lock
108 * Global lock for GuC submission state. Protects guc_ids and the list of
109 * destroyed contexts.
111 * ce->guc_state.lock
112 * Protects everything under ce->guc_state. Ensures that a context is in the
113 * correct state before issuing a H2G. e.g. We don't issue a schedule disable
114 * on a disabled context (bad idea), we don't issue a schedule enable when a
115 * schedule disable is in flight, etc... Also protects list of inflight requests
116 * on the context and the priority management state. Lock is individual to each
117 * context.
119 * Lock ordering rules:
120 * sched_engine->lock -> ce->guc_state.lock
121 * guc->submission_state.lock -> ce->guc_state.lock
123 * Reset races:
124 * When a full GT reset is triggered it is assumed that some G2H responses to
125 * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
126 * fatal as we do certain operations upon receiving a G2H (e.g. destroy
127 * contexts, release guc_ids, etc...). When this occurs we can scrub the
128 * context state and cleanup appropriately, however this is quite racy.
129 * To avoid races, the reset code must disable submission before scrubbing for
130 * the missing G2H, while the submission code must check for submission being
131 * disabled and skip sending H2Gs and updating context states when it is. Both
132 * sides must also make sure to hold the relevant locks.
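 *
 * As an illustrative sketch only (not part of the driver code), the lock
 * ordering rules above mean that code needing both the global submission
 * state lock and a context's guc_state lock nests them outer-to-inner:
 *
 *	spin_lock_irqsave(&guc->submission_state.lock, flags);
 *	spin_lock(&ce->guc_state.lock);
 *	... update ce->guc_id / ce->guc_state.sched_state ...
 *	spin_unlock(&ce->guc_state.lock);
 *	spin_unlock_irqrestore(&guc->submission_state.lock, flags);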
135 /* GuC Virtual Engine */
136 struct guc_virtual_engine {
137 struct intel_engine_cs base;
138 struct intel_context context;
141 static struct intel_context *
142 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
143 unsigned long flags);
145 static struct intel_context *
146 guc_create_parallel(struct intel_engine_cs **engines,
147 unsigned int num_siblings,
150 #define GUC_REQUEST_SIZE 64 /* bytes */
153 * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
154 * per the GuC submission interface. A different allocation algorithm is used
155 * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
156 * partition the guc_id space. We believe the number of multi-lrc contexts in
157 * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
158 * multi-lrc.
160 #define NUMBER_MULTI_LRC_GUC_ID(guc) \
161 ((guc)->submission_state.num_guc_ids / 16)
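/*
 * Illustrative sketch (not driver code): with the default 64k guc_ids
 * mentioned in the DOC comment above, the split works out as:
 *
 *	NUMBER_MULTI_LRC_GUC_ID = 65536 / 16 = 4096
 *	multi-lrc ids  : 0    .. 4095  (allocated from guc_ids_bitmap)
 *	single-lrc ids : 4096 .. 65535 (allocated from the guc_ids ida)
 *
 * See new_guc_id() further down for the two allocation paths.
 */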
164 * Below is a set of functions which control the GuC scheduling state which
165 * require a lock.
167 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
168 #define SCHED_STATE_DESTROYED BIT(1)
169 #define SCHED_STATE_PENDING_DISABLE BIT(2)
170 #define SCHED_STATE_BANNED BIT(3)
171 #define SCHED_STATE_ENABLED BIT(4)
172 #define SCHED_STATE_PENDING_ENABLE BIT(5)
173 #define SCHED_STATE_REGISTERED BIT(6)
174 #define SCHED_STATE_POLICY_REQUIRED BIT(7)
175 #define SCHED_STATE_CLOSED BIT(8)
176 #define SCHED_STATE_BLOCKED_SHIFT 9
177 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
178 #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
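/*
 * Illustrative sketch (not driver code): bits 0-8 of sched_state are
 * individual flags while bits 9-20 form a counter of outstanding blocks, so
 * incr_context_blocked()/decr_context_blocked() below simply add/subtract
 * SCHED_STATE_BLOCKED:
 *
 *	sched_state  = SCHED_STATE_ENABLED;
 *	sched_state += SCHED_STATE_BLOCKED;		blocked count is now 1
 *	sched_state += SCHED_STATE_BLOCKED;		blocked count is now 2
 *	count = (sched_state & SCHED_STATE_BLOCKED_MASK) >>
 *		SCHED_STATE_BLOCKED_SHIFT;		count == 2
 */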
180 static inline void init_sched_state(struct intel_context *ce)
182 lockdep_assert_held(&ce->guc_state.lock);
183 ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
187 * Kernel contexts can have SCHED_STATE_REGISTERED after suspend.
188 * A context close can race with the submission path, so SCHED_STATE_CLOSED
189 * can be set immediately before we try to register.
191 #define SCHED_STATE_VALID_INIT \
192 (SCHED_STATE_BLOCKED_MASK | \
193 SCHED_STATE_CLOSED | \
194 SCHED_STATE_REGISTERED)
197 static bool sched_state_is_init(struct intel_context *ce)
199 return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT);
203 context_wait_for_deregister_to_register(struct intel_context *ce)
205 return ce->guc_state.sched_state &
206 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
210 set_context_wait_for_deregister_to_register(struct intel_context *ce)
212 lockdep_assert_held(&ce->guc_state.lock);
213 ce->guc_state.sched_state |=
214 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
218 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
220 lockdep_assert_held(&ce->guc_state.lock);
221 ce->guc_state.sched_state &=
222 ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
226 context_destroyed(struct intel_context *ce)
228 return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
232 set_context_destroyed(struct intel_context *ce)
234 lockdep_assert_held(&ce->guc_state.lock);
235 ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
238 static inline bool context_pending_disable(struct intel_context *ce)
240 return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
243 static inline void set_context_pending_disable(struct intel_context *ce)
245 lockdep_assert_held(&ce->guc_state.lock);
246 ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
249 static inline void clr_context_pending_disable(struct intel_context *ce)
251 lockdep_assert_held(&ce->guc_state.lock);
252 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
255 static inline bool context_banned(struct intel_context *ce)
257 return ce->guc_state.sched_state & SCHED_STATE_BANNED;
260 static inline void set_context_banned(struct intel_context *ce)
262 lockdep_assert_held(&ce->guc_state.lock);
263 ce->guc_state.sched_state |= SCHED_STATE_BANNED;
266 static inline void clr_context_banned(struct intel_context *ce)
268 lockdep_assert_held(&ce->guc_state.lock);
269 ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
272 static inline bool context_enabled(struct intel_context *ce)
274 return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
277 static inline void set_context_enabled(struct intel_context *ce)
279 lockdep_assert_held(&ce->guc_state.lock);
280 ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
283 static inline void clr_context_enabled(struct intel_context *ce)
285 lockdep_assert_held(&ce->guc_state.lock);
286 ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
289 static inline bool context_pending_enable(struct intel_context *ce)
291 return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
294 static inline void set_context_pending_enable(struct intel_context *ce)
296 lockdep_assert_held(&ce->guc_state.lock);
297 ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
300 static inline void clr_context_pending_enable(struct intel_context *ce)
302 lockdep_assert_held(&ce->guc_state.lock);
303 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
306 static inline bool context_registered(struct intel_context *ce)
308 return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
311 static inline void set_context_registered(struct intel_context *ce)
313 lockdep_assert_held(&ce->guc_state.lock);
314 ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
317 static inline void clr_context_registered(struct intel_context *ce)
319 lockdep_assert_held(&ce->guc_state.lock);
320 ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
323 static inline bool context_policy_required(struct intel_context *ce)
325 return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
328 static inline void set_context_policy_required(struct intel_context *ce)
330 lockdep_assert_held(&ce->guc_state.lock);
331 ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
334 static inline void clr_context_policy_required(struct intel_context *ce)
336 lockdep_assert_held(&ce->guc_state.lock);
337 ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
340 static inline bool context_close_done(struct intel_context *ce)
342 return ce->guc_state.sched_state & SCHED_STATE_CLOSED;
345 static inline void set_context_close_done(struct intel_context *ce)
347 lockdep_assert_held(&ce->guc_state.lock);
348 ce->guc_state.sched_state |= SCHED_STATE_CLOSED;
351 static inline u32 context_blocked(struct intel_context *ce)
353 return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
354 SCHED_STATE_BLOCKED_SHIFT;
357 static inline void incr_context_blocked(struct intel_context *ce)
359 lockdep_assert_held(&ce->guc_state.lock);
361 ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
363 GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
366 static inline void decr_context_blocked(struct intel_context *ce)
368 lockdep_assert_held(&ce->guc_state.lock);
370 GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
372 ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
375 static struct intel_context *
376 request_to_scheduling_context(struct i915_request *rq)
378 return intel_context_to_parent(rq->context);
381 static inline bool context_guc_id_invalid(struct intel_context *ce)
383 return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
386 static inline void set_context_guc_id_invalid(struct intel_context *ce)
388 ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
391 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
393 return &ce->engine->gt->uc.guc;
396 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
398 return rb_entry(rb, struct i915_priolist, node);
402 * When using multi-lrc submission a scratch memory area is reserved in the
403 * parent's context state for the process descriptor, work queue, and handshake
404 * between the parent + children contexts to insert safe preemption points
405 * between each of the BBs. Currently the scratch area is sized to a page.
407 * The layout of this scratch area is below:
408 * 0						guc_process_desc (v69) / guc_sched_wq_desc (v70+)
409 * + sizeof(struct guc_process_desc)		child go
410 * + CACHELINE_BYTES				child join[0]
411 * ...
412 * + CACHELINE_BYTES				child join[n - 1]
413 * ...
414 * PARENT_SCRATCH_SIZE / 2			work queue start
415 * ...
416 * PARENT_SCRATCH_SIZE - 1			work queue end
418 #define WQ_SIZE (PARENT_SCRATCH_SIZE / 2)
419 #define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
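/*
 * Illustrative sketch (not driver code), assuming the parent scratch area is
 * a single 4 KiB page as described above:
 *
 *	WQ_SIZE   = PARENT_SCRATCH_SIZE / 2    = 2048 bytes
 *	WQ_OFFSET = PARENT_SCRATCH_SIZE - 2048 = 2048 bytes
 *
 * i.e. the descriptors and go/join semaphores occupy the first half of the
 * page and the work queue ring the second half.
 */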
421 struct sync_semaphore {
423 u8 unused[CACHELINE_BYTES - sizeof(u32)];
426 struct parent_scratch {
428 struct guc_sched_wq_desc wq_desc;
429 struct guc_process_desc_v69 pdesc;
432 struct sync_semaphore go;
433 struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
435 u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
436 sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
438 u32 wq[WQ_SIZE / sizeof(u32)];
441 static u32 __get_parent_scratch_offset(struct intel_context *ce)
443 GEM_BUG_ON(!ce->parallel.guc.parent_page);
445 return ce->parallel.guc.parent_page * PAGE_SIZE;
448 static u32 __get_wq_offset(struct intel_context *ce)
450 BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
452 return __get_parent_scratch_offset(ce) + WQ_OFFSET;
455 static struct parent_scratch *
456 __get_parent_scratch(struct intel_context *ce)
458 BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
459 BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
462 * Need to subtract LRC_STATE_OFFSET here as the
463 * parallel.guc.parent_page is the offset into ce->state while
464 * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
466 return (struct parent_scratch *)
467 (ce->lrc_reg_state +
468 ((__get_parent_scratch_offset(ce) -
469 LRC_STATE_OFFSET) / sizeof(u32)));
472 static struct guc_process_desc_v69 *
473 __get_process_desc_v69(struct intel_context *ce)
475 struct parent_scratch *ps = __get_parent_scratch(ce);
477 return &ps->descs.pdesc;
480 static struct guc_sched_wq_desc *
481 __get_wq_desc_v70(struct intel_context *ce)
483 struct parent_scratch *ps = __get_parent_scratch(ce);
485 return &ps->descs.wq_desc;
488 static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
491 * Check for space in work queue. Caching a value of head pointer in
492 * intel_context structure in order to reduce the number of accesses to shared
493 * GPU memory which may be across a PCIe bus.
495 #define AVAILABLE_SPACE \
496 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
497 if (wqi_size > AVAILABLE_SPACE) {
498 ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
500 if (wqi_size > AVAILABLE_SPACE)
501 return NULL;
502 }
503 #undef AVAILABLE_SPACE
505 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
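/*
 * Illustrative sketch (not driver code): with WQ_SIZE a power of two,
 * CIRC_SPACE() above reduces to mask arithmetic. For hypothetical values
 * WQ_SIZE = 2048, wqi_tail = 2000 and a cached wqi_head = 100:
 *
 *	space = (wqi_head - wqi_tail - 1) & (WQ_SIZE - 1)
 *	      = (100 - 2000 - 1) & 2047
 *	      = 147 bytes
 *
 * Only if this is smaller than the requested wqi_size is the head re-read
 * from the memory shared with the GuC.
 */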
508 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
510 struct intel_context *ce = xa_load(&guc->context_lookup, id);
512 GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID);
517 static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
519 struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
524 GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
529 static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
534 size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
536 ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
537 (void **)&guc->lrc_desc_pool_vaddr_v69);
544 static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
546 if (!guc->lrc_desc_pool_vaddr_v69)
549 guc->lrc_desc_pool_vaddr_v69 = NULL;
550 i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
553 static inline bool guc_submission_initialized(struct intel_guc *guc)
555 return guc->submission_initialized;
558 static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
560 struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
563 memset(desc, 0, sizeof(*desc));
566 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
568 return __get_context(guc, id);
571 static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
572 struct intel_context *ce)
577 * xarray API doesn't have xa_save_irqsave wrapper, so calling the
578 * lower level functions directly.
580 xa_lock_irqsave(&guc->context_lookup, flags);
581 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
582 xa_unlock_irqrestore(&guc->context_lookup, flags);
585 static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
589 if (unlikely(!guc_submission_initialized(guc)))
592 _reset_lrc_desc_v69(guc, id);
595 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
596 * the lower level functions directly.
598 xa_lock_irqsave(&guc->context_lookup, flags);
599 __xa_erase(&guc->context_lookup, id);
600 xa_unlock_irqrestore(&guc->context_lookup, flags);
603 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
605 if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
606 wake_up_all(&guc->ct.wq);
609 static int guc_submission_send_busy_loop(struct intel_guc *guc,
616 * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
617 * so we don't handle the case where we don't get a reply because we
618 * aborted the send due to the channel being busy.
620 GEM_BUG_ON(g2h_len_dw && !loop);
623 atomic_inc(&guc->outstanding_submission_g2h);
625 return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
628 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
633 const int state = interruptible ?
634 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
638 GEM_BUG_ON(timeout < 0);
640 if (!atomic_read(wait_var))
647 prepare_to_wait(&guc->ct.wq, &wait, state);
649 if (!atomic_read(wait_var))
652 if (signal_pending_state(state, current)) {
662 timeout = io_schedule_timeout(timeout);
664 finish_wait(&guc->ct.wq, &wait);
666 return (timeout < 0) ? timeout : 0;
669 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
671 if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
674 return intel_guc_wait_for_pending_msg(guc,
675 &guc->outstanding_submission_g2h,
679 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
680 static int try_context_registration(struct intel_context *ce, bool loop);
682 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
685 struct intel_context *ce = request_to_scheduling_context(rq);
691 lockdep_assert_held(&rq->engine->sched_engine->lock);
694 * Corner case where requests were sitting in the priority list or a
695 * request was resubmitted after the context was banned.
697 if (unlikely(!intel_context_is_schedulable(ce))) {
698 i915_request_put(i915_request_mark_eio(rq));
699 intel_engine_signal_breadcrumbs(ce->engine);
703 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
704 GEM_BUG_ON(context_guc_id_invalid(ce));
706 if (context_policy_required(ce)) {
707 err = guc_context_policy_init_v70(ce, false);
712 spin_lock(&ce->guc_state.lock);
715 * The request / context will be run on the hardware when scheduling
716 * gets enabled in the unblock. For multi-lrc we still submit the
717 * context to move the LRC tails.
719 if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
722 enabled = context_enabled(ce) || context_blocked(ce);
725 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
726 action[len++] = ce->guc_id.id;
727 action[len++] = GUC_CONTEXT_ENABLE;
728 set_context_pending_enable(ce);
729 intel_context_get(ce);
730 g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
732 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
733 action[len++] = ce->guc_id.id;
736 err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
737 if (!enabled && !err) {
738 trace_intel_context_sched_enable(ce);
739 atomic_inc(&guc->outstanding_submission_g2h);
740 set_context_enabled(ce);
743 * Without multi-lrc, the KMD does the submission step (moving the
744 * lrc tail) so enabling scheduling is sufficient to submit the
745 * context. This isn't the case in multi-lrc submission as the
746 * GuC needs to move the tails, hence the need for another H2G
747 * to submit a multi-lrc context after enabling scheduling.
749 if (intel_context_is_parent(ce)) {
750 action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
751 err = intel_guc_send_nb(guc, action, len - 1, 0);
753 } else if (!enabled) {
754 clr_context_pending_enable(ce);
755 intel_context_put(ce);
758 trace_i915_request_guc_submit(rq);
761 spin_unlock(&ce->guc_state.lock);
765 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
767 int ret = __guc_add_request(guc, rq);
769 if (unlikely(ret == -EBUSY)) {
770 guc->stalled_request = rq;
771 guc->submission_stall_reason = STALL_ADD_REQUEST;
777 static inline void guc_set_lrc_tail(struct i915_request *rq)
779 rq->context->lrc_reg_state[CTX_RING_TAIL] =
780 intel_ring_set_tail(rq->ring, rq->tail);
783 static inline int rq_prio(const struct i915_request *rq)
785 return rq->sched.attr.priority;
788 static bool is_multi_lrc_rq(struct i915_request *rq)
790 return intel_context_is_parallel(rq->context);
793 static bool can_merge_rq(struct i915_request *rq,
794 struct i915_request *last)
796 return request_to_scheduling_context(rq) ==
797 request_to_scheduling_context(last);
800 static u32 wq_space_until_wrap(struct intel_context *ce)
802 return (WQ_SIZE - ce->parallel.guc.wqi_tail);
805 static void write_wqi(struct intel_context *ce, u32 wqi_size)
807 BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
810 * Ensure WQI are visible before updating tail
812 intel_guc_write_barrier(ce_to_guc(ce));
814 ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
815 (WQ_SIZE - 1);
816 WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
819 static int guc_wq_noop_append(struct intel_context *ce)
821 u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
822 u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
827 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
829 *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
830 FIELD_PREP(WQ_LEN_MASK, len_dw);
831 ce->parallel.guc.wqi_tail = 0;
836 static int __guc_wq_item_append(struct i915_request *rq)
838 struct intel_context *ce = request_to_scheduling_context(rq);
839 struct intel_context *child;
840 unsigned int wqi_size = (ce->parallel.number_children + 4) *
841 sizeof(u32);
843 u32 len_dw = (wqi_size / sizeof(u32)) - 1;
846 /* Ensure context is in the correct state before updating the work queue */
847 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
848 GEM_BUG_ON(context_guc_id_invalid(ce));
849 GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
850 GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
852 /* Insert NOOP if this work queue item will wrap the tail pointer. */
853 if (wqi_size > wq_space_until_wrap(ce)) {
854 ret = guc_wq_noop_append(ce);
859 wqi = get_wq_pointer(ce, wqi_size);
863 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
865 *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
866 FIELD_PREP(WQ_LEN_MASK, len_dw);
867 *wqi++ = ce->lrc.lrca;
868 *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
869 FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
870 *wqi++ = 0; /* fence_id */
871 for_each_child(ce, child)
872 *wqi++ = child->ring->tail / sizeof(u64);
874 write_wqi(ce, wqi_size);
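/*
 * Illustrative sketch (not driver code): for a hypothetical parent context
 * with two children, the work queue item built above is (2 + 4) = 6 dwords:
 *
 *	dw0: WQ_TYPE_MULTI_LRC | (len_dw = 5)
 *	dw1: parent lrca
 *	dw2: guc_id | parent ring tail (in u64 words)
 *	dw3: fence_id (0)
 *	dw4: child[0] ring tail (in u64 words)
 *	dw5: child[1] ring tail (in u64 words)
 */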
879 static int guc_wq_item_append(struct intel_guc *guc,
880 struct i915_request *rq)
882 struct intel_context *ce = request_to_scheduling_context(rq);
885 if (unlikely(!intel_context_is_schedulable(ce)))
888 ret = __guc_wq_item_append(rq);
889 if (unlikely(ret == -EBUSY)) {
890 guc->stalled_request = rq;
891 guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
897 static bool multi_lrc_submit(struct i915_request *rq)
899 struct intel_context *ce = request_to_scheduling_context(rq);
901 intel_ring_set_tail(rq->ring, rq->tail);
904 * We expect the front end (execbuf IOCTL) to set this flag on the last
905 * request generated from a multi-BB submission. This indicates to the
906 * backend (GuC interface) that we should submit this context thus
907 * submitting all the requests generated in parallel.
909 return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
910 !intel_context_is_schedulable(ce);
913 static int guc_dequeue_one_context(struct intel_guc *guc)
915 struct i915_sched_engine * const sched_engine = guc->sched_engine;
916 struct i915_request *last = NULL;
921 lockdep_assert_held(&sched_engine->lock);
923 if (guc->stalled_request) {
925 last = guc->stalled_request;
927 switch (guc->submission_stall_reason) {
928 case STALL_REGISTER_CONTEXT:
929 goto register_context;
930 case STALL_MOVE_LRC_TAIL:
932 case STALL_ADD_REQUEST:
935 MISSING_CASE(guc->submission_stall_reason);
939 while ((rb = rb_first_cached(&sched_engine->queue))) {
940 struct i915_priolist *p = to_priolist(rb);
941 struct i915_request *rq, *rn;
943 priolist_for_each_request_consume(rq, rn, p) {
944 if (last && !can_merge_rq(rq, last))
945 goto register_context;
947 list_del_init(&rq->sched.link);
949 __i915_request_submit(rq);
951 trace_i915_request_in(rq, 0);
954 if (is_multi_lrc_rq(rq)) {
956 * We need to coalesce all multi-lrc requests in
957 * a relationship into a single H2G. We are
958 * guaranteed that all of these requests will be
959 * submitted sequentially.
961 if (multi_lrc_submit(rq)) {
963 goto register_context;
970 rb_erase_cached(&p->node, &sched_engine->queue);
971 i915_priolist_free(p);
976 struct intel_context *ce = request_to_scheduling_context(last);
978 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
979 intel_context_is_schedulable(ce))) {
980 ret = try_context_registration(ce, false);
981 if (unlikely(ret == -EPIPE)) {
983 } else if (ret == -EBUSY) {
984 guc->stalled_request = last;
985 guc->submission_stall_reason =
986 STALL_REGISTER_CONTEXT;
987 goto schedule_tasklet;
988 } else if (ret != 0) {
989 GEM_WARN_ON(ret); /* Unexpected */
995 if (is_multi_lrc_rq(last)) {
996 ret = guc_wq_item_append(guc, last);
998 goto schedule_tasklet;
999 } else if (ret != 0) {
1000 GEM_WARN_ON(ret); /* Unexpected */
1004 guc_set_lrc_tail(last);
1008 ret = guc_add_request(guc, last);
1009 if (unlikely(ret == -EPIPE)) {
1011 } else if (ret == -EBUSY) {
1012 goto schedule_tasklet;
1013 } else if (ret != 0) {
1014 GEM_WARN_ON(ret); /* Unexpected */
1019 guc->stalled_request = NULL;
1020 guc->submission_stall_reason = STALL_NONE;
1024 sched_engine->tasklet.callback = NULL;
1025 tasklet_disable_nosync(&sched_engine->tasklet);
1029 tasklet_schedule(&sched_engine->tasklet);
1033 static void guc_submission_tasklet(struct tasklet_struct *t)
1035 struct i915_sched_engine *sched_engine =
1036 from_tasklet(sched_engine, t, tasklet);
1037 unsigned long flags;
1040 spin_lock_irqsave(&sched_engine->lock, flags);
1043 loop = guc_dequeue_one_context(sched_engine->private_data);
1046 i915_sched_engine_reset_on_empty(sched_engine);
1048 spin_unlock_irqrestore(&sched_engine->lock, flags);
1051 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
1053 if (iir & GT_RENDER_USER_INTERRUPT)
1054 intel_engine_signal_breadcrumbs(engine);
1057 static void __guc_context_destroy(struct intel_context *ce);
1058 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1059 static void guc_signal_context_fence(struct intel_context *ce);
1060 static void guc_cancel_context_requests(struct intel_context *ce);
1061 static void guc_blocked_fence_complete(struct intel_context *ce);
1063 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
1065 struct intel_context *ce;
1066 unsigned long index, flags;
1067 bool pending_disable, pending_enable, deregister, destroyed, banned;
1069 xa_lock_irqsave(&guc->context_lookup, flags);
1070 xa_for_each(&guc->context_lookup, index, ce) {
1072 * Corner case where the ref count on the object is zero but the
1073 * deregister G2H was lost. In this case we don't touch the ref
1074 * count and finish the destroy of the context.
1076 bool do_put = kref_get_unless_zero(&ce->ref);
1078 xa_unlock(&guc->context_lookup);
1080 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
1081 (cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) {
1082 /* successful cancel so jump straight to close it */
1083 intel_context_sched_disable_unpin(ce);
1086 spin_lock(&ce->guc_state.lock);
1089 * Once we are at this point submission_disabled() is guaranteed
1090 * to be visible to all callers who set the below flags (see above
1091 * flush and flushes in reset_prepare). If submission_disabled()
1092 * is set, the caller shouldn't set these flags.
1095 destroyed = context_destroyed(ce);
1096 pending_enable = context_pending_enable(ce);
1097 pending_disable = context_pending_disable(ce);
1098 deregister = context_wait_for_deregister_to_register(ce);
1099 banned = context_banned(ce);
1100 init_sched_state(ce);
1102 spin_unlock(&ce->guc_state.lock);
1104 if (pending_enable || destroyed || deregister) {
1105 decr_outstanding_submission_g2h(guc);
1107 guc_signal_context_fence(ce);
1109 intel_gt_pm_put_async(guc_to_gt(guc));
1110 release_guc_id(guc, ce);
1111 __guc_context_destroy(ce);
1113 if (pending_enable || deregister)
1114 intel_context_put(ce);
1117 /* Not mutually exclusive with the above if statement. */
1118 if (pending_disable) {
1119 guc_signal_context_fence(ce);
1121 guc_cancel_context_requests(ce);
1122 intel_engine_signal_breadcrumbs(ce->engine);
1124 intel_context_sched_disable_unpin(ce);
1125 decr_outstanding_submission_g2h(guc);
1127 spin_lock(&ce->guc_state.lock);
1128 guc_blocked_fence_complete(ce);
1129 spin_unlock(&ce->guc_state.lock);
1131 intel_context_put(ce);
1135 intel_context_put(ce);
1136 xa_lock(&guc->context_lookup);
1138 xa_unlock_irqrestore(&guc->context_lookup, flags);
1142 * GuC stores busyness stats for each engine at context in/out boundaries. A
1143 * context 'in' logs execution start time, 'out' adds in -> out delta to total.
1144 * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
1145 * the GuC.
1147 * __i915_pmu_event_read samples engine busyness. When sampling, if context id
1148 * is valid (!= ~0) and start is non-zero, the engine is considered to be
1149 * active. For an active engine total busyness = total + (now - start), where
1150 * 'now' is the time at which the busyness is sampled. For inactive engine,
1151 * total busyness = total.
1153 * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
1155 * The start and total values provided by GuC are 32 bits and wrap around in a
1156 * few minutes. Since perf pmu provides busyness as 64 bit monotonically
1157 * increasing ns values, there is a need for this implementation to account for
1158 * overflows and extend the GuC provided values to 64 bits before returning
1159 * busyness to the user. In order to do that, a worker runs periodically with
1160 * a period of 1/8th the time it takes for the timestamp to wrap (i.e. roughly
1161 * every 28 seconds for a gt clock frequency of 19.2 MHz).
1164 #define WRAP_TIME_CLKS U32_MAX
1165 #define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
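/*
 * Illustrative sketch (not driver code): for the 19.2 MHz gt clock used as an
 * example in the comment above:
 *
 *	wrap time   = 2^32 / 19.2e6 ~= 223.7 seconds
 *	poll period = wrap time / 8 ~=  28 seconds
 *
 * which is the ping_delay computed from POLL_TIME_CLKS and gt->clock_frequency
 * in intel_guc_submission_init().
 */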
1168 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
1170 u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1171 u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
1173 if (new_start == lower_32_bits(*prev_start))
1177 * When gt is unparked, we update the gt timestamp and start the ping
1178 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
1179 * is unparked, all switched in contexts will have a start time that is
1180 * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
1182 * If neither gt_stamp nor new_start has rolled over, then the
1183 * gt_stamp_hi does not need to be adjusted, however if one of them has
1184 * rolled over, we need to adjust gt_stamp_hi accordingly.
1186 * The below conditions address the cases of new_start rollover and
1187 * gt_stamp_last rollover respectively.
1189 if (new_start < gt_stamp_last &&
1190 (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
1191 gt_stamp_hi++;
1193 if (new_start > gt_stamp_last &&
1194 (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
1195 gt_stamp_hi--;
1197 *prev_start = ((u64)gt_stamp_hi << 32) | new_start;
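/*
 * Illustrative sketch (not driver code), with hypothetical values: if
 * guc->timestamp.gt_stamp is 0x00000003_fffff000 (gt_stamp_hi = 3,
 * gt_stamp_last = 0xfffff000) and new_start = 0x00000100, then new_start has
 * wrapped past gt_stamp_last, the first condition above increments
 * gt_stamp_hi to 4, and the extended start becomes 0x00000004_00000100.
 */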
1200 #define record_read(map_, field_) \
1201 iosys_map_rd_field(map_, 0, struct guc_engine_usage_record, field_)
1204 * GuC updates shared memory and KMD reads it. Since this is not synchronized,
1205 * we run into a race where the value read is inconsistent. Sometimes the
1206 * inconsistency is in reading the upper MSB bytes of the last_in value when
1207 * this race occurs. Two types of cases are seen - either the upper 8 bits or
1208 * the upper 24 bits read back as zero. Since the remaining bits are non-zero,
1209 * it is non-trivial to determine the validity of these values. Instead we read
1210 * the values multiple times until they are consistent. In test runs, 3 attempts
1211 * result in consistent values. The upper bound is set to 6 attempts and may
1212 * need to be tuned as per any new occurrences.
1214 static void __get_engine_usage_record(struct intel_engine_cs *engine,
1215 u32 *last_in, u32 *id, u32 *total)
1217 struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
1221 *last_in = record_read(&rec_map, last_switch_in_stamp);
1222 *id = record_read(&rec_map, current_context_index);
1223 *total = record_read(&rec_map, total_runtime);
1225 if (record_read(&rec_map, last_switch_in_stamp) == *last_in &&
1226 record_read(&rec_map, current_context_index) == *id &&
1227 record_read(&rec_map, total_runtime) == *total)
1232 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
1234 struct intel_engine_guc_stats *stats = &engine->stats.guc;
1235 struct intel_guc *guc = &engine->gt->uc.guc;
1236 u32 last_switch, ctx_id, total;
1238 lockdep_assert_held(&guc->timestamp.lock);
1240 __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
1242 stats->running = ctx_id != ~0U && last_switch;
1244 __extend_last_switch(guc, &stats->start_gt_clk, last_switch);
1247 * Instead of adjusting the total for overflow, just add the
1248 * difference from the previous sample to stats->total_gt_clks.
1250 if (total && total != ~0U) {
1251 stats->total_gt_clks += (u32)(total - stats->prev_total);
1252 stats->prev_total = total;
1256 static u32 gpm_timestamp_shift(struct intel_gt *gt)
1258 intel_wakeref_t wakeref;
1261 with_intel_runtime_pm(gt->uncore->rpm, wakeref)
1262 reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
1264 shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
1265 GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
1270 static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
1272 struct intel_gt *gt = guc_to_gt(guc);
1273 u32 gt_stamp_lo, gt_stamp_hi;
1276 lockdep_assert_held(&guc->timestamp.lock);
1278 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1279 gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0,
1280 MISC_STATUS1) >> guc->timestamp.shift;
1281 gt_stamp_lo = lower_32_bits(gpm_ts);
1284 if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
1285 gt_stamp_hi++;
1287 guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
1291 * Unlike the execlist mode of submission, total and active times are in terms of
1292 * gt clocks. The *now parameter is retained to return the cpu time at which the
1293 * busyness was sampled.
1295 static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
1297 struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
1298 struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
1299 struct intel_gt *gt = engine->gt;
1300 struct intel_guc *guc = &gt->uc.guc;
1301 u64 total, gt_stamp_saved;
1302 unsigned long flags;
1306 spin_lock_irqsave(&guc->timestamp.lock, flags);
1309 * If a reset happened, we risk reading partially updated engine
1310 * busyness from GuC, so we just use the driver stored copy of busyness.
1311 * Synchronize with gt reset using reset_count and the
1312 * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
1313 * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
1314 * usable by checking the flag afterwards.
1316 reset_count = i915_reset_count(gpu_error);
1317 in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
1322 * The active busyness depends on start_gt_clk and gt_stamp.
1323 * gt_stamp is updated by i915 only when gt is awake and the
1324 * start_gt_clk is derived from GuC state. To get a consistent
1325 * view of activity, we query the GuC state only if gt is awake.
1327 if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
1328 stats_saved = *stats;
1329 gt_stamp_saved = guc->timestamp.gt_stamp;
1331 * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
1332 * start_gt_clk' calculation below for active engines.
1334 guc_update_engine_gt_clks(engine);
1335 guc_update_pm_timestamp(guc, now);
1336 intel_gt_pm_put_async(gt);
1337 if (i915_reset_count(gpu_error) != reset_count) {
1338 *stats = stats_saved;
1339 guc->timestamp.gt_stamp = gt_stamp_saved;
1343 total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
1344 if (stats->running) {
1345 u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
1347 total += intel_gt_clock_interval_to_ns(gt, clk);
1350 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1352 return ns_to_ktime(total);
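/*
 * Illustrative sketch (not driver code), with hypothetical numbers: if
 * stats->total_gt_clks converts to 10 ms and the engine is running with
 * (gt_stamp - start_gt_clk) converting to another 2 ms, the busyness returned
 * above is 12 ms; for an idle engine only the 10 ms total is reported.
 */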
1355 static void guc_enable_busyness_worker(struct intel_guc *guc)
1357 mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
1360 static void guc_cancel_busyness_worker(struct intel_guc *guc)
1362 cancel_delayed_work_sync(&guc->timestamp.work);
1365 static void __reset_guc_busyness_stats(struct intel_guc *guc)
1367 struct intel_gt *gt = guc_to_gt(guc);
1368 struct intel_engine_cs *engine;
1369 enum intel_engine_id id;
1370 unsigned long flags;
1373 guc_cancel_busyness_worker(guc);
1375 spin_lock_irqsave(&guc->timestamp.lock, flags);
1377 guc_update_pm_timestamp(guc, &unused);
1378 for_each_engine(engine, gt, id) {
1379 guc_update_engine_gt_clks(engine);
1380 engine->stats.guc.prev_total = 0;
1383 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1386 static void __update_guc_busyness_stats(struct intel_guc *guc)
1388 struct intel_gt *gt = guc_to_gt(guc);
1389 struct intel_engine_cs *engine;
1390 enum intel_engine_id id;
1391 unsigned long flags;
1394 guc->timestamp.last_stat_jiffies = jiffies;
1396 spin_lock_irqsave(&guc->timestamp.lock, flags);
1398 guc_update_pm_timestamp(guc, &unused);
1399 for_each_engine(engine, gt, id)
1400 guc_update_engine_gt_clks(engine);
1402 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1405 static void __guc_context_update_stats(struct intel_context *ce)
1407 struct intel_guc *guc = ce_to_guc(ce);
1408 unsigned long flags;
1410 spin_lock_irqsave(&guc->timestamp.lock, flags);
1411 lrc_update_runtime(ce);
1412 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1415 static void guc_context_update_stats(struct intel_context *ce)
1417 if (!intel_context_pin_if_active(ce))
1420 __guc_context_update_stats(ce);
1421 intel_context_unpin(ce);
1424 static void guc_timestamp_ping(struct work_struct *wrk)
1426 struct intel_guc *guc = container_of(wrk, typeof(*guc),
1427 timestamp.work.work);
1428 struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
1429 struct intel_gt *gt = guc_to_gt(guc);
1430 struct intel_context *ce;
1431 intel_wakeref_t wakeref;
1432 unsigned long index;
1436 * Synchronize with gt reset to make sure the worker does not
1437 * corrupt the engine/guc stats. NB: can't actually block waiting
1438 * for a reset to complete as the reset requires flushing out
1439 * this worker thread if started. So waiting would deadlock.
1441 ret = intel_gt_reset_trylock(gt, &srcu);
1445 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
1446 __update_guc_busyness_stats(guc);
1448 /* adjust context stats for overflow */
1449 xa_for_each(&guc->context_lookup, index, ce)
1450 guc_context_update_stats(ce);
1452 intel_gt_reset_unlock(gt, srcu);
1454 guc_enable_busyness_worker(guc);
1457 static int guc_action_enable_usage_stats(struct intel_guc *guc)
1459 u32 offset = intel_guc_engine_usage_offset(guc);
1461 INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
1466 return intel_guc_send(guc, action, ARRAY_SIZE(action));
1469 static int guc_init_engine_stats(struct intel_guc *guc)
1471 struct intel_gt *gt = guc_to_gt(guc);
1472 intel_wakeref_t wakeref;
1475 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
1476 ret = guc_action_enable_usage_stats(guc);
1479 guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
1481 guc_enable_busyness_worker(guc);
1486 static void guc_fini_engine_stats(struct intel_guc *guc)
1488 guc_cancel_busyness_worker(guc);
1491 void intel_guc_busyness_park(struct intel_gt *gt)
1493 struct intel_guc *guc = &gt->uc.guc;
1495 if (!guc_submission_initialized(guc))
1499 * There is a race with suspend flow where the worker runs after suspend
1500 * and causes an unclaimed register access warning. Cancel the worker
1501 * synchronously here.
1503 guc_cancel_busyness_worker(guc);
1506 * Before parking, we should sample engine busyness stats if we need to.
1507 * We can skip it if we are less than half a ping from the last time we
1508 * sampled the busyness stats.
1510 if (guc->timestamp.last_stat_jiffies &&
1511 !time_after(jiffies, guc->timestamp.last_stat_jiffies +
1512 (guc->timestamp.ping_delay / 2)))
1515 __update_guc_busyness_stats(guc);
1518 void intel_guc_busyness_unpark(struct intel_gt *gt)
1520 struct intel_guc *guc = &gt->uc.guc;
1521 unsigned long flags;
1524 if (!guc_submission_initialized(guc))
1527 spin_lock_irqsave(&guc->timestamp.lock, flags);
1528 guc_update_pm_timestamp(guc, &unused);
1529 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1530 guc_enable_busyness_worker(guc);
1534 submission_disabled(struct intel_guc *guc)
1536 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1538 return unlikely(!sched_engine ||
1539 !__tasklet_is_enabled(&sched_engine->tasklet) ||
1540 intel_gt_is_wedged(guc_to_gt(guc)));
1543 static void disable_submission(struct intel_guc *guc)
1545 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1547 if (__tasklet_is_enabled(&sched_engine->tasklet)) {
1548 GEM_BUG_ON(!guc->ct.enabled);
1549 __tasklet_disable_sync_once(&sched_engine->tasklet);
1550 sched_engine->tasklet.callback = NULL;
1554 static void enable_submission(struct intel_guc *guc)
1556 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1557 unsigned long flags;
1559 spin_lock_irqsave(&guc->sched_engine->lock, flags);
1560 sched_engine->tasklet.callback = guc_submission_tasklet;
1561 wmb(); /* Make sure callback visible */
1562 if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
1563 __tasklet_enable(&sched_engine->tasklet)) {
1564 GEM_BUG_ON(!guc->ct.enabled);
1566 /* And kick in case we missed a new request submission. */
1567 tasklet_hi_schedule(&sched_engine->tasklet);
1569 spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
1572 static void guc_flush_submissions(struct intel_guc *guc)
1574 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1575 unsigned long flags;
1577 spin_lock_irqsave(&sched_engine->lock, flags);
1578 spin_unlock_irqrestore(&sched_engine->lock, flags);
1581 static void guc_flush_destroyed_contexts(struct intel_guc *guc);
1583 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
1585 if (unlikely(!guc_submission_initialized(guc))) {
1586 /* Reset called during driver load? GuC not yet initialised! */
1590 intel_gt_park_heartbeats(guc_to_gt(guc));
1591 disable_submission(guc);
1592 guc->interrupts.disable(guc);
1593 __reset_guc_busyness_stats(guc);
1595 /* Flush IRQ handler */
1596 spin_lock_irq(guc_to_gt(guc)->irq_lock);
1597 spin_unlock_irq(guc_to_gt(guc)->irq_lock);
1599 guc_flush_submissions(guc);
1600 guc_flush_destroyed_contexts(guc);
1601 flush_work(&guc->ct.requests.worker);
1603 scrub_guc_desc_for_outstanding_g2h(guc);
1606 static struct intel_engine_cs *
1607 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
1609 struct intel_engine_cs *engine;
1610 intel_engine_mask_t tmp, mask = ve->mask;
1611 unsigned int num_siblings = 0;
1613 for_each_engine_masked(engine, ve->gt, mask, tmp)
1614 if (num_siblings++ == sibling)
1620 static inline struct intel_engine_cs *
1621 __context_to_physical_engine(struct intel_context *ce)
1623 struct intel_engine_cs *engine = ce->engine;
1625 if (intel_engine_is_virtual(engine))
1626 engine = guc_virtual_get_sibling(engine, 0);
1631 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
1633 struct intel_engine_cs *engine = __context_to_physical_engine(ce);
1635 if (!intel_context_is_schedulable(ce))
1638 GEM_BUG_ON(!intel_context_is_pinned(ce));
1641 * We want a simple context + ring to execute the breadcrumb update.
1642 * We cannot rely on the context being intact across the GPU hang,
1643 * so clear it and rebuild just what we need for the breadcrumb.
1644 * All pending requests for this context will be zapped, and any
1645 * future request will be after userspace has had the opportunity
1646 * to recreate its own state.
1649 lrc_init_regs(ce, engine, true);
1651 /* Rerun the request; its payload has been neutered (if guilty). */
1652 lrc_update_regs(ce, engine, head);
1655 static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
1658 * Wa_22011802037: In addition to stopping the cs, we need
1659 * to wait for any pending mi force wakeups
1661 if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
1662 (GRAPHICS_VER(engine->i915) >= 11 &&
1663 GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70))) {
1664 intel_engine_stop_cs(engine);
1665 intel_engine_wait_for_pending_mi_fw(engine);
1669 static void guc_reset_nop(struct intel_engine_cs *engine)
1673 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
1678 __unwind_incomplete_requests(struct intel_context *ce)
1680 struct i915_request *rq, *rn;
1681 struct list_head *pl;
1682 int prio = I915_PRIORITY_INVALID;
1683 struct i915_sched_engine * const sched_engine =
1684 ce->engine->sched_engine;
1685 unsigned long flags;
1687 spin_lock_irqsave(&sched_engine->lock, flags);
1688 spin_lock(&ce->guc_state.lock);
1689 list_for_each_entry_safe_reverse(rq, rn,
1690 &ce->guc_state.requests,
1692 if (i915_request_completed(rq))
1695 list_del_init(&rq->sched.link);
1696 __i915_request_unsubmit(rq);
1698 /* Push the request back into the queue for later resubmission. */
1699 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
1700 if (rq_prio(rq) != prio) {
1702 pl = i915_sched_lookup_priolist(sched_engine, prio);
1704 GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
1706 list_add(&rq->sched.link, pl);
1707 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1709 spin_unlock(&ce->guc_state.lock);
1710 spin_unlock_irqrestore(&sched_engine->lock, flags);
1713 static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
1716 struct i915_request *rq;
1717 unsigned long flags;
1719 int i, number_children = ce->parallel.number_children;
1720 struct intel_context *parent = ce;
1722 GEM_BUG_ON(intel_context_is_child(ce));
1724 intel_context_get(ce);
1727 * GuC will implicitly mark the context as non-schedulable when it sends
1728 * the reset notification. Make sure our state reflects this change. The
1729 * context will be marked enabled on resubmission.
1731 spin_lock_irqsave(&ce->guc_state.lock, flags);
1732 clr_context_enabled(ce);
1733 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1736 * For each context in the relationship, find the hanging request,
1737 * resetting each context / request as needed.
1739 for (i = 0; i < number_children + 1; ++i) {
1740 if (!intel_context_is_pinned(ce))
1744 rq = intel_context_get_active_request(ce);
1746 head = ce->ring->tail;
1750 if (i915_request_started(rq))
1751 guilty = stalled & ce->engine->mask;
1753 GEM_BUG_ON(i915_active_is_idle(&ce->active));
1754 head = intel_ring_wrap(ce->ring, rq->head);
1756 __i915_request_reset(rq, guilty);
1757 i915_request_put(rq);
1759 guc_reset_state(ce, head, guilty);
1761 if (i != number_children)
1762 ce = list_next_entry(ce, parallel.child_link);
1765 __unwind_incomplete_requests(parent);
1766 intel_context_put(parent);
1769 void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
1771 struct intel_context *ce;
1772 unsigned long index;
1773 unsigned long flags;
1775 if (unlikely(!guc_submission_initialized(guc))) {
1776 /* Reset called during driver load? GuC not yet initialised! */
1780 xa_lock_irqsave(&guc->context_lookup, flags);
1781 xa_for_each(&guc->context_lookup, index, ce) {
1782 if (!kref_get_unless_zero(&ce->ref))
1785 xa_unlock(&guc->context_lookup);
1787 if (intel_context_is_pinned(ce) &&
1788 !intel_context_is_child(ce))
1789 __guc_reset_context(ce, stalled);
1791 intel_context_put(ce);
1793 xa_lock(&guc->context_lookup);
1795 xa_unlock_irqrestore(&guc->context_lookup, flags);
1797 /* GuC is blown away, drop all references to contexts */
1798 xa_destroy(&guc->context_lookup);
1801 static void guc_cancel_context_requests(struct intel_context *ce)
1803 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1804 struct i915_request *rq;
1805 unsigned long flags;
1807 /* Mark all executing requests as skipped. */
1808 spin_lock_irqsave(&sched_engine->lock, flags);
1809 spin_lock(&ce->guc_state.lock);
1810 list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
1811 i915_request_put(i915_request_mark_eio(rq));
1812 spin_unlock(&ce->guc_state.lock);
1813 spin_unlock_irqrestore(&sched_engine->lock, flags);
1817 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
1819 struct i915_request *rq, *rn;
1821 unsigned long flags;
1823 /* Can be called during boot if GuC fails to load */
1828 * Before we call engine->cancel_requests(), we should have exclusive
1829 * access to the submission state. This is arranged for us by the
1830 * caller disabling the interrupt generation, the tasklet and other
1831 * threads that may then access the same state, giving us a free hand
1832 * to reset state. However, we still need to let lockdep be aware that
1833 * we know this state may be accessed in hardirq context, so we
1834 * disable the irq around this manipulation and we want to keep
1835 * the spinlock focused on its duties and not accidentally conflate
1836 * coverage to the submission's irq state. (Similarly, although we
1837 * shouldn't need to disable irq around the manipulation of the
1838 * submission's irq state, we also wish to remind ourselves that
1839 * it is irq state.)
1841 spin_lock_irqsave(&sched_engine->lock, flags);
1843 /* Flush the queued requests to the timeline list (for retiring). */
1844 while ((rb = rb_first_cached(&sched_engine->queue))) {
1845 struct i915_priolist *p = to_priolist(rb);
1847 priolist_for_each_request_consume(rq, rn, p) {
1848 list_del_init(&rq->sched.link);
1850 __i915_request_submit(rq);
1852 i915_request_put(i915_request_mark_eio(rq));
1855 rb_erase_cached(&p->node, &sched_engine->queue);
1856 i915_priolist_free(p);
1859 /* Remaining _unready_ requests will be nop'ed when submitted */
1861 sched_engine->queue_priority_hint = INT_MIN;
1862 sched_engine->queue = RB_ROOT_CACHED;
1864 spin_unlock_irqrestore(&sched_engine->lock, flags);
1867 void intel_guc_submission_cancel_requests(struct intel_guc *guc)
1869 struct intel_context *ce;
1870 unsigned long index;
1871 unsigned long flags;
1873 xa_lock_irqsave(&guc->context_lookup, flags);
1874 xa_for_each(&guc->context_lookup, index, ce) {
1875 if (!kref_get_unless_zero(&ce->ref))
1878 xa_unlock(&guc->context_lookup);
1880 if (intel_context_is_pinned(ce) &&
1881 !intel_context_is_child(ce))
1882 guc_cancel_context_requests(ce);
1884 intel_context_put(ce);
1886 xa_lock(&guc->context_lookup);
1888 xa_unlock_irqrestore(&guc->context_lookup, flags);
1890 guc_cancel_sched_engine_requests(guc->sched_engine);
1892 /* GuC is blown away, drop all references to contexts */
1893 xa_destroy(&guc->context_lookup);
1896 void intel_guc_submission_reset_finish(struct intel_guc *guc)
1898 /* Reset called during driver load or during wedge? */
1899 if (unlikely(!guc_submission_initialized(guc) ||
1900 intel_gt_is_wedged(guc_to_gt(guc)))) {
1905 * Technically possible for either of these values to be non-zero here,
1906 * but very unlikely + harmless. Regardless let's add a warn so we can
1907 * see in CI if this happens frequently / a precursor to taking down the
1908 * machine.
1910 GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
1911 atomic_set(&guc->outstanding_submission_g2h, 0);
1913 intel_guc_global_policies_update(guc);
1914 enable_submission(guc);
1915 intel_gt_unpark_heartbeats(guc_to_gt(guc));
1918 static void destroyed_worker_func(struct work_struct *w);
1919 static void reset_fail_worker_func(struct work_struct *w);
1922 * Set up the memory resources to be shared with the GuC (via the GGTT)
1923 * at firmware loading time.
1925 int intel_guc_submission_init(struct intel_guc *guc)
1927 struct intel_gt *gt = guc_to_gt(guc);
1930 if (guc->submission_initialized)
1933 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) {
1934 ret = guc_lrc_desc_pool_create_v69(guc);
1939 guc->submission_state.guc_ids_bitmap =
1940 bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
1941 if (!guc->submission_state.guc_ids_bitmap) {
1946 guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
1947 guc->timestamp.shift = gpm_timestamp_shift(gt);
1948 guc->submission_initialized = true;
1953 guc_lrc_desc_pool_destroy_v69(guc);
1958 void intel_guc_submission_fini(struct intel_guc *guc)
1960 if (!guc->submission_initialized)
1963 guc_flush_destroyed_contexts(guc);
1964 guc_lrc_desc_pool_destroy_v69(guc);
1965 i915_sched_engine_put(guc->sched_engine);
1966 bitmap_free(guc->submission_state.guc_ids_bitmap);
1967 guc->submission_initialized = false;
1970 static inline void queue_request(struct i915_sched_engine *sched_engine,
1971 struct i915_request *rq,
1974 GEM_BUG_ON(!list_empty(&rq->sched.link));
1975 list_add_tail(&rq->sched.link,
1976 i915_sched_lookup_priolist(sched_engine, prio));
1977 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1978 tasklet_hi_schedule(&sched_engine->tasklet);
1981 static int guc_bypass_tasklet_submit(struct intel_guc *guc,
1982 struct i915_request *rq)
1986 __i915_request_submit(rq);
1988 trace_i915_request_in(rq, 0);
1990 if (is_multi_lrc_rq(rq)) {
1991 if (multi_lrc_submit(rq)) {
1992 ret = guc_wq_item_append(guc, rq);
1994 ret = guc_add_request(guc, rq);
1997 guc_set_lrc_tail(rq);
1998 ret = guc_add_request(guc, rq);
2001 if (unlikely(ret == -EPIPE))
2002 disable_submission(guc);
2007 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
2009 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
2010 struct intel_context *ce = request_to_scheduling_context(rq);
2012 return submission_disabled(guc) || guc->stalled_request ||
2013 !i915_sched_engine_is_empty(sched_engine) ||
2014 !ctx_id_mapped(guc, ce->guc_id.id);
2017 static void guc_submit_request(struct i915_request *rq)
2019 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
2020 struct intel_guc *guc = &rq->engine->gt->uc.guc;
2021 unsigned long flags;
2023 /* Will be called from irq-context when using foreign fences. */
2024 spin_lock_irqsave(&sched_engine->lock, flags);
2026 if (need_tasklet(guc, rq))
2027 queue_request(sched_engine, rq, rq_prio(rq));
2028 else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
2029 tasklet_hi_schedule(&sched_engine->tasklet);
2031 spin_unlock_irqrestore(&sched_engine->lock, flags);
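/*
 * Allocate a fresh guc_id for @ce. Parent (multi-LRC) contexts take a
 * contiguous block of ids from the multi-LRC bitmap, sized by the number of
 * children; all other contexts take a single id from the ida.
 */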
2034 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
2038 GEM_BUG_ON(intel_context_is_child(ce));
2040 if (intel_context_is_parent(ce))
2041 ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
2042 NUMBER_MULTI_LRC_GUC_ID(guc),
2043 order_base_2(ce->parallel.number_children
2046 ret = ida_simple_get(&guc->submission_state.guc_ids,
2047 NUMBER_MULTI_LRC_GUC_ID(guc),
2048 guc->submission_state.num_guc_ids,
2049 GFP_KERNEL | __GFP_RETRY_MAYFAIL |
2051 if (unlikely(ret < 0))
2054 if (!intel_context_is_parent(ce))
2055 ++guc->submission_state.guc_ids_in_use;
2057 ce->guc_id.id = ret;
2061 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
2063 GEM_BUG_ON(intel_context_is_child(ce));
2065 if (!context_guc_id_invalid(ce)) {
2066 if (intel_context_is_parent(ce)) {
2067 bitmap_release_region(guc->submission_state.guc_ids_bitmap,
2069 order_base_2(ce->parallel.number_children
2072 --guc->submission_state.guc_ids_in_use;
2073 ida_simple_remove(&guc->submission_state.guc_ids,
2076 clr_ctx_id_mapping(guc, ce->guc_id.id);
2077 set_context_guc_id_invalid(ce);
2079 if (!list_empty(&ce->guc_id.link))
2080 list_del_init(&ce->guc_id.link);
2083 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
2085 unsigned long flags;
2087 spin_lock_irqsave(&guc->submission_state.lock, flags);
2088 __release_guc_id(guc, ce);
2089 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
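/*
 * Steal a guc_id from the oldest unpinned context on the guc_id_list. The
 * previous owner is marked unregistered and its guc_id invalidated, so it
 * must re-register before it can be submitted again.
 */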
2092 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
2094 struct intel_context *cn;
2096 lockdep_assert_held(&guc->submission_state.lock);
2097 GEM_BUG_ON(intel_context_is_child(ce));
2098 GEM_BUG_ON(intel_context_is_parent(ce));
2100 if (!list_empty(&guc->submission_state.guc_id_list)) {
2101 cn = list_first_entry(&guc->submission_state.guc_id_list,
2102 struct intel_context,
2105 GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
2106 GEM_BUG_ON(context_guc_id_invalid(cn));
2107 GEM_BUG_ON(intel_context_is_child(cn));
2108 GEM_BUG_ON(intel_context_is_parent(cn));
2110 list_del_init(&cn->guc_id.link);
2111 ce->guc_id.id = cn->guc_id.id;
2113 spin_lock(&cn->guc_state.lock);
2114 clr_context_registered(cn);
2115 spin_unlock(&cn->guc_state.lock);
2117 set_context_guc_id_invalid(cn);
2119 #ifdef CONFIG_DRM_I915_SELFTEST
2120 guc->number_guc_id_stolen++;
2129 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
2133 lockdep_assert_held(&guc->submission_state.lock);
2134 GEM_BUG_ON(intel_context_is_child(ce));
2136 ret = new_guc_id(guc, ce);
2137 if (unlikely(ret < 0)) {
2138 if (intel_context_is_parent(ce))
2141 ret = steal_guc_id(guc, ce);
2146 if (intel_context_is_parent(ce)) {
2147 struct intel_context *child;
2150 for_each_child(ce, child)
2151 child->guc_id.id = ce->guc_id.id + i++;
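/*
 * Pin a guc_id for @ce: returns 1 if a new guc_id was assigned, 0 if an
 * existing one was reused, or a negative error code. A typical caller
 * pattern (see guc_request_alloc() below) is roughly:
 *
 *	ret = pin_guc_id(guc, ce);
 *	if (unlikely(ret < 0))
 *		return ret;
 *	if (context_needs_register(ce, !!ret))
 *		ret = try_context_registration(ce, true);
 */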
2157 #define PIN_GUC_ID_TRIES 4
2158 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2161 unsigned long flags, tries = PIN_GUC_ID_TRIES;
2163 GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
2166 spin_lock_irqsave(&guc->submission_state.lock, flags);
2168 might_lock(&ce->guc_state.lock);
2170 if (context_guc_id_invalid(ce)) {
2171 ret = assign_guc_id(guc, ce);
2174 ret = 1; /* Indicates newly assigned guc_id */
2176 if (!list_empty(&ce->guc_id.link))
2177 list_del_init(&ce->guc_id.link);
2178 atomic_inc(&ce->guc_id.ref);
2181 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2184 * -EAGAIN indicates no guc_id are available, let's retire any
2185 * outstanding requests to see if that frees up a guc_id. If the first
2186 * retire didn't help, insert a sleep with the timeslice duration before
2187 * attempting to retire more requests. Double the sleep period each
2188 subsequent pass before finally giving up. The sleep period has a
2189 maximum of 100 ms and a minimum of 1 ms.
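 *
 * For example, with a 5 ms timeslice the first retry only retires requests,
 * while the following retries sleep roughly 5 ms and then 10 ms, always
 * clamped to the 1..100 ms range.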
2191 if (ret == -EAGAIN && --tries) {
2192 if (PIN_GUC_ID_TRIES - tries > 1) {
2193 unsigned int timeslice_shifted =
2194 ce->engine->props.timeslice_duration_ms <<
2195 (PIN_GUC_ID_TRIES - tries - 2);
2196 unsigned int max = min_t(unsigned int, 100,
2199 msleep(max_t(unsigned int, max, 1));
2201 intel_gt_retire_requests(guc_to_gt(guc));
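/*
 * Drop a guc_id reference. Once the ref count hits zero the id is placed on
 * the guc_id_list so it can be reused or stolen by another context.
 */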
2208 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2210 unsigned long flags;
2212 GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
2213 GEM_BUG_ON(intel_context_is_child(ce));
2215 if (unlikely(context_guc_id_invalid(ce) ||
2216 intel_context_is_parent(ce)))
2219 spin_lock_irqsave(&guc->submission_state.lock, flags);
2220 if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
2221 !atomic_read(&ce->guc_id.ref))
2222 list_add_tail(&ce->guc_id.link,
2223 &guc->submission_state.guc_id_list);
2224 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2227 static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
2228 struct intel_context *ce,
2233 struct intel_context *child;
2234 u32 action[4 + MAX_ENGINE_INSTANCE];
2237 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2239 action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2240 action[len++] = guc_id;
2241 action[len++] = ce->parallel.number_children + 1;
2242 action[len++] = offset;
2243 for_each_child(ce, child) {
2244 offset += sizeof(struct guc_lrc_desc_v69);
2245 action[len++] = offset;
2248 return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2251 static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
2252 struct intel_context *ce,
2253 struct guc_ctxt_registration_info *info,
2256 struct intel_context *child;
2257 u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
2261 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2263 action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2264 action[len++] = info->flags;
2265 action[len++] = info->context_idx;
2266 action[len++] = info->engine_class;
2267 action[len++] = info->engine_submit_mask;
2268 action[len++] = info->wq_desc_lo;
2269 action[len++] = info->wq_desc_hi;
2270 action[len++] = info->wq_base_lo;
2271 action[len++] = info->wq_base_hi;
2272 action[len++] = info->wq_size;
2273 action[len++] = ce->parallel.number_children + 1;
2274 action[len++] = info->hwlrca_lo;
2275 action[len++] = info->hwlrca_hi;
2277 next_id = info->context_idx + 1;
2278 for_each_child(ce, child) {
2279 GEM_BUG_ON(next_id++ != child->guc_id.id);
2282 * NB: GuC interface supports 64 bit LRCA even though i915/HW
2283 * only supports 32 bit currently.
2285 action[len++] = lower_32_bits(child->lrc.lrca);
2286 action[len++] = upper_32_bits(child->lrc.lrca);
2289 GEM_BUG_ON(len > ARRAY_SIZE(action));
2291 return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2294 static int __guc_action_register_context_v69(struct intel_guc *guc,
2300 INTEL_GUC_ACTION_REGISTER_CONTEXT,
2305 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2309 static int __guc_action_register_context_v70(struct intel_guc *guc,
2310 struct guc_ctxt_registration_info *info,
2314 INTEL_GUC_ACTION_REGISTER_CONTEXT,
2318 info->engine_submit_mask,
2328 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2332 static void prepare_context_registration_info_v69(struct intel_context *ce);
2333 static void prepare_context_registration_info_v70(struct intel_context *ce,
2334 struct guc_ctxt_registration_info *info);
2337 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
2339 u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
2340 ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
2342 prepare_context_registration_info_v69(ce);
2344 if (intel_context_is_parent(ce))
2345 return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
2348 return __guc_action_register_context_v69(guc, ce->guc_id.id,
2353 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
2355 struct guc_ctxt_registration_info info;
2357 prepare_context_registration_info_v70(ce, &info);
2359 if (intel_context_is_parent(ce))
2360 return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
2362 return __guc_action_register_context_v70(guc, &info, loop);
2365 static int register_context(struct intel_context *ce, bool loop)
2367 struct intel_guc *guc = ce_to_guc(ce);
2370 GEM_BUG_ON(intel_context_is_child(ce));
2371 trace_intel_context_register(ce);
2373 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
2374 ret = register_context_v70(guc, ce, loop);
2376 ret = register_context_v69(guc, ce, loop);
2379 unsigned long flags;
2381 spin_lock_irqsave(&ce->guc_state.lock, flags);
2382 set_context_registered(ce);
2383 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2385 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
2386 guc_context_policy_init_v70(ce, loop);
2392 static int __guc_action_deregister_context(struct intel_guc *guc,
2396 INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
2400 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2401 G2H_LEN_DW_DEREGISTER_CONTEXT,
2405 static int deregister_context(struct intel_context *ce, u32 guc_id)
2407 struct intel_guc *guc = ce_to_guc(ce);
2409 GEM_BUG_ON(intel_context_is_child(ce));
2410 trace_intel_context_deregister(ce);
2412 return __guc_action_deregister_context(guc, guc_id);
2415 static inline void clear_children_join_go_memory(struct intel_context *ce)
2417 struct parent_scratch *ps = __get_parent_scratch(ce);
2420 ps->go.semaphore = 0;
2421 for (i = 0; i < ce->parallel.number_children + 1; ++i)
2422 ps->join[i].semaphore = 0;
2425 static inline u32 get_children_go_value(struct intel_context *ce)
2427 return __get_parent_scratch(ce)->go.semaphore;
2430 static inline u32 get_children_join_value(struct intel_context *ce,
2433 return __get_parent_scratch(ce)->join[child_index].semaphore;
2436 struct context_policy {
2438 struct guc_update_context_policy h2g;
2441 static u32 __guc_context_policy_action_size(struct context_policy *policy)
2443 size_t bytes = sizeof(policy->h2g.header) +
2444 (sizeof(policy->h2g.klv[0]) * policy->count);
2446 return bytes / sizeof(u32);
2449 static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
2451 policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
2452 policy->h2g.header.ctx_id = guc_id;
2456 #define MAKE_CONTEXT_POLICY_ADD(func, id) \
2457 static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \
2459 GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
2460 policy->h2g.klv[policy->count].kl = \
2461 FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
2462 FIELD_PREP(GUC_KLV_0_LEN, 1); \
2463 policy->h2g.klv[policy->count].value = data; \
2467 MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
2468 MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
2469 MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
2470 MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
2472 #undef MAKE_CONTEXT_POLICY_ADD
2474 static int __guc_context_set_context_policies(struct intel_guc *guc,
2475 struct context_policy *policy,
2478 return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g,
2479 __guc_context_policy_action_size(policy),
2483 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
2485 struct intel_engine_cs *engine = ce->engine;
2486 struct intel_guc *guc = &engine->gt->uc.guc;
2487 struct context_policy policy;
2488 u32 execution_quantum;
2489 u32 preemption_timeout;
2490 unsigned long flags;
2493 /* NB: For both of these, zero means disabled. */
2494 GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
2495 execution_quantum));
2496 GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
2497 preemption_timeout));
2498 execution_quantum = engine->props.timeslice_duration_ms * 1000;
2499 preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2501 __guc_context_policy_start_klv(&policy, ce->guc_id.id);
2503 __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
2504 __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
2505 __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
2507 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2508 __guc_context_policy_add_preempt_to_idle(&policy, 1);
2510 ret = __guc_context_set_context_policies(guc, &policy, loop);
2512 spin_lock_irqsave(&ce->guc_state.lock, flags);
2514 set_context_policy_required(ce);
2516 clr_context_policy_required(ce);
2517 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2522 static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
2523 struct guc_lrc_desc_v69 *desc)
2525 desc->policy_flags = 0;
2527 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2528 desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
2530 /* NB: For both of these, zero means disabled. */
2531 GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
2532 desc->execution_quantum));
2533 GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
2534 desc->preemption_timeout));
2535 desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
2536 desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2539 static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
2542 * this matches the mapping we do in map_i915_prio_to_guc_prio()
2543 * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
2549 case GUC_CLIENT_PRIORITY_KMD_NORMAL:
2550 return GEN12_CTX_PRIORITY_NORMAL;
2551 case GUC_CLIENT_PRIORITY_NORMAL:
2552 return GEN12_CTX_PRIORITY_LOW;
2553 case GUC_CLIENT_PRIORITY_HIGH:
2554 case GUC_CLIENT_PRIORITY_KMD_HIGH:
2555 return GEN12_CTX_PRIORITY_HIGH;
2559 static void prepare_context_registration_info_v69(struct intel_context *ce)
2561 struct intel_engine_cs *engine = ce->engine;
2562 struct intel_guc *guc = &engine->gt->uc.guc;
2563 u32 ctx_id = ce->guc_id.id;
2564 struct guc_lrc_desc_v69 *desc;
2565 struct intel_context *child;
2567 GEM_BUG_ON(!engine->mask);
2570 * Ensure the LRC and CT vmas are in the same region, as the write barrier
2571 * is done based on the CT vma region.
2573 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2574 i915_gem_object_is_lmem(ce->ring->vma->obj));
2576 desc = __get_lrc_desc_v69(guc, ctx_id);
2578 desc->engine_class = engine_class_to_guc_class(engine->class);
2579 desc->engine_submit_mask = engine->logical_mask;
2580 desc->hw_context_desc = ce->lrc.lrca;
2581 desc->priority = ce->guc_state.prio;
2582 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2583 guc_context_policy_init_v69(engine, desc);
2586 * If the context is a parent, we need to register a process descriptor
2587 * describing a work queue and register all child contexts.
2589 if (intel_context_is_parent(ce)) {
2590 struct guc_process_desc_v69 *pdesc;
2592 ce->parallel.guc.wqi_tail = 0;
2593 ce->parallel.guc.wqi_head = 0;
2595 desc->process_desc = i915_ggtt_offset(ce->state) +
2596 __get_parent_scratch_offset(ce);
2597 desc->wq_addr = i915_ggtt_offset(ce->state) +
2598 __get_wq_offset(ce);
2599 desc->wq_size = WQ_SIZE;
2601 pdesc = __get_process_desc_v69(ce);
2602 memset(pdesc, 0, sizeof(*(pdesc)));
2603 pdesc->stage_id = ce->guc_id.id;
2604 pdesc->wq_base_addr = desc->wq_addr;
2605 pdesc->wq_size_bytes = desc->wq_size;
2606 pdesc->wq_status = WQ_STATUS_ACTIVE;
2608 ce->parallel.guc.wq_head = &pdesc->head;
2609 ce->parallel.guc.wq_tail = &pdesc->tail;
2610 ce->parallel.guc.wq_status = &pdesc->wq_status;
2612 for_each_child(ce, child) {
2613 desc = __get_lrc_desc_v69(guc, child->guc_id.id);
2615 desc->engine_class =
2616 engine_class_to_guc_class(engine->class);
2617 desc->hw_context_desc = child->lrc.lrca;
2618 desc->priority = ce->guc_state.prio;
2619 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2620 guc_context_policy_init_v69(engine, desc);
2623 clear_children_join_go_memory(ce);
2627 static void prepare_context_registration_info_v70(struct intel_context *ce,
2628 struct guc_ctxt_registration_info *info)
2630 struct intel_engine_cs *engine = ce->engine;
2631 struct intel_guc *guc = &engine->gt->uc.guc;
2632 u32 ctx_id = ce->guc_id.id;
2634 GEM_BUG_ON(!engine->mask);
2637 * Ensure the LRC and CT vmas are in the same region, as the write barrier
2638 * is done based on the CT vma region.
2640 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2641 i915_gem_object_is_lmem(ce->ring->vma->obj));
2643 memset(info, 0, sizeof(*info));
2644 info->context_idx = ctx_id;
2645 info->engine_class = engine_class_to_guc_class(engine->class);
2646 info->engine_submit_mask = engine->logical_mask;
2648 * NB: GuC interface supports 64 bit LRCA even though i915/HW
2649 * only supports 32 bit currently.
2651 info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
2652 info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
2653 if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
2654 info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
2655 info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
2658 * If the context is a parent, we need to register a process descriptor
2659 * describing a work queue and register all child contexts.
2661 if (intel_context_is_parent(ce)) {
2662 struct guc_sched_wq_desc *wq_desc;
2663 u64 wq_desc_offset, wq_base_offset;
2665 ce->parallel.guc.wqi_tail = 0;
2666 ce->parallel.guc.wqi_head = 0;
2668 wq_desc_offset = i915_ggtt_offset(ce->state) +
2669 __get_parent_scratch_offset(ce);
2670 wq_base_offset = i915_ggtt_offset(ce->state) +
2671 __get_wq_offset(ce);
2672 info->wq_desc_lo = lower_32_bits(wq_desc_offset);
2673 info->wq_desc_hi = upper_32_bits(wq_desc_offset);
2674 info->wq_base_lo = lower_32_bits(wq_base_offset);
2675 info->wq_base_hi = upper_32_bits(wq_base_offset);
2676 info->wq_size = WQ_SIZE;
2678 wq_desc = __get_wq_desc_v70(ce);
2679 memset(wq_desc, 0, sizeof(*wq_desc));
2680 wq_desc->wq_status = WQ_STATUS_ACTIVE;
2682 ce->parallel.guc.wq_head = &wq_desc->head;
2683 ce->parallel.guc.wq_tail = &wq_desc->tail;
2684 ce->parallel.guc.wq_status = &wq_desc->wq_status;
2686 clear_children_join_go_memory(ce);
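/*
 * Register @ce with the GuC, deregistering first if this ctx_id is still
 * mapped to a stale context (e.g. a stolen guc_id or a changed LRC
 * descriptor address).
 */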
2690 static int try_context_registration(struct intel_context *ce, bool loop)
2692 struct intel_engine_cs *engine = ce->engine;
2693 struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
2694 struct intel_guc *guc = &engine->gt->uc.guc;
2695 intel_wakeref_t wakeref;
2696 u32 ctx_id = ce->guc_id.id;
2697 bool context_registered;
2700 GEM_BUG_ON(!sched_state_is_init(ce));
2702 context_registered = ctx_id_mapped(guc, ctx_id);
2704 clr_ctx_id_mapping(guc, ctx_id);
2705 set_ctx_id_mapping(guc, ctx_id, ce);
2708 * The context_lookup xarray is used to determine if the hardware
2709 * context is currently registered. There are two cases in which it
2710 * could be registered: either the guc_id has been stolen from another
2711 * context, or the LRC descriptor address of this context has changed. In
2712 * either case the context needs to be deregistered with the GuC before
2713 * registering this context.
2715 if (context_registered) {
2717 unsigned long flags;
2719 trace_intel_context_steal_guc_id(ce);
2722 /* Seal race with Reset */
2723 spin_lock_irqsave(&ce->guc_state.lock, flags);
2724 disabled = submission_disabled(guc);
2725 if (likely(!disabled)) {
2726 set_context_wait_for_deregister_to_register(ce);
2727 intel_context_get(ce);
2729 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2730 if (unlikely(disabled)) {
2731 clr_ctx_id_mapping(guc, ctx_id);
2732 return 0; /* Will get registered later */
2736 * If stealing the guc_id, this ce has the same guc_id as the
2737 * context whose guc_id was stolen.
2739 with_intel_runtime_pm(runtime_pm, wakeref)
2740 ret = deregister_context(ce, ce->guc_id.id);
2741 if (unlikely(ret == -ENODEV))
2742 ret = 0; /* Will get registered later */
2744 with_intel_runtime_pm(runtime_pm, wakeref)
2745 ret = register_context(ce, loop);
2746 if (unlikely(ret == -EBUSY)) {
2747 clr_ctx_id_mapping(guc, ctx_id);
2748 } else if (unlikely(ret == -ENODEV)) {
2749 clr_ctx_id_mapping(guc, ctx_id);
2750 ret = 0; /* Will get registered later */
2757 static int __guc_context_pre_pin(struct intel_context *ce,
2758 struct intel_engine_cs *engine,
2759 struct i915_gem_ww_ctx *ww,
2762 return lrc_pre_pin(ce, engine, ww, vaddr);
2765 static int __guc_context_pin(struct intel_context *ce,
2766 struct intel_engine_cs *engine,
2769 if (i915_ggtt_offset(ce->state) !=
2770 (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
2771 set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2774 * GuC context gets pinned in guc_request_alloc. See that function for
2775 * an explanation of why.
2778 return lrc_pin(ce, engine, vaddr);
2781 static int guc_context_pre_pin(struct intel_context *ce,
2782 struct i915_gem_ww_ctx *ww,
2785 return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
2788 static int guc_context_pin(struct intel_context *ce, void *vaddr)
2790 int ret = __guc_context_pin(ce, ce->engine, vaddr);
2792 if (likely(!ret && !intel_context_is_barrier(ce)))
2793 intel_engine_pm_get(ce->engine);
2798 static void guc_context_unpin(struct intel_context *ce)
2800 struct intel_guc *guc = ce_to_guc(ce);
2802 __guc_context_update_stats(ce);
2803 unpin_guc_id(guc, ce);
2806 if (likely(!intel_context_is_barrier(ce)))
2807 intel_engine_pm_put_async(ce->engine);
2810 static void guc_context_post_unpin(struct intel_context *ce)
2815 static void __guc_context_sched_enable(struct intel_guc *guc,
2816 struct intel_context *ce)
2819 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2824 trace_intel_context_sched_enable(ce);
2826 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2827 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2830 static void __guc_context_sched_disable(struct intel_guc *guc,
2831 struct intel_context *ce,
2835 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2836 guc_id, /* ce->guc_id.id not stable */
2840 GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID);
2842 GEM_BUG_ON(intel_context_is_child(ce));
2843 trace_intel_context_sched_disable(ce);
2845 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2846 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2849 static void guc_blocked_fence_complete(struct intel_context *ce)
2851 lockdep_assert_held(&ce->guc_state.lock);
2853 if (!i915_sw_fence_done(&ce->guc_state.blocked))
2854 i915_sw_fence_complete(&ce->guc_state.blocked);
2857 static void guc_blocked_fence_reinit(struct intel_context *ce)
2859 lockdep_assert_held(&ce->guc_state.lock);
2860 GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
2863 * This fence is always complete unless a pending schedule disable is
2864 * outstanding. We arm the fence here and complete it when we receive
2865 * the pending schedule disable complete message.
2867 i915_sw_fence_fini(&ce->guc_state.blocked);
2868 i915_sw_fence_reinit(&ce->guc_state.blocked);
2869 i915_sw_fence_await(&ce->guc_state.blocked);
2870 i915_sw_fence_commit(&ce->guc_state.blocked);
2873 static u16 prep_context_pending_disable(struct intel_context *ce)
2875 lockdep_assert_held(&ce->guc_state.lock);
2877 set_context_pending_disable(ce);
2878 clr_context_enabled(ce);
2879 guc_blocked_fence_reinit(ce);
2880 intel_context_get(ce);
2882 return ce->guc_id.id;
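/*
 * Block further submission on @ce by disabling scheduling in the GuC. The
 * returned fence completes when the schedule disable G2H is received, or
 * immediately if scheduling was never enabled / submission is disabled.
 */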
2885 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
2887 struct intel_guc *guc = ce_to_guc(ce);
2888 unsigned long flags;
2889 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2890 intel_wakeref_t wakeref;
2894 GEM_BUG_ON(intel_context_is_child(ce));
2896 spin_lock_irqsave(&ce->guc_state.lock, flags);
2898 incr_context_blocked(ce);
2900 enabled = context_enabled(ce);
2901 if (unlikely(!enabled || submission_disabled(guc))) {
2903 clr_context_enabled(ce);
2904 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2905 return &ce->guc_state.blocked;
2909 * We add +2 here as the schedule disable complete CTB handler calls
2910 * intel_context_sched_disable_unpin (-2 to pin_count).
2912 atomic_add(2, &ce->pin_count);
2914 guc_id = prep_context_pending_disable(ce);
2916 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2918 with_intel_runtime_pm(runtime_pm, wakeref)
2919 __guc_context_sched_disable(guc, ce, guc_id);
2921 return &ce->guc_state.blocked;
2924 #define SCHED_STATE_MULTI_BLOCKED_MASK \
2925 (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
2926 #define SCHED_STATE_NO_UNBLOCK \
2927 (SCHED_STATE_MULTI_BLOCKED_MASK | \
2928 SCHED_STATE_PENDING_DISABLE | \
2931 static bool context_cant_unblock(struct intel_context *ce)
2933 lockdep_assert_held(&ce->guc_state.lock);
2935 return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
2936 context_guc_id_invalid(ce) ||
2937 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
2938 !intel_context_is_pinned(ce);
2941 static void guc_context_unblock(struct intel_context *ce)
2943 struct intel_guc *guc = ce_to_guc(ce);
2944 unsigned long flags;
2945 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2946 intel_wakeref_t wakeref;
2949 GEM_BUG_ON(context_enabled(ce));
2950 GEM_BUG_ON(intel_context_is_child(ce));
2952 spin_lock_irqsave(&ce->guc_state.lock, flags);
2954 if (unlikely(submission_disabled(guc) ||
2955 context_cant_unblock(ce))) {
2959 set_context_pending_enable(ce);
2960 set_context_enabled(ce);
2961 intel_context_get(ce);
2964 decr_context_blocked(ce);
2966 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2969 with_intel_runtime_pm(runtime_pm, wakeref)
2970 __guc_context_sched_enable(guc, ce);
2974 static void guc_context_cancel_request(struct intel_context *ce,
2975 struct i915_request *rq)
2977 struct intel_context *block_context =
2978 request_to_scheduling_context(rq);
2980 if (i915_sw_fence_signaled(&rq->submit)) {
2981 struct i915_sw_fence *fence;
2983 intel_context_get(ce);
2984 fence = guc_context_block(block_context);
2985 i915_sw_fence_wait(fence);
2986 if (!i915_request_completed(rq)) {
2987 __i915_request_skip(rq);
2988 guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
2992 guc_context_unblock(block_context);
2993 intel_context_put(ce);
2997 static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
2999 u32 preemption_timeout)
3001 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
3002 struct context_policy policy;
3004 __guc_context_policy_start_klv(&policy, guc_id);
3005 __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
3006 __guc_context_set_context_policies(guc, &policy, true);
3009 INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
3014 intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
3019 guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
3020 unsigned int preempt_timeout_ms)
3022 struct intel_guc *guc = ce_to_guc(ce);
3023 struct intel_runtime_pm *runtime_pm =
3024 &ce->engine->gt->i915->runtime_pm;
3025 intel_wakeref_t wakeref;
3026 unsigned long flags;
3028 GEM_BUG_ON(intel_context_is_child(ce));
3030 guc_flush_submissions(guc);
3032 spin_lock_irqsave(&ce->guc_state.lock, flags);
3033 set_context_banned(ce);
3035 if (submission_disabled(guc) ||
3036 (!context_enabled(ce) && !context_pending_disable(ce))) {
3037 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3039 guc_cancel_context_requests(ce);
3040 intel_engine_signal_breadcrumbs(ce->engine);
3041 } else if (!context_pending_disable(ce)) {
3045 * We add +2 here as the schedule disable complete CTB handler
3046 * calls intel_context_sched_disable_unpin (-2 to pin_count).
3048 atomic_add(2, &ce->pin_count);
3050 guc_id = prep_context_pending_disable(ce);
3051 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3054 * In addition to disabling scheduling, set the preemption
3055 * timeout to the minimum value (1 us) so the banned context
3056 * gets kicked off the HW ASAP.
3058 with_intel_runtime_pm(runtime_pm, wakeref) {
3059 __guc_context_set_preemption_timeout(guc, guc_id,
3060 preempt_timeout_ms);
3061 __guc_context_sched_disable(guc, ce, guc_id);
3064 if (!context_guc_id_invalid(ce))
3065 with_intel_runtime_pm(runtime_pm, wakeref)
3066 __guc_context_set_preemption_timeout(guc,
3068 preempt_timeout_ms);
3069 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3073 static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
3074 unsigned long flags)
3075 __releases(ce->guc_state.lock)
3077 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
3078 intel_wakeref_t wakeref;
3081 lockdep_assert_held(&ce->guc_state.lock);
3082 guc_id = prep_context_pending_disable(ce);
3084 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3086 with_intel_runtime_pm(runtime_pm, wakeref)
3087 __guc_context_sched_disable(guc, ce, guc_id);
3090 static bool bypass_sched_disable(struct intel_guc *guc,
3091 struct intel_context *ce)
3093 lockdep_assert_held(&ce->guc_state.lock);
3094 GEM_BUG_ON(intel_context_is_child(ce));
3096 if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
3097 !ctx_id_mapped(guc, ce->guc_id.id)) {
3098 clr_context_enabled(ce);
3102 return !context_enabled(ce);
3105 static void __delay_sched_disable(struct work_struct *wrk)
3107 struct intel_context *ce =
3108 container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);
3109 struct intel_guc *guc = ce_to_guc(ce);
3110 unsigned long flags;
3112 spin_lock_irqsave(&ce->guc_state.lock, flags);
3114 if (bypass_sched_disable(guc, ce)) {
3115 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3116 intel_context_sched_disable_unpin(ce);
3118 do_sched_disable(guc, ce, flags);
3122 static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
3125 * Parent contexts are perma-pinned; if we are unpinning, do the schedule
3126 * disable immediately.
3128 if (intel_context_is_parent(ce))
3132 * If we are beyond the threshold for available guc_ids, do the schedule disable immediately.
3134 return guc->submission_state.guc_ids_in_use >
3135 guc->submission_state.sched_disable_gucid_threshold;
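/*
 * Unpin-time schedule disable: either bypass straight to the unpin, defer
 * the disable by sched_disable_delay_ms via delayed work, or issue the
 * schedule disable H2G immediately for closed contexts or under guc_id
 * pressure.
 */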
3138 static void guc_context_sched_disable(struct intel_context *ce)
3140 struct intel_guc *guc = ce_to_guc(ce);
3141 u64 delay = guc->submission_state.sched_disable_delay_ms;
3142 unsigned long flags;
3144 spin_lock_irqsave(&ce->guc_state.lock, flags);
3146 if (bypass_sched_disable(guc, ce)) {
3147 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3148 intel_context_sched_disable_unpin(ce);
3149 } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
3151 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3152 mod_delayed_work(system_unbound_wq,
3153 &ce->guc_state.sched_disable_delay_work,
3154 msecs_to_jiffies(delay));
3156 do_sched_disable(guc, ce, flags);
3160 static void guc_context_close(struct intel_context *ce)
3162 unsigned long flags;
3164 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
3165 cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))
3166 __delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work);
3168 spin_lock_irqsave(&ce->guc_state.lock, flags);
3169 set_context_close_done(ce);
3170 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
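/*
 * Final teardown of the GuC-side context state: mark the context destroyed,
 * take a GT PM reference for the in-flight H2G and send the deregister
 * request. If submission is disabled the context is freed on the spot.
 */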
3173 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
3175 struct intel_guc *guc = ce_to_guc(ce);
3176 struct intel_gt *gt = guc_to_gt(guc);
3177 unsigned long flags;
3180 GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
3181 GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
3182 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
3183 GEM_BUG_ON(context_enabled(ce));
3185 /* Seal race with Reset */
3186 spin_lock_irqsave(&ce->guc_state.lock, flags);
3187 disabled = submission_disabled(guc);
3188 if (likely(!disabled)) {
3189 __intel_gt_pm_get(gt);
3190 set_context_destroyed(ce);
3191 clr_context_registered(ce);
3193 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3194 if (unlikely(disabled)) {
3195 release_guc_id(guc, ce);
3196 __guc_context_destroy(ce);
3200 deregister_context(ce, ce->guc_id.id);
3203 static void __guc_context_destroy(struct intel_context *ce)
3205 GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
3206 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
3207 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
3208 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
3211 intel_context_fini(ce);
3213 if (intel_engine_is_virtual(ce->engine)) {
3214 struct guc_virtual_engine *ve =
3215 container_of(ce, typeof(*ve), context);
3217 if (ve->base.breadcrumbs)
3218 intel_breadcrumbs_put(ve->base.breadcrumbs);
3222 intel_context_free(ce);
3226 static void guc_flush_destroyed_contexts(struct intel_guc *guc)
3228 struct intel_context *ce;
3229 unsigned long flags;
3231 GEM_BUG_ON(!submission_disabled(guc) &&
3232 guc_submission_initialized(guc));
3234 while (!list_empty(&guc->submission_state.destroyed_contexts)) {
3235 spin_lock_irqsave(&guc->submission_state.lock, flags);
3236 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3237 struct intel_context,
3240 list_del_init(&ce->destroyed_link);
3241 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3246 release_guc_id(guc, ce);
3247 __guc_context_destroy(ce);
3251 static void deregister_destroyed_contexts(struct intel_guc *guc)
3253 struct intel_context *ce;
3254 unsigned long flags;
3256 while (!list_empty(&guc->submission_state.destroyed_contexts)) {
3257 spin_lock_irqsave(&guc->submission_state.lock, flags);
3258 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3259 struct intel_context,
3262 list_del_init(&ce->destroyed_link);
3263 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3268 guc_lrc_desc_unpin(ce);
3272 static void destroyed_worker_func(struct work_struct *w)
3274 struct intel_guc *guc = container_of(w, struct intel_guc,
3275 submission_state.destroyed_worker);
3276 struct intel_gt *gt = guc_to_gt(guc);
3279 with_intel_gt_pm(gt, tmp)
3280 deregister_destroyed_contexts(guc);
3283 static void guc_context_destroy(struct kref *kref)
3285 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3286 struct intel_guc *guc = ce_to_guc(ce);
3287 unsigned long flags;
3291 * If the guc_id is invalid this context has been stolen and we can free
3292 * it immediately. It can also be freed immediately if the context is not
3293 * registered with the GuC or the GuC is in the middle of a reset.
3295 spin_lock_irqsave(&guc->submission_state.lock, flags);
3296 destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
3297 !ctx_id_mapped(guc, ce->guc_id.id);
3298 if (likely(!destroy)) {
3299 if (!list_empty(&ce->guc_id.link))
3300 list_del_init(&ce->guc_id.link);
3301 list_add_tail(&ce->destroyed_link,
3302 &guc->submission_state.destroyed_contexts);
3304 __release_guc_id(guc, ce);
3306 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3307 if (unlikely(destroy)) {
3308 __guc_context_destroy(ce);
3313 * We use a worker to issue the H2G to deregister the context as we can
3314 * take the GT PM for the first time, which isn't allowed from an atomic context.
3317 queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
3320 static int guc_context_alloc(struct intel_context *ce)
3322 return lrc_alloc(ce, ce->engine);
3325 static void __guc_context_set_prio(struct intel_guc *guc,
3326 struct intel_context *ce)
3328 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
3329 struct context_policy policy;
3331 __guc_context_policy_start_klv(&policy, ce->guc_id.id);
3332 __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
3333 __guc_context_set_context_policies(guc, &policy, true);
3336 INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
3341 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
3345 static void guc_context_set_prio(struct intel_guc *guc,
3346 struct intel_context *ce,
3349 GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
3350 prio > GUC_CLIENT_PRIORITY_NORMAL);
3351 lockdep_assert_held(&ce->guc_state.lock);
3353 if (ce->guc_state.prio == prio || submission_disabled(guc) ||
3354 !context_registered(ce)) {
3355 ce->guc_state.prio = prio;
3359 ce->guc_state.prio = prio;
3360 __guc_context_set_prio(guc, ce);
3362 trace_intel_context_set_prio(ce);
3365 static inline u8 map_i915_prio_to_guc_prio(int prio)
3367 if (prio == I915_PRIORITY_NORMAL)
3368 return GUC_CLIENT_PRIORITY_KMD_NORMAL;
3369 else if (prio < I915_PRIORITY_NORMAL)
3370 return GUC_CLIENT_PRIORITY_NORMAL;
3371 else if (prio < I915_PRIORITY_DISPLAY)
3372 return GUC_CLIENT_PRIORITY_HIGH;
3374 return GUC_CLIENT_PRIORITY_KMD_HIGH;
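/*
 * Per-context accounting of in-flight request priorities: the context is
 * always set to the highest (numerically lowest) GuC priority with a
 * non-zero count, see update_context_prio() below.
 */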
3377 static inline void add_context_inflight_prio(struct intel_context *ce,
3380 lockdep_assert_held(&ce->guc_state.lock);
3381 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3383 ++ce->guc_state.prio_count[guc_prio];
3385 /* Overflow protection */
3386 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3389 static inline void sub_context_inflight_prio(struct intel_context *ce,
3392 lockdep_assert_held(&ce->guc_state.lock);
3393 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3395 /* Underflow protection */
3396 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3398 --ce->guc_state.prio_count[guc_prio];
3401 static inline void update_context_prio(struct intel_context *ce)
3403 struct intel_guc *guc = &ce->engine->gt->uc.guc;
3406 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
3407 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
3409 lockdep_assert_held(&ce->guc_state.lock);
3411 for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
3412 if (ce->guc_state.prio_count[i]) {
3413 guc_context_set_prio(guc, ce, i);
3419 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
3421 /* Lower value is higher priority */
3422 return new_guc_prio < old_guc_prio;
3425 static void add_to_context(struct i915_request *rq)
3427 struct intel_context *ce = request_to_scheduling_context(rq);
3428 u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
3430 GEM_BUG_ON(intel_context_is_child(ce));
3431 GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
3433 spin_lock(&ce->guc_state.lock);
3434 list_move_tail(&rq->sched.link, &ce->guc_state.requests);
3436 if (rq->guc_prio == GUC_PRIO_INIT) {
3437 rq->guc_prio = new_guc_prio;
3438 add_context_inflight_prio(ce, rq->guc_prio);
3439 } else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
3440 sub_context_inflight_prio(ce, rq->guc_prio);
3441 rq->guc_prio = new_guc_prio;
3442 add_context_inflight_prio(ce, rq->guc_prio);
3444 update_context_prio(ce);
3446 spin_unlock(&ce->guc_state.lock);
3449 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
3451 lockdep_assert_held(&ce->guc_state.lock);
3453 if (rq->guc_prio != GUC_PRIO_INIT &&
3454 rq->guc_prio != GUC_PRIO_FINI) {
3455 sub_context_inflight_prio(ce, rq->guc_prio);
3456 update_context_prio(ce);
3458 rq->guc_prio = GUC_PRIO_FINI;
3461 static void remove_from_context(struct i915_request *rq)
3463 struct intel_context *ce = request_to_scheduling_context(rq);
3465 GEM_BUG_ON(intel_context_is_child(ce));
3467 spin_lock_irq(&ce->guc_state.lock);
3469 list_del_init(&rq->sched.link);
3470 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
3472 /* Prevent further __await_execution() registering a cb, then flush */
3473 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
3475 guc_prio_fini(rq, ce);
3477 spin_unlock_irq(&ce->guc_state.lock);
3479 atomic_dec(&ce->guc_id.ref);
3480 i915_request_notify_execute_cb_imm(rq);
3483 static const struct intel_context_ops guc_context_ops = {
3484 .flags = COPS_RUNTIME_CYCLES,
3485 .alloc = guc_context_alloc,
3487 .close = guc_context_close,
3489 .pre_pin = guc_context_pre_pin,
3490 .pin = guc_context_pin,
3491 .unpin = guc_context_unpin,
3492 .post_unpin = guc_context_post_unpin,
3494 .revoke = guc_context_revoke,
3496 .cancel_request = guc_context_cancel_request,
3498 .enter = intel_context_enter_engine,
3499 .exit = intel_context_exit_engine,
3501 .sched_disable = guc_context_sched_disable,
3503 .update_stats = guc_context_update_stats,
3506 .destroy = guc_context_destroy,
3508 .create_virtual = guc_create_virtual,
3509 .create_parallel = guc_create_parallel,
3512 static void submit_work_cb(struct irq_work *wrk)
3514 struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
3516 might_lock(&rq->engine->sched_engine->lock);
3517 i915_sw_fence_complete(&rq->submit);
3520 static void __guc_signal_context_fence(struct intel_context *ce)
3522 struct i915_request *rq, *rn;
3524 lockdep_assert_held(&ce->guc_state.lock);
3526 if (!list_empty(&ce->guc_state.fences))
3527 trace_intel_context_fence_release(ce);
3530 * Use an IRQ to ensure locking order of sched_engine->lock ->
3531 * ce->guc_state.lock is preserved.
3533 list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
3535 list_del(&rq->guc_fence_link);
3536 irq_work_queue(&rq->submit_work);
3539 INIT_LIST_HEAD(&ce->guc_state.fences);
3542 static void guc_signal_context_fence(struct intel_context *ce)
3544 unsigned long flags;
3546 GEM_BUG_ON(intel_context_is_child(ce));
3548 spin_lock_irqsave(&ce->guc_state.lock, flags);
3549 clr_context_wait_for_deregister_to_register(ce);
3550 __guc_signal_context_fence(ce);
3551 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3554 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
3556 return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
3557 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
3558 !submission_disabled(ce_to_guc(ce));
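/*
 * One-time GuC-specific init for a context: derive the GuC priority from the
 * gem context and initialize the delayed schedule disable worker.
 */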
3561 static void guc_context_init(struct intel_context *ce)
3563 const struct i915_gem_context *ctx;
3564 int prio = I915_CONTEXT_DEFAULT_PRIORITY;
3567 ctx = rcu_dereference(ce->gem_context);
3569 prio = ctx->sched.priority;
3572 ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
3574 INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work,
3575 __delay_sched_disable);
3577 set_bit(CONTEXT_GUC_INIT, &ce->flags);
3580 static int guc_request_alloc(struct i915_request *rq)
3582 struct intel_context *ce = request_to_scheduling_context(rq);
3583 struct intel_guc *guc = ce_to_guc(ce);
3584 unsigned long flags;
3587 GEM_BUG_ON(!intel_context_is_pinned(rq->context));
3590 * Flush enough space to reduce the likelihood of waiting after
3591 * we start building the request - in which case we will just
3592 * have to repeat work.
3594 rq->reserved_space += GUC_REQUEST_SIZE;
3597 * Note that after this point, we have committed to using
3598 * this request as it is being used to both track the
3599 * state of engine initialisation and liveness of the
3600 * golden renderstate above. Think twice before you try
3601 * to cancel/unwind this request now.
3604 /* Unconditionally invalidate GPU caches and TLBs. */
3605 ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
3609 rq->reserved_space -= GUC_REQUEST_SIZE;
3611 if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
3612 guc_context_init(ce);
3615 * If the context gets closed while the execbuf is ongoing, the context
3616 * close code will race with the below code to cancel the delayed work.
3617 * If the context close wins the race and cancels the work, it will
3618 * immediately call the sched disable (see guc_context_close), so there
3619 * is a chance we can get past this check while the sched_disable code
3620 * is being executed. To make sure that code completes before we check
3621 * the status further down, we wait for the close process to complete.
3622 * Otherwise, this code path could send a request down thinking that the
3623 * context is still in schedule-enable mode while the GuC ends up dropping
3624 * the request completely, because the disable went from the context_close
3625 * path straight to the GuC just prior. In the event the CT is full, we
3626 * could potentially need to wait up to 1.5 seconds.
3628 if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work))
3629 intel_context_sched_disable_unpin(ce);
3630 else if (intel_context_is_closed(ce))
3631 if (wait_for(context_close_done(ce), 1500))
3632 guc_warn(guc, "timed out waiting on context sched close before realloc\n");
3634 * Call pin_guc_id here rather than in the pinning step as, with
3635 * dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
3636 * guc_id and creating horrible race conditions. This is especially bad
3637 * when guc_ids are being stolen due to over subscription. By the time
3638 * this function is reached, it is guaranteed that the guc_id will be
3639 * persistent until the generated request is retired, thus sealing these
3640 * race conditions. It is still safe to fail here if guc_ids are
3641 * exhausted and return -EAGAIN to the user, indicating that they can try
3642 * again in the future.
3644 * There is no need for a lock here as the timeline mutex ensures at
3645 * most one context can be executing this code path at once. The
3646 * guc_id_ref is incremented once for every request in flight and
3647 * decremented on each retire. When it is zero, a lock around the
3648 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
3650 if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
3653 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
3654 if (unlikely(ret < 0))
3656 if (context_needs_register(ce, !!ret)) {
3657 ret = try_context_registration(ce, true);
3658 if (unlikely(ret)) { /* unwind */
3659 if (ret == -EPIPE) {
3660 disable_submission(guc);
3661 goto out; /* GPU will be reset */
3663 atomic_dec(&ce->guc_id.ref);
3664 unpin_guc_id(guc, ce);
3669 clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
3673 * We block all requests on this context if a G2H is pending for a
3674 * schedule disable or context deregistration as the GuC will fail a
3675 * schedule enable or context registration if either G2H is pending
3676 * respectively. Once a G2H returns, the fence that is blocking these
3677 * requests is released (see guc_signal_context_fence).
3679 spin_lock_irqsave(&ce->guc_state.lock, flags);
3680 if (context_wait_for_deregister_to_register(ce) ||
3681 context_pending_disable(ce)) {
3682 init_irq_work(&rq->submit_work, submit_work_cb);
3683 i915_sw_fence_await(&rq->submit);
3685 list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
3687 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3692 static int guc_virtual_context_pre_pin(struct intel_context *ce,
3693 struct i915_gem_ww_ctx *ww,
3696 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3698 return __guc_context_pre_pin(ce, engine, ww, vaddr);
3701 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
3703 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3704 int ret = __guc_context_pin(ce, engine, vaddr);
3705 intel_engine_mask_t tmp, mask = ce->engine->mask;
3708 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3709 intel_engine_pm_get(engine);
3714 static void guc_virtual_context_unpin(struct intel_context *ce)
3716 intel_engine_mask_t tmp, mask = ce->engine->mask;
3717 struct intel_engine_cs *engine;
3718 struct intel_guc *guc = ce_to_guc(ce);
3720 GEM_BUG_ON(context_enabled(ce));
3721 GEM_BUG_ON(intel_context_is_barrier(ce));
3723 unpin_guc_id(guc, ce);
3726 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3727 intel_engine_pm_put_async(engine);
3730 static void guc_virtual_context_enter(struct intel_context *ce)
3732 intel_engine_mask_t tmp, mask = ce->engine->mask;
3733 struct intel_engine_cs *engine;
3735 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3736 intel_engine_pm_get(engine);
3738 intel_timeline_enter(ce->timeline);
3741 static void guc_virtual_context_exit(struct intel_context *ce)
3743 intel_engine_mask_t tmp, mask = ce->engine->mask;
3744 struct intel_engine_cs *engine;
3746 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3747 intel_engine_pm_put(engine);
3749 intel_timeline_exit(ce->timeline);
3752 static int guc_virtual_context_alloc(struct intel_context *ce)
3754 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3756 return lrc_alloc(ce, engine);
3759 static const struct intel_context_ops virtual_guc_context_ops = {
3760 .flags = COPS_RUNTIME_CYCLES,
3761 .alloc = guc_virtual_context_alloc,
3763 .close = guc_context_close,
3765 .pre_pin = guc_virtual_context_pre_pin,
3766 .pin = guc_virtual_context_pin,
3767 .unpin = guc_virtual_context_unpin,
3768 .post_unpin = guc_context_post_unpin,
3770 .revoke = guc_context_revoke,
3772 .cancel_request = guc_context_cancel_request,
3774 .enter = guc_virtual_context_enter,
3775 .exit = guc_virtual_context_exit,
3777 .sched_disable = guc_context_sched_disable,
3778 .update_stats = guc_context_update_stats,
3780 .destroy = guc_context_destroy,
3782 .get_sibling = guc_virtual_get_sibling,
3785 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
3787 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3788 struct intel_guc *guc = ce_to_guc(ce);
3791 GEM_BUG_ON(!intel_context_is_parent(ce));
3792 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3794 ret = pin_guc_id(guc, ce);
3795 if (unlikely(ret < 0))
3798 return __guc_context_pin(ce, engine, vaddr);
3801 static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
3803 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3805 GEM_BUG_ON(!intel_context_is_child(ce));
3806 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3808 __intel_context_pin(ce->parallel.parent);
3809 return __guc_context_pin(ce, engine, vaddr);
3812 static void guc_parent_context_unpin(struct intel_context *ce)
3814 struct intel_guc *guc = ce_to_guc(ce);
3816 GEM_BUG_ON(context_enabled(ce));
3817 GEM_BUG_ON(intel_context_is_barrier(ce));
3818 GEM_BUG_ON(!intel_context_is_parent(ce));
3819 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3821 unpin_guc_id(guc, ce);
3825 static void guc_child_context_unpin(struct intel_context *ce)
3827 GEM_BUG_ON(context_enabled(ce));
3828 GEM_BUG_ON(intel_context_is_barrier(ce));
3829 GEM_BUG_ON(!intel_context_is_child(ce));
3830 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3835 static void guc_child_context_post_unpin(struct intel_context *ce)
3837 GEM_BUG_ON(!intel_context_is_child(ce));
3838 GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
3839 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3842 intel_context_unpin(ce->parallel.parent);
3845 static void guc_child_context_destroy(struct kref *kref)
3847 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3849 __guc_context_destroy(ce);
3852 static const struct intel_context_ops virtual_parent_context_ops = {
3853 .alloc = guc_virtual_context_alloc,
3855 .close = guc_context_close,
3857 .pre_pin = guc_context_pre_pin,
3858 .pin = guc_parent_context_pin,
3859 .unpin = guc_parent_context_unpin,
3860 .post_unpin = guc_context_post_unpin,
3862 .revoke = guc_context_revoke,
3864 .cancel_request = guc_context_cancel_request,
3866 .enter = guc_virtual_context_enter,
3867 .exit = guc_virtual_context_exit,
3869 .sched_disable = guc_context_sched_disable,
3871 .destroy = guc_context_destroy,
3873 .get_sibling = guc_virtual_get_sibling,
3876 static const struct intel_context_ops virtual_child_context_ops = {
3877 .alloc = guc_virtual_context_alloc,
3879 .pre_pin = guc_context_pre_pin,
3880 .pin = guc_child_context_pin,
3881 .unpin = guc_child_context_unpin,
3882 .post_unpin = guc_child_context_post_unpin,
3884 .cancel_request = guc_context_cancel_request,
3886 .enter = guc_virtual_context_enter,
3887 .exit = guc_virtual_context_exit,
3889 .destroy = guc_child_context_destroy,
3891 .get_sibling = guc_virtual_get_sibling,
3895 * The below override of the breadcrumbs is enabled when the user configures a
3896 * context for parallel submission (multi-lrc, parent-child).
3898 * The overridden breadcrumbs implements an algorithm which allows the GuC to
3899 * safely preempt all the hw contexts configured for parallel submission
3900 * between each BB. The contract between the i915 and GuC is that if the parent
3901 * context can be preempted, all the children can be preempted, and the GuC will
3902 * always try to preempt the parent before the children. A handshake between the
3903 * parent / children breadcrumbs ensures the i915 holds up its end of the deal
3904 * creating a window to preempt between each set of BBs.
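/*
 * Loosely, the handshake works on the parent's scratch page (see
 * clear_children_join_go_memory() above): each child signals a join
 * semaphore and waits on the go semaphore, while the parent waits for all
 * joins before writing go, as emitted by the helpers declared below.
 */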
3906 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
3907 u64 offset, u32 len,
3908 const unsigned int flags);
3909 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
3910 u64 offset, u32 len,
3911 const unsigned int flags);
3913 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
3916 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
3919 static struct intel_context *
3920 guc_create_parallel(struct intel_engine_cs **engines,
3921 unsigned int num_siblings,
3924 struct intel_engine_cs **siblings = NULL;
3925 struct intel_context *parent = NULL, *ce, *err;
3928 siblings = kmalloc_array(num_siblings,
3932 return ERR_PTR(-ENOMEM);
3934 for (i = 0; i < width; ++i) {
3935 for (j = 0; j < num_siblings; ++j)
3936 siblings[j] = engines[i * num_siblings + j];
3938 ce = intel_engine_create_virtual(siblings, num_siblings,
3947 parent->ops = &virtual_parent_context_ops;
3949 ce->ops = &virtual_child_context_ops;
3950 intel_context_bind_parent_child(parent, ce);
3954 parent->parallel.fence_context = dma_fence_context_alloc(1);
3956 parent->engine->emit_bb_start =
3957 emit_bb_start_parent_no_preempt_mid_batch;
3958 parent->engine->emit_fini_breadcrumb =
3959 emit_fini_breadcrumb_parent_no_preempt_mid_batch;
3960 parent->engine->emit_fini_breadcrumb_dw =
3961 12 + 4 * parent->parallel.number_children;
3962 for_each_child(parent, ce) {
3963 ce->engine->emit_bb_start =
3964 emit_bb_start_child_no_preempt_mid_batch;
3965 ce->engine->emit_fini_breadcrumb =
3966 emit_fini_breadcrumb_child_no_preempt_mid_batch;
3967 ce->engine->emit_fini_breadcrumb_dw = 16;
3975 intel_context_put(parent);
3981 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
3983 struct intel_engine_cs *sibling;
3984 intel_engine_mask_t tmp, mask = b->engine_mask;
3985 bool result = false;
3987 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3988 result |= intel_engine_irq_enable(sibling);
3994 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
3996 struct intel_engine_cs *sibling;
3997 intel_engine_mask_t tmp, mask = b->engine_mask;
3999 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
4000 intel_engine_irq_disable(sibling);
4003 static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
4008 * In GuC submission mode we do not know which physical engine a request
4009 * will be scheduled on; this creates a problem because the breadcrumb
4010 * interrupt is per physical engine. To work around this we attach
4011 * requests and direct all breadcrumb interrupts to the first instance
4012 * of an engine per class. In addition all breadcrumb interrupts are
4013 * enabled / disabled across an engine class in unison.
4015 for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
4016 struct intel_engine_cs *sibling =
4017 engine->gt->engine_class[engine->class][i];
4020 if (engine->breadcrumbs != sibling->breadcrumbs) {
4021 intel_breadcrumbs_put(engine->breadcrumbs);
4022 engine->breadcrumbs =
4023 intel_breadcrumbs_get(sibling->breadcrumbs);
4029 if (engine->breadcrumbs) {
4030 engine->breadcrumbs->engine_mask |= engine->mask;
4031 engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
4032 engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
4036 static void guc_bump_inflight_request_prio(struct i915_request *rq,
4039 struct intel_context *ce = request_to_scheduling_context(rq);
4040 u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
4042 /* Short circuit function */
4043 if (prio < I915_PRIORITY_NORMAL ||
4044 rq->guc_prio == GUC_PRIO_FINI ||
4045 (rq->guc_prio != GUC_PRIO_INIT &&
4046 !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
4049 spin_lock(&ce->guc_state.lock);
4050 if (rq->guc_prio != GUC_PRIO_FINI) {
4051 if (rq->guc_prio != GUC_PRIO_INIT)
4052 sub_context_inflight_prio(ce, rq->guc_prio);
4053 rq->guc_prio = new_guc_prio;
4054 add_context_inflight_prio(ce, rq->guc_prio);
4055 update_context_prio(ce);
4057 spin_unlock(&ce->guc_state.lock);
4060 static void guc_retire_inflight_request_prio(struct i915_request *rq)
4062 struct intel_context *ce = request_to_scheduling_context(rq);
4064 spin_lock(&ce->guc_state.lock);
4065 guc_prio_fini(rq, ce);
4066 spin_unlock(&ce->guc_state.lock);
4069 static void sanitize_hwsp(struct intel_engine_cs *engine)
4071 struct intel_timeline *tl;
4073 list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
4074 intel_timeline_reset_seqno(tl);
4077 static void guc_sanitize(struct intel_engine_cs *engine)
4080 * Poison residual state on resume, in case the suspend didn't!
4082 * We have to assume that across suspend/resume (or other loss
4083 * of control) the contents of our pinned buffers have been
4084 * lost, replaced by garbage. Since this doesn't always happen,
4085 * let's poison such state so that we more quickly spot when
4086 * we falsely assume it has been preserved.
4088 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
4089 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
4092 * The kernel_context HWSP is stored in the status_page. As above,
4093 * that may be lost on resume/initialisation, and so we need to
4094 * reset the value in the HWSP.
4096 sanitize_hwsp(engine);
4098 /* And scrub the dirty cachelines for the HWSP */
4099 drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
4101 intel_engine_reset_pinned_contexts(engine);
4104 static void setup_hwsp(struct intel_engine_cs *engine)
4106 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
4108 ENGINE_WRITE_FW(engine,
4110 i915_ggtt_offset(engine->status_page.vma));
4113 static void start_engine(struct intel_engine_cs *engine)
4115 ENGINE_WRITE_FW(engine,
4117 _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
4119 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
4120 ENGINE_POSTING_READ(engine, RING_MI_MODE);
4123 static int guc_resume(struct intel_engine_cs *engine)
4125 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
4127 intel_mocs_init_engine(engine);
4129 intel_breadcrumbs_reset(engine->breadcrumbs);
4132 start_engine(engine);
4134 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
4135 xehp_enable_ccs_engines(engine);
4140 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
4142 return !sched_engine->tasklet.callback;
4145 static void guc_set_default_submission(struct intel_engine_cs *engine)
4147 engine->submit_request = guc_submit_request;
4150 static inline int guc_kernel_context_pin(struct intel_guc *guc,
4151 struct intel_context *ce)
4156 * Note: we purposefully do not check the returns below because
4157 * the registration can only fail if a reset is just starting.
4158 * This is called at the end of a reset so presumably another reset
4159 * isn't happening and even if it did this code would be run again.
4162 if (context_guc_id_invalid(ce)) {
4163 ret = pin_guc_id(guc, ce);
4169 if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
4170 guc_context_init(ce);
4172 ret = try_context_registration(ce, true);
4174 unpin_guc_id(guc, ce);
4179 static inline int guc_init_submission(struct intel_guc *guc)
4181 struct intel_gt *gt = guc_to_gt(guc);
4182 struct intel_engine_cs *engine;
4183 enum intel_engine_id id;
4185 /* make sure all descriptors are clean... */
4186 xa_destroy(&guc->context_lookup);
4189 * A reset might have occurred while we had a pending stalled request,
4190 * so make sure we clean that up.
4192 guc->stalled_request = NULL;
4193 guc->submission_stall_reason = STALL_NONE;
4196 * Some contexts might have been pinned before we enabled GuC
4197 * submission, so we need to add them to the GuC bookkeeping.
4198 * Also, after a reset of the GuC we want to make sure that the
4199 * information shared with GuC is properly reset. The kernel LRCs are
4200 * not attached to the gem_context, so they need to be added separately.
4202 for_each_engine(engine, gt, id) {
4203 struct intel_context *ce;
4205 list_for_each_entry(ce, &engine->pinned_contexts_list,
4206 pinned_contexts_link) {
4207 int ret = guc_kernel_context_pin(guc, ce);
4210 /* No point in trying to clean up as i915 will wedge on failure */
4219 static void guc_release(struct intel_engine_cs *engine)
4221 engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
4223 intel_engine_cleanup_common(engine);
4224 lrc_fini_wa_ctx(engine);
4227 static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
4229 struct intel_engine_cs *e;
4230 intel_engine_mask_t tmp, mask = engine->mask;
4232 for_each_engine_masked(e, engine->gt, mask, tmp)
4236 static void guc_default_vfuncs(struct intel_engine_cs *engine)
4238 /* Default vfuncs which can be overridden by each engine. */
4240 engine->resume = guc_resume;
4242 engine->cops = &guc_context_ops;
4243 engine->request_alloc = guc_request_alloc;
4244 engine->add_active_request = add_to_context;
4245 engine->remove_active_request = remove_from_context;
4247 engine->sched_engine->schedule = i915_schedule;
4249 engine->reset.prepare = guc_engine_reset_prepare;
4250 engine->reset.rewind = guc_rewind_nop;
4251 engine->reset.cancel = guc_reset_nop;
4252 engine->reset.finish = guc_reset_nop;
4254 engine->emit_flush = gen8_emit_flush_xcs;
4255 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
4256 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
4257 if (GRAPHICS_VER(engine->i915) >= 12) {
4258 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
4259 engine->emit_flush = gen12_emit_flush_xcs;
4261 engine->set_default_submission = guc_set_default_submission;
4262 engine->busyness = guc_engine_busyness;
4264 engine->flags |= I915_ENGINE_SUPPORTS_STATS;
4265 engine->flags |= I915_ENGINE_HAS_PREEMPTION;
4266 engine->flags |= I915_ENGINE_HAS_TIMESLICES;
4268 /* Wa_14014475959:dg2 */
4269 if (engine->class == COMPUTE_CLASS)
4270 if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
4271 IS_DG2(engine->i915))
4272 engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
4275 * TODO: GuC supports timeslicing and semaphores as well, but they're
4276 * handled by the firmware so some minor tweaks are required before
4277 * enabling.
4278 *
4279 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
4282 engine->emit_bb_start = gen8_emit_bb_start;
4283 if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
4284 engine->emit_bb_start = xehp_emit_bb_start;
4287 static void rcs_submission_override(struct intel_engine_cs *engine)
4289 switch (GRAPHICS_VER(engine->i915)) {
4291 engine->emit_flush = gen12_emit_flush_rcs;
4292 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
4295 engine->emit_flush = gen11_emit_flush_rcs;
4296 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
4299 engine->emit_flush = gen8_emit_flush_rcs;
4300 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
4305 static inline void guc_default_irqs(struct intel_engine_cs *engine)
4307 engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
4308 intel_engine_set_irq_handler(engine, cs_irq_handler);
4311 static void guc_sched_engine_destroy(struct kref *kref)
4313 struct i915_sched_engine *sched_engine =
4314 container_of(kref, typeof(*sched_engine), ref);
4315 struct intel_guc *guc = sched_engine->private_data;
4317 guc->sched_engine = NULL;
4318 tasklet_kill(&sched_engine->tasklet); /* flush the callback */
4319 kfree(sched_engine);
4322 int intel_guc_submission_setup(struct intel_engine_cs *engine)
4324 struct drm_i915_private *i915 = engine->i915;
4325 struct intel_guc *guc = &engine->gt->uc.guc;
4328 * The setup relies on several assumptions (e.g. irqs always enabled)
4329 * that are only valid on gen11+
4331 GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
4333 if (!guc->sched_engine) {
4334 guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
4335 if (!guc->sched_engine)
4338 guc->sched_engine->schedule = i915_schedule;
4339 guc->sched_engine->disabled = guc_sched_engine_disabled;
4340 guc->sched_engine->private_data = guc;
4341 guc->sched_engine->destroy = guc_sched_engine_destroy;
4342 guc->sched_engine->bump_inflight_request_prio =
4343 guc_bump_inflight_request_prio;
4344 guc->sched_engine->retire_inflight_request_prio =
4345 guc_retire_inflight_request_prio;
4346 tasklet_setup(&guc->sched_engine->tasklet,
4347 guc_submission_tasklet);
4349 i915_sched_engine_put(engine->sched_engine);
4350 engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
4352 guc_default_vfuncs(engine);
4353 guc_default_irqs(engine);
4354 guc_init_breadcrumbs(engine);
4356 if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
4357 rcs_submission_override(engine);
4359 lrc_init_wa_ctx(engine);
4361 /* Finally, take ownership and responsibility for cleanup! */
4362 engine->sanitize = guc_sanitize;
4363 engine->release = guc_release;
4368 struct scheduling_policy {
4370 u32 max_words, num_words;
4373 struct guc_update_scheduling_policy h2g;
4376 static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy)
4378 u32 *start = (void *)&policy->h2g;
4379 u32 *end = policy->h2g.data + policy->num_words;
4380 size_t delta = end - start;
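/* The u32 pointer difference counts dwords: the h2g header plus num_words of KLV payload. */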
4385 static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy)
4387 policy->h2g.header.action = INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
4388 policy->max_words = ARRAY_SIZE(policy->h2g.data);
4389 policy->num_words = 0;
4395 static void __guc_scheduling_policy_add_klv(struct scheduling_policy *policy,
4396 u32 action, u32 *data, u32 len)
4398 u32 *klv_ptr = policy->h2g.data + policy->num_words;
4400 GEM_BUG_ON((policy->num_words + 1 + len) > policy->max_words);
4401 *(klv_ptr++) = FIELD_PREP(GUC_KLV_0_KEY, action) |
4402 FIELD_PREP(GUC_KLV_0_LEN, len);
4403 memcpy(klv_ptr, data, sizeof(u32) * len);
4404 policy->num_words += 1 + len;
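/*
 * Resulting layout appended to h2g.data (sketch): one dword packing the KLV
 * key and payload length via GUC_KLV_0_KEY / GUC_KLV_0_LEN, followed by len
 * payload dwords; each call therefore grows the action by 1 + len dwords.
 */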
4408 static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
4409 struct scheduling_policy *policy)
4413 ret = intel_guc_send(guc, (u32 *)&policy->h2g,
4414 __guc_scheduling_policy_action_size(policy));
4416 guc_probe_error(guc, "Failed to configure global scheduling policies: %pe!\n",
4421 if (ret != policy->count) {
4422 guc_warn(guc, "global scheduler policy processed %d of %d KLVs!",
4423 ret, policy->count);
4424 if (ret > policy->count)
4431 static int guc_init_global_schedule_policy(struct intel_guc *guc)
4433 struct scheduling_policy policy;
4434 struct intel_gt *gt = guc_to_gt(guc);
4435 intel_wakeref_t wakeref;
4438 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
4441 __guc_scheduling_policy_start_klv(&policy);
4443 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
4444 u32 yield[] = {
4445 GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION,
4446 GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO,
4447 };
4449 __guc_scheduling_policy_add_klv(&policy,
4450 GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD,
4451 yield, ARRAY_SIZE(yield));
4453 ret = __guc_action_set_scheduling_policies(guc, &policy);
4459 static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
4461 struct intel_gt *gt = guc_to_gt(guc);
4464 if (GRAPHICS_VER(gt->i915) < 12)
4468 val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL;
4472 intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
4475 int intel_guc_submission_enable(struct intel_guc *guc)
4479 /* Semaphore interrupt enable and route to GuC */
4480 guc_route_semaphores(guc, true);
4482 ret = guc_init_submission(guc);
4486 ret = guc_init_engine_stats(guc);
4490 ret = guc_init_global_schedule_policy(guc);
4497 guc_fini_engine_stats(guc);
4499 guc_route_semaphores(guc, false);
4503 /* Note: By the time we're here, GuC may have already been reset */
4504 void intel_guc_submission_disable(struct intel_guc *guc)
4506 guc_cancel_busyness_worker(guc);
4508 /* Semaphore interrupt disable and route to host */
4509 guc_route_semaphores(guc, false);
4512 static bool __guc_submission_supported(struct intel_guc *guc)
4514 /* GuC submission is unavailable for pre-Gen11 */
4515 return intel_guc_is_supported(guc) &&
4516 GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
4519 static bool __guc_submission_selected(struct intel_guc *guc)
4521 struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
4523 if (!intel_guc_submission_is_supported(guc))
4526 return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
4529 int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc)
4531 return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc);
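/*
 * i.e. the guc_ids left for single-LRC contexts once the multi-LRC range is
 * reserved; this is the ceiling for the sched-disable threshold below.
 */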
4535 * This default value of 33 milliseconds (+1 millisecond round up) ensures 30fps or higher
4536 * workloads are able to enjoy the latency reduction when delaying the schedule-disable
4537 * operation. This matches the 30fps game-render + encode (real world) workload this
4538 * knob was tested against.
4540 #define SCHED_DISABLE_DELAY_MS 34
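/*
 * Worked example for the value above: a 30fps client submits roughly every
 * 1000 / 30 ≈ 33.3ms, so 33ms plus the 1ms round up keeps scheduling enabled
 * across back-to-back frames on the same context.
 */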
4543 * A threshold of 75% is a reasonable starting point considering that real world apps
4544 * generally don't get anywhere near this.
4546 #define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc) \
4547 (((intel_guc_sched_disable_gucid_threshold_max(__guc)) * 3) / 4)
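/*
 * Illustrative arithmetic only (the real ceiling comes from num_guc_ids minus
 * the multi-LRC reservation): with a hypothetical maximum of 1000 single-LRC
 * guc_ids, the default threshold evaluates to (1000 * 3) / 4 = 750.
 */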
4549 void intel_guc_submission_init_early(struct intel_guc *guc)
4551 xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
4553 spin_lock_init(&guc->submission_state.lock);
4554 INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
4555 ida_init(&guc->submission_state.guc_ids);
4556 INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
4557 INIT_WORK(&guc->submission_state.destroyed_worker,
4558 destroyed_worker_func);
4559 INIT_WORK(&guc->submission_state.reset_fail_worker,
4560 reset_fail_worker_func);
4562 spin_lock_init(&guc->timestamp.lock);
4563 INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
4565 guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS;
4566 guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
4567 guc->submission_state.sched_disable_gucid_threshold =
4568 NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc);
4569 guc->submission_supported = __guc_submission_supported(guc);
4570 guc->submission_selected = __guc_submission_selected(guc);
4573 static inline struct intel_context *
4574 g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
4576 struct intel_context *ce;
4578 if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
4579 guc_err(guc, "Invalid ctx_id %u\n", ctx_id);
4583 ce = __get_context(guc, ctx_id);
4584 if (unlikely(!ce)) {
4585 guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id);
4589 if (unlikely(intel_context_is_child(ce))) {
4590 guc_err(guc, "Context is child, ctx_id %u\n", ctx_id);
4597 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
4601 struct intel_context *ce;
4604 if (unlikely(len < 1)) {
4605 guc_err(guc, "Invalid length %u\n", len);
4610 ce = g2h_context_lookup(guc, ctx_id);
4614 trace_intel_context_deregister_done(ce);
4616 #ifdef CONFIG_DRM_I915_SELFTEST
4617 if (unlikely(ce->drop_deregister)) {
4618 ce->drop_deregister = false;
4623 if (context_wait_for_deregister_to_register(ce)) {
4624 struct intel_runtime_pm *runtime_pm =
4625 &ce->engine->gt->i915->runtime_pm;
4626 intel_wakeref_t wakeref;
4629 * The previous owner of this guc_id has been deregistered, so it is now
4630 * safe to register this context.
4632 with_intel_runtime_pm(runtime_pm, wakeref)
4633 register_context(ce, true);
4634 guc_signal_context_fence(ce);
4635 intel_context_put(ce);
4636 } else if (context_destroyed(ce)) {
4637 /* Context has been destroyed */
4638 intel_gt_pm_put_async(guc_to_gt(guc));
4639 release_guc_id(guc, ce);
4640 __guc_context_destroy(ce);
4643 decr_outstanding_submission_g2h(guc);
4648 int intel_guc_sched_done_process_msg(struct intel_guc *guc,
4652 struct intel_context *ce;
4653 unsigned long flags;
4656 if (unlikely(len < 2)) {
4657 guc_err(guc, "Invalid length %u\n", len);
4662 ce = g2h_context_lookup(guc, ctx_id);
4666 if (unlikely(context_destroyed(ce) ||
4667 (!context_pending_enable(ce) &&
4668 !context_pending_disable(ce)))) {
4669 guc_err(guc, "Bad context sched_state 0x%x, ctx_id %u\n",
4670 ce->guc_state.sched_state, ctx_id);
4674 trace_intel_context_sched_done(ce);
4676 if (context_pending_enable(ce)) {
4677 #ifdef CONFIG_DRM_I915_SELFTEST
4678 if (unlikely(ce->drop_schedule_enable)) {
4679 ce->drop_schedule_enable = false;
4684 spin_lock_irqsave(&ce->guc_state.lock, flags);
4685 clr_context_pending_enable(ce);
4686 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
4687 } else if (context_pending_disable(ce)) {
4690 #ifdef CONFIG_DRM_I915_SELFTEST
4691 if (unlikely(ce->drop_schedule_disable)) {
4692 ce->drop_schedule_disable = false;
4698 * Unpin must be done before __guc_signal_context_fence,
4699 * otherwise a race exists between the requests getting
4700 * submitted + retired before this unpin completes, resulting in
4701 * the pin_count going to zero and the context still being enabled.
4704 intel_context_sched_disable_unpin(ce);
4706 spin_lock_irqsave(&ce->guc_state.lock, flags);
4707 banned = context_banned(ce);
4708 clr_context_banned(ce);
4709 clr_context_pending_disable(ce);
4710 __guc_signal_context_fence(ce);
4711 guc_blocked_fence_complete(ce);
4712 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
4715 guc_cancel_context_requests(ce);
4716 intel_engine_signal_breadcrumbs(ce->engine);
4720 decr_outstanding_submission_g2h(guc);
4721 intel_context_put(ce);
4726 static void capture_error_state(struct intel_guc *guc,
4727 struct intel_context *ce)
4729 struct intel_gt *gt = guc_to_gt(guc);
4730 struct drm_i915_private *i915 = gt->i915;
4731 intel_wakeref_t wakeref;
4732 intel_engine_mask_t engine_mask;
4734 if (intel_engine_is_virtual(ce->engine)) {
4735 struct intel_engine_cs *e;
4736 intel_engine_mask_t tmp, virtual_mask = ce->engine->mask;
4739 for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) {
4740 bool match = intel_guc_capture_is_matching_engine(gt, ce, e);
4743 intel_engine_set_hung_context(e, ce);
4744 engine_mask |= e->mask;
4745 atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
4750 guc_warn(guc, "No matching physical engine capture for virtual engine context 0x%04X / %s",
4751 ce->guc_id.id, ce->engine->name);
4755 intel_engine_set_hung_context(ce->engine, ce);
4756 engine_mask = ce->engine->mask;
4757 atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
4760 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
4761 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
4764 static void guc_context_replay(struct intel_context *ce)
4766 struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
4768 __guc_reset_context(ce, ce->engine->mask);
4769 tasklet_hi_schedule(&sched_engine->tasklet);
4772 static void guc_handle_context_reset(struct intel_guc *guc,
4773 struct intel_context *ce)
4775 trace_intel_context_reset(ce);
4777 guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
4778 ce->guc_id.id, ce->engine->name,
4779 str_yes_no(intel_context_is_exiting(ce)),
4780 str_yes_no(intel_context_is_banned(ce)));
4782 if (likely(intel_context_is_schedulable(ce))) {
4783 capture_error_state(guc, ce);
4784 guc_context_replay(ce);
4786 guc_info(guc, "Ignoring context reset notification of exiting context 0x%04X on %s",
4787 ce->guc_id.id, ce->engine->name);
4791 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
4792 const u32 *msg, u32 len)
4794 struct intel_context *ce;
4795 unsigned long flags;
4798 if (unlikely(len != 1)) {
4799 guc_err(guc, "Invalid length %u", len);
4806 * The context lookup uses the xarray, and lookups only require the RCU lock,
4807 * not the full spinlock. So take the lock explicitly and keep it until a
4808 * reference on the context has been acquired, ensuring it can't be destroyed
4809 * asynchronously before the reset handling is done.
4811 xa_lock_irqsave(&guc->context_lookup, flags);
4812 ce = g2h_context_lookup(guc, ctx_id);
4814 intel_context_get(ce);
4815 xa_unlock_irqrestore(&guc->context_lookup, flags);
4820 guc_handle_context_reset(guc, ce);
4821 intel_context_put(ce);
4826 int intel_guc_error_capture_process_msg(struct intel_guc *guc,
4827 const u32 *msg, u32 len)
4831 if (unlikely(len != 1)) {
4832 guc_dbg(guc, "Invalid length %u", len);
4836 status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
4837 if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
4838 guc_warn(guc, "No space for error capture");
4840 intel_guc_capture_process(guc);
4845 struct intel_engine_cs *
4846 intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
4848 struct intel_gt *gt = guc_to_gt(guc);
4849 u8 engine_class = guc_class_to_engine_class(guc_class);
4851 /* Class index is checked in class converter */
4852 GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
4854 return gt->engine_class[engine_class][instance];
4857 static void reset_fail_worker_func(struct work_struct *w)
4859 struct intel_guc *guc = container_of(w, struct intel_guc,
4860 submission_state.reset_fail_worker);
4861 struct intel_gt *gt = guc_to_gt(guc);
4862 intel_engine_mask_t reset_fail_mask;
4863 unsigned long flags;
4865 spin_lock_irqsave(&guc->submission_state.lock, flags);
4866 reset_fail_mask = guc->submission_state.reset_fail_mask;
4867 guc->submission_state.reset_fail_mask = 0;
4868 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
4870 if (likely(reset_fail_mask)) {
4871 struct intel_engine_cs *engine;
4872 enum intel_engine_id id;
4875 * GuC is toast at this point - it dead loops after sending the failed
4876 * reset notification. So we need to manually determine the guilty context.
4877 * Note that it should be reliable to do this here because the GuC is
4878 * toast and will not be scheduling behind the KMD's back.
4880 for_each_engine_masked(engine, gt, reset_fail_mask, id)
4881 intel_guc_find_hung_context(engine);
4883 intel_gt_handle_error(gt, reset_fail_mask,
4885 "GuC failed to reset engine mask=0x%x",
4890 int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
4891 const u32 *msg, u32 len)
4893 struct intel_engine_cs *engine;
4894 u8 guc_class, instance;
4896 unsigned long flags;
4898 if (unlikely(len != 3)) {
4899 guc_err(guc, "Invalid length %u", len);
4907 engine = intel_guc_lookup_engine(guc, guc_class, instance);
4908 if (unlikely(!engine)) {
4909 guc_err(guc, "Invalid engine %d:%d", guc_class, instance);
4914 * This is an unexpected failure of a hardware feature. So, log a real
4915 * error message, not just the informational one that comes with the reset.
4917 guc_err(guc, "Engine reset failed on %d:%d (%s) because 0x%08X",
4918 guc_class, instance, engine->name, reason);
4920 spin_lock_irqsave(&guc->submission_state.lock, flags);
4921 guc->submission_state.reset_fail_mask |= engine->mask;
4922 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
4925 * A GT reset flushes this worker queue (G2H handler) so we must use
4926 * another worker to trigger a GT reset.
4928 queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
4933 void intel_guc_find_hung_context(struct intel_engine_cs *engine)
4935 struct intel_guc *guc = &engine->gt->uc.guc;
4936 struct intel_context *ce;
4937 struct i915_request *rq;
4938 unsigned long index;
4939 unsigned long flags;
4941 /* Reset called during driver load? GuC not yet initialised! */
4942 if (unlikely(!guc_submission_initialized(guc)))
4945 xa_lock_irqsave(&guc->context_lookup, flags);
4946 xa_for_each(&guc->context_lookup, index, ce) {
4949 if (!kref_get_unless_zero(&ce->ref))
4952 xa_unlock(&guc->context_lookup);
4954 if (!intel_context_is_pinned(ce))
4957 if (intel_engine_is_virtual(ce->engine)) {
4958 if (!(ce->engine->mask & engine->mask))
4961 if (ce->engine != engine)
4966 spin_lock(&ce->guc_state.lock);
4967 list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
4968 if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
4974 spin_unlock(&ce->guc_state.lock);
4977 intel_engine_set_hung_context(engine, ce);
4979 /* Can only cope with one hang at a time... */
4980 intel_context_put(ce);
4981 xa_lock(&guc->context_lookup);
4986 intel_context_put(ce);
4987 xa_lock(&guc->context_lookup);
4990 xa_unlock_irqrestore(&guc->context_lookup, flags);
4993 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
4994 struct i915_request *hung_rq,
4995 struct drm_printer *m)
4997 struct intel_guc *guc = &engine->gt->uc.guc;
4998 struct intel_context *ce;
4999 unsigned long index;
5000 unsigned long flags;
5002 /* Reset called during driver load? GuC not yet initialised! */
5003 if (unlikely(!guc_submission_initialized(guc)))
5006 xa_lock_irqsave(&guc->context_lookup, flags);
5007 xa_for_each(&guc->context_lookup, index, ce) {
5008 if (!kref_get_unless_zero(&ce->ref))
5011 xa_unlock(&guc->context_lookup);
5013 if (!intel_context_is_pinned(ce))
5016 if (intel_engine_is_virtual(ce->engine)) {
5017 if (!(ce->engine->mask & engine->mask))
5020 if (ce->engine != engine)
5024 spin_lock(&ce->guc_state.lock);
5025 intel_engine_dump_active_requests(&ce->guc_state.requests,
5027 spin_unlock(&ce->guc_state.lock);
5030 intel_context_put(ce);
5031 xa_lock(&guc->context_lookup);
5033 xa_unlock_irqrestore(&guc->context_lookup, flags);
5036 void intel_guc_submission_print_info(struct intel_guc *guc,
5037 struct drm_printer *p)
5039 struct i915_sched_engine *sched_engine = guc->sched_engine;
5041 unsigned long flags;
5046 drm_printf(p, "GuC Submission API Version: %d.%d.%d\n",
5047 guc->submission_version.major, guc->submission_version.minor,
5048 guc->submission_version.patch);
5049 drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
5050 atomic_read(&guc->outstanding_submission_g2h));
5051 drm_printf(p, "GuC tasklet count: %u\n",
5052 atomic_read(&sched_engine->tasklet.count));
5054 spin_lock_irqsave(&sched_engine->lock, flags);
5055 drm_printf(p, "Requests in GuC submit tasklet:\n");
5056 for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
5057 struct i915_priolist *pl = to_priolist(rb);
5058 struct i915_request *rq;
5060 priolist_for_each_request(rq, pl)
5061 drm_printf(p, "guc_id=%u, seqno=%llu\n",
5062 rq->context->guc_id.id,
5065 spin_unlock_irqrestore(&sched_engine->lock, flags);
5066 drm_printf(p, "\n");
5069 static inline void guc_log_context_priority(struct drm_printer *p,
5070 struct intel_context *ce)
5074 drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
5075 drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
5076 for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
5077 i < GUC_CLIENT_PRIORITY_NUM; ++i) {
5078 drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
5079 i, ce->guc_state.prio_count[i]);
5081 drm_printf(p, "\n");
5084 static inline void guc_log_context(struct drm_printer *p,
5085 struct intel_context *ce)
5087 drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
5088 drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
5089 drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
5091 ce->lrc_reg_state[CTX_RING_HEAD]);
5092 drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
5094 ce->lrc_reg_state[CTX_RING_TAIL]);
5095 drm_printf(p, "\t\tContext Pin Count: %u\n",
5096 atomic_read(&ce->pin_count));
5097 drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
5098 atomic_read(&ce->guc_id.ref));
5099 drm_printf(p, "\t\tSchedule State: 0x%x\n",
5100 ce->guc_state.sched_state);
5103 void intel_guc_submission_print_context_info(struct intel_guc *guc,
5104 struct drm_printer *p)
5106 struct intel_context *ce;
5107 unsigned long index;
5108 unsigned long flags;
5110 xa_lock_irqsave(&guc->context_lookup, flags);
5111 xa_for_each(&guc->context_lookup, index, ce) {
5112 GEM_BUG_ON(intel_context_is_child(ce));
5114 guc_log_context(p, ce);
5115 guc_log_context_priority(p, ce);
5117 if (intel_context_is_parent(ce)) {
5118 struct intel_context *child;
5120 drm_printf(p, "\t\tNumber children: %u\n",
5121 ce->parallel.number_children);
5123 if (ce->parallel.guc.wq_status) {
5124 drm_printf(p, "\t\tWQI Head: %u\n",
5125 READ_ONCE(*ce->parallel.guc.wq_head));
5126 drm_printf(p, "\t\tWQI Tail: %u\n",
5127 READ_ONCE(*ce->parallel.guc.wq_tail));
5128 drm_printf(p, "\t\tWQI Status: %u\n",
5129 READ_ONCE(*ce->parallel.guc.wq_status));
5132 if (ce->engine->emit_bb_start ==
5133 emit_bb_start_parent_no_preempt_mid_batch) {
5136 drm_printf(p, "\t\tChildren Go: %u\n",
5137 get_children_go_value(ce));
5138 for (i = 0; i < ce->parallel.number_children; ++i)
5139 drm_printf(p, "\t\tChildren Join: %u\n",
5140 get_children_join_value(ce, i));
5143 for_each_child(ce, child)
5144 guc_log_context(p, child);
5147 xa_unlock_irqrestore(&guc->context_lookup, flags);
5150 static inline u32 get_children_go_addr(struct intel_context *ce)
5152 GEM_BUG_ON(!intel_context_is_parent(ce));
5154 return i915_ggtt_offset(ce->state) +
5155 __get_parent_scratch_offset(ce) +
5156 offsetof(struct parent_scratch, go.semaphore);
5159 static inline u32 get_children_join_addr(struct intel_context *ce,
5162 GEM_BUG_ON(!intel_context_is_parent(ce));
5164 return i915_ggtt_offset(ce->state) +
5165 __get_parent_scratch_offset(ce) +
5166 offsetof(struct parent_scratch, join[child_index].semaphore);
5169 #define PARENT_GO_BB 1
5170 #define PARENT_GO_FINI_BREADCRUMB 0
5171 #define CHILD_GO_BB 1
5172 #define CHILD_GO_FINI_BREADCRUMB 0
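/*
 * Sketch of the handshake implemented by the emitters below (not a formal
 * spec): for the batch buffer start, each child writes PARENT_GO_BB to its
 * join semaphore and then waits for the go semaphore to read CHILD_GO_BB; the
 * parent waits for every child's join semaphore, disables preemption, writes
 * CHILD_GO_BB to the go semaphore and starts its batch. The fini breadcrumbs
 * mirror the dance with the *_FINI_BREADCRUMB values and re-enable preemption.
 */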
5173 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
5174 u64 offset, u32 len,
5175 const unsigned int flags)
5177 struct intel_context *ce = rq->context;
5181 GEM_BUG_ON(!intel_context_is_parent(ce));
5183 cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
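/*
 * Ring space sketch: the fixed 10 dwords cover the preemption disable, the
 * "children go" write and the batch buffer start emitted below; each child
 * adds one 4-dword semaphore wait.
 */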
5187 /* Wait on children */
5188 for (i = 0; i < ce->parallel.number_children; ++i) {
5189 *cs++ = (MI_SEMAPHORE_WAIT |
5190 MI_SEMAPHORE_GLOBAL_GTT |
5192 MI_SEMAPHORE_SAD_EQ_SDD);
5193 *cs++ = PARENT_GO_BB;
5194 *cs++ = get_children_join_addr(ce, i);
5198 /* Turn off preemption */
5199 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
5202 /* Tell children go */
5203 cs = gen8_emit_ggtt_write(cs,
5205 get_children_go_addr(ce),
5209 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
5210 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
5211 *cs++ = lower_32_bits(offset);
5212 *cs++ = upper_32_bits(offset);
5215 intel_ring_advance(rq, cs);
5220 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
5221 u64 offset, u32 len,
5222 const unsigned int flags)
5224 struct intel_context *ce = rq->context;
5225 struct intel_context *parent = intel_context_to_parent(ce);
5228 GEM_BUG_ON(!intel_context_is_child(ce));
5230 cs = intel_ring_begin(rq, 12);
5235 cs = gen8_emit_ggtt_write(cs,
5237 get_children_join_addr(parent,
5238 ce->parallel.child_index),
5241 /* Wait on parent for go */
5242 *cs++ = (MI_SEMAPHORE_WAIT |
5243 MI_SEMAPHORE_GLOBAL_GTT |
5245 MI_SEMAPHORE_SAD_EQ_SDD);
5246 *cs++ = CHILD_GO_BB;
5247 *cs++ = get_children_go_addr(parent);
5250 /* Turn off preemption */
5251 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
5254 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
5255 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
5256 *cs++ = lower_32_bits(offset);
5257 *cs++ = upper_32_bits(offset);
5259 intel_ring_advance(rq, cs);
5265 __emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
5268 struct intel_context *ce = rq->context;
5271 GEM_BUG_ON(!intel_context_is_parent(ce));
5273 /* Wait on children */
5274 for (i = 0; i < ce->parallel.number_children; ++i) {
5275 *cs++ = (MI_SEMAPHORE_WAIT |
5276 MI_SEMAPHORE_GLOBAL_GTT |
5278 MI_SEMAPHORE_SAD_EQ_SDD);
5279 *cs++ = PARENT_GO_FINI_BREADCRUMB;
5280 *cs++ = get_children_join_addr(ce, i);
5284 /* Turn on preemption */
5285 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
5288 /* Tell children go */
5289 cs = gen8_emit_ggtt_write(cs,
5290 CHILD_GO_FINI_BREADCRUMB,
5291 get_children_go_addr(ce),
5298 * If this returns true, a submission of multi-lrc requests had an error and the
5299 * requests need to be skipped. The front end (execbuf IOCTL) should've called
5300 * i915_request_skip which squashes the BB but we still need to emit the fini
5301 * breadcrumb seqno write. At this point we don't know how many of the
5302 * requests in the multi-lrc submission were generated so we can't do the
5303 * handshake between the parent and children (e.g. if 4 requests should be
5304 * generated but the 2nd hit an error, only 1 would be seen by the GuC backend).
5305 * Simply skip the handshake, but still emit the breadcrumb seqno, if an error
5306 * has occurred on any of the requests in the submission / relationship.
5308 static inline bool skip_handshake(struct i915_request *rq)
5310 return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
5313 #define NON_SKIP_LEN 6
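/*
 * NON_SKIP_LEN counts the dwords emitted even when the handshake is skipped:
 * the trailing seqno GGTT write plus the user interrupt (and padding) at the
 * end of each fini breadcrumb below.
 */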
5315 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
5318 struct intel_context *ce = rq->context;
5319 __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
5320 __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
5322 GEM_BUG_ON(!intel_context_is_parent(ce));
5324 if (unlikely(skip_handshake(rq))) {
5326 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
5327 * the NON_SKIP_LEN comes from the length of the emits below.
5329 memset(cs, 0, sizeof(u32) *
5330 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
5331 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
5333 cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
5336 /* Emit fini breadcrumb */
5337 before_fini_breadcrumb_user_interrupt_cs = cs;
5338 cs = gen8_emit_ggtt_write(cs,
5340 i915_request_active_timeline(rq)->hwsp_offset,
5343 /* User interrupt */
5344 *cs++ = MI_USER_INTERRUPT;
5347 /* Ensure our math for skip + emit is correct */
5348 GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
5350 GEM_BUG_ON(start_fini_breadcrumb_cs +
5351 ce->engine->emit_fini_breadcrumb_dw != cs);
5353 rq->tail = intel_ring_offset(rq, cs);
5359 __emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
5362 struct intel_context *ce = rq->context;
5363 struct intel_context *parent = intel_context_to_parent(ce);
5365 GEM_BUG_ON(!intel_context_is_child(ce));
5367 /* Turn on preemption */
5368 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
5372 cs = gen8_emit_ggtt_write(cs,
5373 PARENT_GO_FINI_BREADCRUMB,
5374 get_children_join_addr(parent,
5375 ce->parallel.child_index),
5378 /* Wait on parent for go */
5379 *cs++ = (MI_SEMAPHORE_WAIT |
5380 MI_SEMAPHORE_GLOBAL_GTT |
5382 MI_SEMAPHORE_SAD_EQ_SDD);
5383 *cs++ = CHILD_GO_FINI_BREADCRUMB;
5384 *cs++ = get_children_go_addr(parent);
5391 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
5394 struct intel_context *ce = rq->context;
5395 __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
5396 __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
5398 GEM_BUG_ON(!intel_context_is_child(ce));
5400 if (unlikely(skip_handshake(rq))) {
5402 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
5403 * the NON_SKIP_LEN comes from the length of the emits below.
5405 memset(cs, 0, sizeof(u32) *
5406 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
5407 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
5409 cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
5412 /* Emit fini breadcrumb */
5413 before_fini_breadcrumb_user_interrupt_cs = cs;
5414 cs = gen8_emit_ggtt_write(cs,
5416 i915_request_active_timeline(rq)->hwsp_offset,
5419 /* User interrupt */
5420 *cs++ = MI_USER_INTERRUPT;
5423 /* Ensure our math for skip + emit is correct */
5424 GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
5426 GEM_BUG_ON(start_fini_breadcrumb_cs +
5427 ce->engine->emit_fini_breadcrumb_dw != cs);
5429 rq->tail = intel_ring_offset(rq, cs);
5436 static struct intel_context *
5437 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
5438 unsigned long flags)
5440 struct guc_virtual_engine *ve;
5441 struct intel_guc *guc;
5445 ve = kzalloc(sizeof(*ve), GFP_KERNEL);
5447 return ERR_PTR(-ENOMEM);
5449 guc = &siblings[0]->gt->uc.guc;
5451 ve->base.i915 = siblings[0]->i915;
5452 ve->base.gt = siblings[0]->gt;
5453 ve->base.uncore = siblings[0]->uncore;
5456 ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
5457 ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
5458 ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
5459 ve->base.saturated = ALL_ENGINES;
5461 snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
5463 ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
5465 ve->base.cops = &virtual_guc_context_ops;
5466 ve->base.request_alloc = guc_request_alloc;
5467 ve->base.bump_serial = virtual_guc_bump_serial;
5469 ve->base.submit_request = guc_submit_request;
5471 ve->base.flags = I915_ENGINE_IS_VIRTUAL;
5473 intel_context_init(&ve->context, &ve->base);
5475 for (n = 0; n < count; n++) {
5476 struct intel_engine_cs *sibling = siblings[n];
5478 GEM_BUG_ON(!is_power_of_2(sibling->mask));
5479 if (sibling->mask & ve->base.mask) {
5480 guc_dbg(guc, "duplicate %s entry in load balancer\n",
5486 ve->base.mask |= sibling->mask;
5487 ve->base.logical_mask |= sibling->logical_mask;
5489 if (n != 0 && ve->base.class != sibling->class) {
5490 guc_dbg(guc, "invalid mixing of engine class, sibling %d, already %d\n",
5491 sibling->class, ve->base.class);
5494 } else if (n == 0) {
5495 ve->base.class = sibling->class;
5496 ve->base.uabi_class = sibling->uabi_class;
5497 snprintf(ve->base.name, sizeof(ve->base.name),
5498 "v%dx%d", ve->base.class, count);
5499 ve->base.context_size = sibling->context_size;
5501 ve->base.add_active_request =
5502 sibling->add_active_request;
5503 ve->base.remove_active_request =
5504 sibling->remove_active_request;
5505 ve->base.emit_bb_start = sibling->emit_bb_start;
5506 ve->base.emit_flush = sibling->emit_flush;
5507 ve->base.emit_init_breadcrumb =
5508 sibling->emit_init_breadcrumb;
5509 ve->base.emit_fini_breadcrumb =
5510 sibling->emit_fini_breadcrumb;
5511 ve->base.emit_fini_breadcrumb_dw =
5512 sibling->emit_fini_breadcrumb_dw;
5513 ve->base.breadcrumbs =
5514 intel_breadcrumbs_get(sibling->breadcrumbs);
5516 ve->base.flags |= sibling->flags;
5518 ve->base.props.timeslice_duration_ms =
5519 sibling->props.timeslice_duration_ms;
5520 ve->base.props.preempt_timeout_ms =
5521 sibling->props.preempt_timeout_ms;
5525 return &ve->context;
5528 intel_context_put(&ve->context);
5529 return ERR_PTR(err);
5532 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
5534 struct intel_engine_cs *engine;
5535 intel_engine_mask_t tmp, mask = ve->mask;
5537 for_each_engine_masked(engine, ve->gt, mask, tmp)
5538 if (READ_ONCE(engine->props.heartbeat_interval_ms))
5544 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5545 #include "selftest_guc.c"
5546 #include "selftest_guc_multi_lrc.c"
5547 #include "selftest_guc_hangcheck.c"