// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"

#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_engine_user.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_gt_pm.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "uc/intel_guc_submission.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

#define MAX_MMIO_BASES 3
struct engine_info {
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse graphics_ver order */
	struct engine_mmio_base {
		u32 graphics_ver : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS0] = {
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS0] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS0] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
			{ .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
			{ .graphics_ver = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS1] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
			{ .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS2] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS3] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VCS4] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 4,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
		},
	},
	[VCS5] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 5,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
		},
	},
	[VCS6] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 6,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
		},
	},
	[VCS7] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 7,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
		},
	},
	[VECS0] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .graphics_ver = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS1] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
	[VECS2] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
		},
	},
	[VECS3] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
		},
	},
};

/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (GRAPHICS_VER(gt->i915)) {
		default:
			MISSING_CASE(GRAPHICS_VER(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			drm_dbg(&gt->i915->drm,
				"graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
				GRAPHICS_VER(gt->i915), cxt_size * 64,
				cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (GRAPHICS_VER(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

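/*
 * Look up the mmio base for this engine: the bases table is sorted in
 * reverse graphics_ver order, so the first entry whose graphics_ver is
 * less than or equal to the running platform is the one that applies.
 */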
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
		return;

	if (GRAPHICS_VER(engine->i915) >= 3)
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	else
		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}

static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	GEM_DEBUG_WARN_ON(iir);
}

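/*
 * Allocate one intel_engine_cs and fill in the software state that can be
 * derived from the static engine_info table: name, mmio base, reset domain,
 * GuC id and the default scheduling properties. Hardware setup proper
 * happens later, from intel_engines_init().
 */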
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
			      u8 logical_instance)
{
	const struct engine_info *info = &intel_engines[id];
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	u8 guc_class;

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
	BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
	BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));

	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
		return -EINVAL;

	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	INIT_LIST_HEAD(&engine->pinned_contexts_list);
	engine->id = id;
	engine->legacy_idx = INVALID_ENGINE;
	engine->mask = BIT(id);
	if (GRAPHICS_VER(gt->i915) >= 11) {
		static const u32 engine_reset_domains[] = {
			[RCS0]  = GEN11_GRDOM_RENDER,
			[BCS0]  = GEN11_GRDOM_BLT,
			[VCS0]  = GEN11_GRDOM_MEDIA,
			[VCS1]  = GEN11_GRDOM_MEDIA2,
			[VCS2]  = GEN11_GRDOM_MEDIA3,
			[VCS3]  = GEN11_GRDOM_MEDIA4,
			[VCS4]  = GEN11_GRDOM_MEDIA5,
			[VCS5]  = GEN11_GRDOM_MEDIA6,
			[VCS6]  = GEN11_GRDOM_MEDIA7,
			[VCS7]  = GEN11_GRDOM_MEDIA8,
			[VECS0] = GEN11_GRDOM_VECS,
			[VECS1] = GEN11_GRDOM_VECS2,
			[VECS2] = GEN11_GRDOM_VECS3,
			[VECS3] = GEN11_GRDOM_VECS4,
		};

		GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
			   !engine_reset_domains[id]);
		engine->reset_domain = engine_reset_domains[id];
	} else {
		static const u32 engine_reset_domains[] = {
			[RCS0]  = GEN6_GRDOM_RENDER,
			[BCS0]  = GEN6_GRDOM_BLT,
			[VCS0]  = GEN6_GRDOM_MEDIA,
			[VCS1]  = GEN8_GRDOM_MEDIA2,
			[VECS0] = GEN6_GRDOM_VECS,
		};

		GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
			   !engine_reset_domains[id]);
		engine->reset_domain = engine_reset_domains[id];
	}
	engine->i915 = i915;
	engine->gt = gt;
	engine->uncore = gt->uncore;
	guc_class = engine_class_to_guc_class(info->class);
	engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);

	engine->irq_handler = nop_irq_handler;

	engine->class = info->class;
	engine->instance = info->instance;
	engine->logical_mask = BIT(logical_instance);
	__sprint_engine_name(engine);

	engine->props.heartbeat_interval_ms =
		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
	engine->props.max_busywait_duration_ns =
		CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
	engine->props.preempt_timeout_ms =
		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
	engine->props.stop_timeout_ms =
		CONFIG_DRM_I915_STOP_TIMEOUT;
	engine->props.timeslice_duration_ms =
		CONFIG_DRM_I915_TIMESLICE_DURATION;

	/* Override to uninterruptible for OpenCL workloads. */
	if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
		engine->props.preempt_timeout_ms = 0;

	engine->defaults = engine->props; /* never to change again */

	engine->context_size = intel_engine_context_size(gt, engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(i915)->has_logical_contexts = true;

	ewma__engine_latency_init(&engine->latency);
	seqcount_init(&engine->stats.execlists.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	gt->engine_class[info->class][info->instance] = engine;
	gt->engine[id] = engine;

	return 0;
}

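/*
 * Derive the uabi engine capabilities (HEVC, SFC) from platform, instance
 * and fusing; these are reported to userspace via the engine query uAPI.
 */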
static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
		if (GRAPHICS_VER(i915) >= 11 ||
		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_CLASS_CAPABILITY_HEVC;

		/*
		 * SFC block is present only on even logical engine
		 * instances.
		 */
		if ((GRAPHICS_VER(i915) >= 11 &&
		     (engine->gt->info.vdbox_sfc_access &
		      BIT(engine->instance))) ||
		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
		if (GRAPHICS_VER(i915) >= 9 &&
		    engine->gt->info.sfc_mask & BIT(engine->instance))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	}
}

static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		__setup_engine_capabilities(engine);
}

/**
 * intel_engines_release() - free the resources allocated for Command Streamers
 * @gt: pointer to struct intel_gt
 */
void intel_engines_release(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Before we release the resources held by engine, we must be certain
	 * that the HW is no longer accessing them -- having the GPU scribble
	 * to or read from a page being used for something else causes no end
	 * of fun.
	 *
	 * The GPU should be reset by this point, but assume the worst just
	 * in case we aborted before completely initialising the engines.
	 */
	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	/* Decouple the backend; but keep the layout for late GPU resets */
	for_each_engine(engine, gt, id) {
		if (!engine->release)
			continue;

		intel_wakeref_wait_for_idle(&engine->wakeref);
		GEM_BUG_ON(intel_engine_pm_is_awake(engine));

		engine->release(engine);
		engine->release = NULL;

		memset(&engine->reset, 0, sizeof(engine->reset));
	}
}

void intel_engine_free_request_pool(struct intel_engine_cs *engine)
{
	if (!engine->request_pool)
		return;

	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
}

void intel_engines_free(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Free the requests! dma-resv keeps fences around for an eternity */
	rcu_barrier();

	for_each_engine(engine, gt, id) {
		intel_engine_free_request_pool(engine);
		kfree(engine);
		gt->engine[id] = NULL;
	}
}

static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
			 unsigned int physical_vdbox,
			 unsigned int logical_vdbox, u16 vdbox_mask)
{
	struct drm_i915_private *i915 = gt->i915;

	/*
	 * In Gen11, only even numbered logical VDBOXes are hooked
	 * up to an SFC (Scaler & Format Converter) unit.
	 * In Gen12, even numbered physical instances are always connected
	 * to an SFC. Odd numbered physical instances have SFC only if
	 * the previous even instance is fused off.
	 *
	 * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
	 * in the fuse register that tells us whether a specific SFC is present.
	 */
	if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
		return false;
	else if (GRAPHICS_VER(i915) == 12)
		return (physical_vdbox % 2 == 0) ||
			!(BIT(physical_vdbox - 1) & vdbox_mask);
	else if (GRAPHICS_VER(i915) == 11)
		return logical_vdbox % 2 == 0;

	MISSING_CASE(GRAPHICS_VER(i915));
	return false;
}

/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	struct intel_uncore *uncore = gt->uncore;
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse, fuse1;
	u16 vdbox_mask;
	u16 vebox_mask;

	info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	if (GRAPHICS_VER(i915) < 11)
		return info->engine_mask;

	/*
	 * On newer platforms the fusing register is called 'enable' and has
	 * enable semantics, while on older platforms it is called 'disable'
	 * and bits have disable semantics.
	 */
	media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		media_fuse = ~media_fuse;

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		     GEN11_GT_VEBOX_DISABLE_SHIFT;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
		gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
	} else {
		gt->info.sfc_mask = ~0;
	}

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(gt, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			drm_dbg(&i915->drm, "vcs%u fused off\n", i);
			continue;
		}

		if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
			gt->info.vdbox_sfc_access |= BIT(i);
		logical_vdbox++;
	}
	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(gt));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(gt, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			drm_dbg(&i915->drm, "vecs%u fused off\n", i);
		}
	}
	drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(gt));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));

	return info->engine_mask;
}

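/*
 * Engine instances may be fused off, so the uabi-visible numbering must be
 * compacted: walk the instance map in order and hand out consecutive
 * logical ids to those engines of @class that are actually present.
 */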
static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
				 u8 class, const u8 *map, u8 num_instances)
{
	int i, j;
	u8 current_logical_id = 0;

	for (j = 0; j < num_instances; ++j) {
		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			if (!HAS_ENGINE(gt, i) ||
			    intel_engines[i].class != class)
				continue;

			if (intel_engines[i].instance == map[j]) {
				logical_ids[intel_engines[i].instance] =
					current_logical_id++;
				break;
			}
		}
	}
}

static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
	int i;
	u8 map[MAX_ENGINE_INSTANCE + 1];

	for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
		map[i] = i;
	populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map));
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int engine_mask = init_engine_mask(gt);
	unsigned int mask = 0;
	unsigned int i, class;
	u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
	int err;

	drm_WARN_ON(&i915->drm, engine_mask == 0);
	drm_WARN_ON(&i915->drm, engine_mask &
		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
		setup_logical_ids(gt, logical_ids, class);

		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			u8 instance = intel_engines[i].instance;

			if (intel_engines[i].class != class ||
			    !HAS_ENGINE(gt, i))
				continue;

			err = intel_engine_setup(gt, i,
						 logical_ids[instance]);
			if (err)
				goto cleanup;

			mask |= BIT(i);
		}
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
		gt->info.engine_mask = mask;

	gt->info.num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(gt);

	intel_setup_engine_capabilities(gt);

	intel_uncore_prune_engine_fw_domains(gt->uncore, gt);

	return 0;

cleanup:
	intel_engines_free(gt);
	return err;
}

void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	memset(execlists->pending, 0, sizeof(execlists->pending));
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	i915_gem_object_put(vma->obj);
}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_gem_ww_ctx *ww,
				struct i915_vma *vma)
{
	unsigned int flags;

	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags = PIN_MAPPABLE;
	else
		flags = PIN_HIGH;

	return i915_ggtt_pin(vma, ww, 0, flags);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	INIT_LIST_HEAD(&engine->status_page.timelines);

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		drm_err(&engine->i915->drm,
			"Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_put;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
		ret = pin_ggtt_status_page(engine, &ww, vma);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

err_unpin:
	if (ret)
		i915_vma_unpin(vma);
err:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
err_put:
	if (ret)
		i915_gem_object_put(obj);
	return ret;
}

static int engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	init_llist_head(&engine->barrier_tasks);

	err = init_status_page(engine);
	if (err)
		return err;

	engine->breadcrumbs = intel_breadcrumbs_create(engine);
	if (!engine->breadcrumbs) {
		err = -ENOMEM;
		goto err_status;
	}

	engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
	if (!engine->sched_engine) {
		err = -ENOMEM;
		goto err_sched_engine;
	}
	engine->sched_engine->private_data = engine;

	err = intel_engine_init_cmd_parser(engine);
	if (err)
		goto err_cmd_parser;

	intel_engine_init_execlists(engine);
	intel_engine_init__pm(engine);
	intel_engine_init_retire(engine);

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&engine->gt->info.sseu);

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);
	intel_engine_init_ctx_wa(engine);

	if (GRAPHICS_VER(engine->i915) >= 12)
		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;

	return 0;

err_cmd_parser:
	i915_sched_engine_put(engine->sched_engine);
err_sched_engine:
	intel_breadcrumbs_put(engine->breadcrumbs);
err_status:
	cleanup_status_page(engine);
	return err;
}

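/*
 * measure_breadcrumb_dw() dry-runs the engine's emit_fini_breadcrumb() into
 * a dummy frame to measure how many dwords it emits; the caller caches the
 * result in engine->emit_fini_breadcrumb_dw so that request construction
 * can reserve exactly the right amount of ring space.
 */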
struct measure_breadcrumb {
	struct i915_request rq;
	struct intel_ring ring;
	u32 cs[2048];
};

static int measure_breadcrumb_dw(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct measure_breadcrumb *frame;
	int dw;

	GEM_BUG_ON(!engine->gt->scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->rq.engine = engine;
	frame->rq.context = ce;
	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
	frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;

	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.wrap =
		BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);
	frame->rq.ring = &frame->ring;

	mutex_lock(&ce->timeline->mutex);
	spin_lock_irq(&engine->sched_engine->lock);

	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;

	spin_unlock_irq(&engine->sched_engine->lock);
	mutex_unlock(&ce->timeline->mutex);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	kfree(frame);
	return dw;
}

struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
				   struct i915_address_space *vm,
				   unsigned int ring_size,
				   unsigned int hwsp,
				   struct lock_class_key *key,
				   const char *name)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
	ce->timeline = page_pack_bits(NULL, hwsp);
	ce->ring = NULL;
	ce->ring_size = ring_size;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);

	err = intel_context_pin(ce); /* perma-pin so it is always available */
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class_and_name(&ce->timeline->mutex, key, name);

	return ce;
}

void intel_engine_destroy_pinned_context(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *hwsp = engine->status_page.vma;

	GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);

	mutex_lock(&hwsp->vm->mutex);
	list_del(&ce->timeline->engine_link);
	mutex_unlock(&hwsp->vm->mutex);

	list_del(&ce->pinned_contexts_link);
	intel_context_unpin(ce);
	intel_context_put(ce);
}

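/*
 * The kernel context is perma-pinned at init so that it is always available
 * for internal use (e.g. engine pm and idle barriers), with its seqno kept
 * in the engine's HWSP at I915_GEM_HWS_SEQNO_ADDR.
 */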
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;

	return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						  I915_GEM_HWS_SEQNO_ADDR,
						  &kernel, "kernel_context");
}

/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context at driver load time.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ret = measure_breadcrumb_dw(ce);
	if (ret < 0)
		goto err_context;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->kernel_context = ce;

	return 0;

err_context:
	intel_engine_destroy_pinned_context(ce);
	return ret;
}

int intel_engines_init(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (intel_uc_uses_guc_submission(&gt->uc)) {
		gt->submission_method = INTEL_SUBMISSION_GUC;
		setup = intel_guc_submission_setup;
	} else if (HAS_EXECLISTS(gt->i915)) {
		gt->submission_method = INTEL_SUBMISSION_ELSP;
		setup = intel_execlists_submission_setup;
	} else {
		gt->submission_method = INTEL_SUBMISSION_RING;
		setup = intel_ring_submission_setup;
	}

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);
		if (err)
			return err;

		err = setup(engine);
		if (err)
			return err;

		err = engine_init_common(engine);
		if (err)
			return err;

		intel_engine_add_user(engine);
	}

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_engine_fini_retire(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		fput(engine->default_state);

	if (engine->kernel_context)
		intel_engine_destroy_pinned_context(engine->kernel_context);

	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
	cleanup_status_page(engine);

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

/**
 * intel_engine_resume - re-initializes the HW state of the engine
 * @engine: Engine to resume.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_resume(struct intel_engine_cs *engine)
{
	intel_engine_apply_workarounds(engine);
	intel_engine_apply_whitelist(engine);

	return engine->resume(engine);
}

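/*
 * ACTHD reports the GTT address of the instruction the command streamer is
 * currently executing; on gen8+ it is read as a 64b register pair.
 */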
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	u64 acthd;

	if (GRAPHICS_VER(i915) >= 8)
		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
	else if (GRAPHICS_VER(i915) >= 4)
		acthd = ENGINE_READ(engine, RING_ACTHD);
	else
		acthd = ENGINE_READ(engine, ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	u64 bbaddr;

	if (GRAPHICS_VER(engine->i915) >= 8)
		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
	else
		bbaddr = ENGINE_READ(engine, RING_BBADDR);

	return bbaddr;
}

static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{
	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
		return 0;

	/*
	 * If we are doing a normal GPU reset, we can take our time and allow
	 * the engine to quiesce. We've stopped submission to the engine, and
	 * if we wait long enough an innocent context should complete and
	 * leave the engine idle. So they should not be caught unaware by
	 * the forthcoming GPU reset (which usually follows the stop_cs)!
	 */
	return READ_ONCE(engine->props.stop_timeout_ms);
}

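/*
 * Ask the command streamer to stop (STOP_RING in RING_MI_MODE) and wait for
 * it to report idle: first a busy-wait of @fast_timeout_us, then sleeping
 * for up to @slow_timeout_ms.
 */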
static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
				  int fast_timeout_us,
				  int slow_timeout_ms)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
	int err;

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
	err = __intel_wait_for_register_fw(engine->uncore, mode,
					   MODE_IDLE, MODE_IDLE,
					   fast_timeout_us,
					   slow_timeout_ms,
					   NULL);

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);
	return err;
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	int err = 0;

	if (GRAPHICS_VER(engine->i915) < 3)
		return -ENODEV;

	ENGINE_TRACE(engine, "\n");
	if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
		ENGINE_TRACE(engine,
			     "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
			     ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
			     ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);

		/*
		 * Sometimes we observe that the idle flag is not
		 * set even though the ring is empty. So double
		 * check before giving up.
		 */
		if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
		    (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
			err = -ETIMEDOUT;
	}

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	ENGINE_TRACE(engine, "\n");

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

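/*
 * Multicast/replicated (MCR) registers only return the value of a single
 * hardware instance, so reads must be steered to a specific slice and
 * subslice to be meaningful.
 */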
static u32
read_subslice_reg(const struct intel_engine_cs *engine,
		  int slice, int subslice, i915_reg_t reg)
{
	return intel_uncore_read_with_mcr_steering(engine->uncore, reg,
						   slice, subslice);
}

/* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;
	int iter;

	memset(instdone, 0, sizeof(*instdone));

	if (GRAPHICS_VER(i915) >= 8) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			return;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		if (GRAPHICS_VER(i915) >= 12) {
			instdone->slice_common_extra[0] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
			instdone->slice_common_extra[1] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
		}

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
			for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
				instdone->sampler[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_SAMPLER_INSTDONE);
				instdone->row[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_ROW_INSTDONE);
			}
		} else {
			for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
				instdone->sampler[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_SAMPLER_INSTDONE);
				instdone->row[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  GEN7_ROW_INSTDONE);
			}
		}

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
			for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice)
				instdone->geom_svg[slice][subslice] =
					read_subslice_reg(engine, slice, subslice,
							  XEHPG_INSTDONE_GEOM_SVG);
		}
	} else if (GRAPHICS_VER(i915) >= 7) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			return;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		instdone->sampler[0][0] =
			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] =
			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
	} else if (GRAPHICS_VER(i915) >= 4) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common =
				intel_uncore_read(uncore, GEN4_INSTDONE1);
	} else {
		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	if (!intel_engine_pm_get_if_awake(engine))
		return true;

	/* First check that no commands are left in the ring */
	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (GRAPHICS_VER(engine->i915) > 2 &&
	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
		idle = false;

	intel_engine_pm_put(engine);

	return idle;
}

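/*
 * Kick the submission tasklet directly (if enabled) so that anything
 * already queued reaches the hardware without waiting for ksoftirqd;
 * with @sync, also wait out a tasklet running concurrently elsewhere.
 */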
void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
	struct tasklet_struct *t = &engine->sched_engine->tasklet;

	if (!t->callback)
		return;

	local_bh_disable();
	if (tasklet_trylock(t)) {
		/* Must wait for any GPU reset in progress. */
		if (__tasklet_is_enabled(t))
			t->callback(t);
		tasklet_unlock(t);
	}
	local_bh_enable();

	/* Synchronise and wait for the tasklet on another CPU */
	if (sync)
		tasklet_unlock_wait(t);
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	if (!intel_engine_pm_is_awake(engine))
		return true;

	/* Waiting to drain ELSP? */
	intel_synchronize_hardirq(engine->i915);
	intel_engine_flush_submission(engine);

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!i915_sched_engine_is_empty(engine->sched_engine))
		return false;

	/* Ring stopped? */
	return ring_is_idle(engine);
}

bool intel_engines_are_idle(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (intel_gt_is_wedged(gt))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(gt->awake))
		return true;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

bool intel_engine_irq_enable(struct intel_engine_cs *engine)
{
	if (!engine->irq_enable)
		return false;

	/* Caller disables interrupts */
	spin_lock(&engine->gt->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->gt->irq_lock);

	return true;
}

void intel_engine_irq_disable(struct intel_engine_cs *engine)
{
	if (!engine->irq_disable)
		return;

	/* Caller disables interrupts */
	spin_lock(&engine->gt->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->gt->irq_lock);
}

void intel_engines_reset_default_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->sanitize)
			engine->sanitize(engine);

		engine->set_default_submission(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (GRAPHICS_VER(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 4:
		return !IS_I965G(engine->i915); /* who knows! */
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->sched_engine->lock here, there
	 * is no control over the submission queue per-se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue. Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();

	return tl;
}

static int print_ring(char *buf, int sz, struct i915_request *rq)
{
	int len = 0;

	if (!i915_request_signaled(rq)) {
		struct intel_timeline *tl = get_timeline(rq);

		len = scnprintf(buf, sz,
				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
				i915_ggtt_offset(rq->ring->vma),
				tl ? tl->hwsp_offset : 0,
				hwsp_seqno(rq),
				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
						      1000 * 1000));

		if (tl)
			intel_timeline_put(tl);
	}

	return len;
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static const char *repr_timer(const struct timer_list *t)
{
	if (!READ_ONCE(t->expires))
		return "no timer";

	if (timer_pending(t))
		return "active";

	return "idle";
}

static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	u64 addr;

	if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
	if (HAS_EXECLISTS(dev_priv)) {
		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
	}
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   ENGINE_READ(engine, RING_START));
	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
		   ENGINE_READ(engine, RING_CTL),
		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (GRAPHICS_VER(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
			   ENGINE_READ(engine, RING_MI_MODE),
			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (GRAPHICS_VER(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR:   0x%08x\n",
			   ENGINE_READ(engine, RING_IMR));
		drm_printf(m, "\tRING_ESR:   0x%08x\n",
			   ENGINE_READ(engine, RING_ESR));
		drm_printf(m, "\tRING_EMR:   0x%08x\n",
			   ENGINE_READ(engine, RING_EMR));
		drm_printf(m, "\tRING_EIR:   0x%08x\n",
			   ENGINE_READ(engine, RING_EIR));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (GRAPHICS_VER(dev_priv) >= 8)
		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
	else if (GRAPHICS_VER(dev_priv) >= 4)
		addr = ENGINE_READ(engine, RING_DMA_FADD);
	else
		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (GRAPHICS_VER(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEHR));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
	}

	if (intel_engine_uses_guc(engine)) {
		/* nothing to print yet */
	} else if (HAS_EXECLISTS(dev_priv)) {
		struct i915_request * const *port, *rq;
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		const u8 num_entries = execlists->csb_size;
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->sched_engine->tasklet.state)),
			   enableddisabled(!atomic_read(&engine->sched_engine->tasklet.count)),
			   repr_timer(&engine->execlists.preempt),
			   repr_timer(&engine->execlists.timer));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
			   read, write, num_entries);

		if (read >= num_entries)
			read = 0;
		if (write >= num_entries)
			write = 0;
		if (read > write)
			write += num_entries;
		while (read < write) {
			idx = ++read % num_entries;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
				   idx, hws[idx * 2], hws[idx * 2 + 1]);
		}

		i915_sched_engine_active_lock_bh(engine->sched_engine);
		rcu_read_lock();
		for (port = execlists->active; (rq = *port); port++) {
			char hdr[160];
			int len;

			len = scnprintf(hdr, sizeof(hdr),
					"\t\tActive[%d]:  ccid:%08x%s%s, ",
					(int)(port - execlists->active),
					rq->context->lrc.ccid,
					intel_context_is_closed(rq->context) ? "!" : "",
					intel_context_is_banned(rq->context) ? "*" : "");
			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			i915_request_show(m, rq, hdr, 0);
		}
		for (port = execlists->pending; (rq = *port); port++) {
			char hdr[160];
			int len;

			len = scnprintf(hdr, sizeof(hdr),
					"\t\tPending[%d]: ccid:%08x%s%s, ",
					(int)(port - execlists->pending),
					rq->context->lrc.ccid,
					intel_context_is_closed(rq->context) ? "!" : "",
					intel_context_is_banned(rq->context) ? "*" : "");
			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			i915_request_show(m, rq, hdr, 0);
		}
		rcu_read_unlock();
		i915_sched_engine_active_unlock_bh(engine->sched_engine);
	} else if (GRAPHICS_VER(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
	}
}

static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	struct i915_vma_snapshot *vsnap = &rq->batch_snapshot;
	void *ring;
	int size;

	if (!i915_vma_snapshot_present(vsnap))
		vsnap = NULL;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u,
		   vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);
		kfree(ring);
	}
}

static unsigned long list_count(struct list_head *list)
{
	struct list_head *pos;
	unsigned long count = 0;

	list_for_each(pos, list)
		count++;

	return count;
}

static unsigned long read_ul(void *p, size_t x)
{
	return *(unsigned long *)(p + x);
}

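/*
 * engine->props and engine->defaults share the same layout of unsigned
 * longs, so each property can be fetched generically by its offsetof()
 * and printed alongside its default value.
 */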
static void print_properties(struct intel_engine_cs *engine,
			     struct drm_printer *m)
{
	static const struct pmap {
		size_t offset;
		const char *name;
	} props[] = {
#define P(x) { \
		.offset = offsetof(typeof(engine->props), x), \
		.name = #x \
	}
		P(heartbeat_interval_ms),
		P(max_busywait_duration_ns),
		P(preempt_timeout_ms),
		P(stop_timeout_ms),
		P(timeslice_duration_ms),

		{},
#undef P
	};
	const struct pmap *p;

	drm_printf(m, "\tProperties:\n");
	for (p = props; p->name; p++)
		drm_printf(m, "\t\t%s: %lu [default %lu]\n",
			   p->name,
			   read_ul(&engine->props, p->offset),
			   read_ul(&engine->defaults, p->offset));
}

static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
{
	struct intel_timeline *tl = get_timeline(rq);

	i915_request_show(m, rq, msg, 0);

	drm_printf(m, "\t\tring->start:  0x%08x\n",
		   i915_ggtt_offset(rq->ring->vma));
	drm_printf(m, "\t\tring->head:   0x%08x\n",
		   rq->ring->head);
	drm_printf(m, "\t\tring->tail:   0x%08x\n",
		   rq->ring->tail);
	drm_printf(m, "\t\tring->emit:   0x%08x\n",
		   rq->ring->emit);
	drm_printf(m, "\t\tring->space:  0x%08x\n",
		   rq->ring->space);

	if (tl) {
		drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
			   tl->hwsp_offset);
		intel_timeline_put(tl);
	}

	print_request_ring(m, rq);

	if (rq->context->lrc_reg_state) {
		drm_printf(m, "Logical Ring Context:\n");
		hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
	}
}

void intel_engine_dump_active_requests(struct list_head *requests,
				       struct i915_request *hung_rq,
				       struct drm_printer *m)
{
	struct i915_request *rq;
	const char *msg;
	enum i915_request_state state;

	list_for_each_entry(rq, requests, sched.link) {
		if (rq == hung_rq)
			continue;

		state = i915_test_request_state(rq);
		if (state < I915_REQUEST_QUEUED)
			continue;

		if (state == I915_REQUEST_ACTIVE)
			msg = "\t\tactive on engine";
		else
			msg = "\t\tactive in queue";

		engine_dump_request(rq, m, msg);
	}
}

static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
{
	struct i915_request *hung_rq = NULL;
	struct intel_context *ce;
	bool guc;

	/*
	 * No need for an engine->irq_seqno_barrier() before the seqno reads.
	 * The GPU is still running so requests are still executing and any
	 * hardware reads will be out of date by the time they are reported.
	 * But the intention here is just to report an instantaneous snapshot
	 * so that's fine.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	drm_printf(m, "\tRequests:\n");

	guc = intel_uc_uses_guc_submission(&engine->gt->uc);
	if (guc) {
		ce = intel_engine_get_hung_context(engine);
		if (ce)
			hung_rq = intel_context_find_active_request(ce);
	} else {
		hung_rq = intel_engine_execlist_find_hung_request(engine);
	}

	if (hung_rq)
		engine_dump_request(hung_rq, m, "\t\thung");

	if (guc)
		intel_guc_dump_active_requests(engine, hung_rq, m);
	else
		intel_engine_dump_active_requests(&engine->sched_engine->requests,
						  hung_rq, m);
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	unsigned long flags;
	ktime_t dummy;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (intel_gt_is_wedged(engine->gt))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
	drm_printf(m, "\tBarriers?: %s\n",
		   yesno(!llist_empty(&engine->barrier_tasks)));
	drm_printf(m, "\tLatency: %luus\n",
		   ewma__engine_latency_read(&engine->latency));
	if (intel_engine_supports_stats(engine))
		drm_printf(m, "\tRuntime: %llums\n",
			   ktime_to_ms(intel_engine_get_busy_time(engine,
								  &dummy)));
	drm_printf(m, "\tForcewake: %x domains, %d active\n",
		   engine->fw_domain, READ_ONCE(engine->fw_active));

	rcu_read_lock();
	rq = READ_ONCE(engine->heartbeat.systole);
	if (rq)
		drm_printf(m, "\tHeartbeat: %d ms ago\n",
			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
	rcu_read_unlock();
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));
	print_properties(engine, m);

	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	engine_dump_active_requests(engine, m);

	drm_printf(m, "\tOn hold?: %lu\n",
		   list_count(&engine->sched_engine->hold));
	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);

	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, i915_request_show, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 * @now: monotonic timestamp of sampling
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
	return engine->busyness(engine, now);
}

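/*
 * With a single sibling (and no FORCE_VIRTUAL) there is nothing to load
 * balance, so a plain context on that engine suffices; otherwise defer to
 * the submission backend's virtual engine implementation.
 */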
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
			    unsigned int count, unsigned long flags)
{
	if (count == 0)
		return ERR_PTR(-EINVAL);

	if (count == 1 && !(flags & FORCE_VIRTUAL))
		return intel_context_create(siblings[0]);

	GEM_BUG_ON(!siblings[0]->cops->create_virtual);
	return siblings[0]->cops->create_virtual(siblings, count, flags);
}

struct i915_request *
intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * This search does not work in GuC submission mode. However, the GuC
	 * will report the hanging context directly to the driver itself. So
	 * the driver should never get here when in GuC mode.
	 */
	GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	rcu_read_lock();
	request = execlists_active(&engine->execlists);
	if (request) {
		struct intel_timeline *tl = request->context->timeline;

		list_for_each_entry_from_reverse(request, &tl->requests, link) {
			if (__i915_request_is_complete(request))
				break;

			active = request;
		}
	}
	rcu_read_unlock();
	if (active)
		return active;

	list_for_each_entry(request, &engine->sched_engine->requests,
			    sched.link) {
		if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
			continue;

		active = request;
		break;
	}

	return active;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif