// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
/**
 * DOC: Hardware workarounds
 *
 * Hardware workarounds are register programming documented to be executed in
 * the driver that fall outside of the normal programming sequences for a
 * platform. There are some basic categories of workarounds, depending on
 * how/when they are applied:
 *
 * - Context workarounds: workarounds that touch registers that are
 *   saved/restored to/from the HW context image. The list is emitted (via Load
 *   Register Immediate commands) once when initializing the device and saved in
 *   the default context. That default context is then used on every context
 *   creation to have a "primed golden context", i.e. a context image that
 *   already contains the changes needed to all the registers.
 *
 *   Context workarounds should be implemented in the \*_ctx_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Engine workarounds: the list of these WAs is applied whenever the specific
 *   engine is reset. It's also possible that a set of engine classes share a
 *   common power domain and they are reset together. This happens on some
 *   platforms with render and compute engines. In this case (at least) one of
 *   them needs to keep the workaround programming: the approach taken in the
 *   driver is to tie those workarounds to the first compute/render engine that
 *   is registered. When executing with GuC submission, engine resets are
 *   outside of kernel driver control, hence the list of registers involved is
 *   written once, on engine initialization, and then passed to GuC, that
 *   saves/restores their values before/after the reset takes place. See
 *   ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
 *
 *   Workarounds for registers specific to RCS and CCS should be implemented in
 *   rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
 *   registers belonging to BCS, VCS or VECS should be implemented in
 *   xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
 *   engine's MMIO range but that are part of the common RCS/CCS reset domain
 *   should be implemented in general_render_compute_wa_init().
 *
 * - GT workarounds: the list of these WAs is applied whenever these registers
 *   revert to their default values: on GPU reset, suspend/resume [1]_, etc.
 *
 *   GT workarounds should be implemented in the \*_gt_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Register whitelist: some workarounds need to be implemented in userspace,
 *   but need to touch privileged registers. The whitelist in the kernel
 *   instructs the hardware to allow the access to happen. From the kernel side,
 *   this is just a special case of a MMIO workaround (as we write the list of
 *   these to-be-whitelisted registers to some special HW registers).
 *
 *   Register whitelisting should be done in the \*_whitelist_build() variants
 *   respective to the targeted platforms.
 *
 * - Workaround batchbuffers: buffers that get executed automatically by the
 *   hardware on every HW context restore. These buffers are created and
 *   programmed in the default context so the hardware always goes through those
 *   programming sequences when switching contexts. The support for workaround
 *   batchbuffers is enabled by these hardware mechanisms:
 *
 *   #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
 *      context, pointing the hardware to jump to that location when that offset
 *      is reached in the context restore. The workaround batchbuffer in the
 *      driver currently uses this mechanism for all platforms.
 *
 *   #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
 *      pointing the hardware to a buffer to continue executing after the
 *      engine registers are restored in a context restore sequence. This is
 *      currently not used in the driver.
 *
 * - Other: There are WAs that, due to their nature, cannot be applied from a
 *   central place. Those are peppered around the rest of the code, as needed.
 *   Workarounds related to the display IP are the main example.
 *
 * .. [1] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things, so it's the approach taken in the driver.
 */
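/*
 * Illustrative sketch (not a real platform hook): a workaround list is
 * built once and later applied/verified by the framework below,
 *
 *	wa_init_start(wal, gt, "GT", "global");
 *	wa_write_or(wal, SOME_REG, SOME_BIT);	(hypothetical register/bit)
 *	wa_init_finish(wal);
 *
 * The helpers that follow implement the list management (_wa_add() and
 * friends) and the wa_* adders used by the per-platform
 * *_workarounds_init() functions further down.
 */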
static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
			  const char *name, const char *engine_name)
{
	wal->gt = gt;
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)
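/*
 * The list grows in chunks of 16 entries: _wa_add() below reallocates
 * only when the current count is a multiple of WA_LIST_CHUNK (i.e. the
 * list is either brand new or full), and wa_init_finish() trims the
 * unused tail once the list is complete.
 */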
static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	drm_dbg(&wal->gt->i915->drm, "Initialized %u %s workarounds on %s\n",
		wal->wa_count, wal->name, wal->engine_name);
}
static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}
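/*
 * Accumulating the forcewake domains for the whole list up front lets
 * the apply/verify paths take every required domain once, outside their
 * register loops, instead of per register access.
 */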
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	struct drm_i915_private *i915 = wal->gt->i915;
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			drm_err(&i915->drm, "No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				drm_err(&i915->drm,
					"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					i915_mmio_reg_offset(wa_->reg),
					wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}
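/*
 * The list is kept sorted by MMIO offset: the binary search above merges
 * entries for a register that is added twice (flagging conflicting clear
 * masks), while the trailing insertion-sort pass restores ordering after
 * a new entry is appended at the end.
 */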
static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.reg  = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
		.masked_reg = masked_reg,
	};

	_wa_add(wal, &wa);
}

static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
		       u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
		.masked_reg = masked_reg,
		.is_mcr = 1,
	};

	_wa_add(wal, &wa);
}
static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear | set, false);
}

static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
	wa_mcr_add(wal, reg, clear, set, clear | set, false);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, ~0, set);
}

static void
wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, set, set);
}

static void
wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, set, set);
}

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
	wa_write_clr_set(wal, reg, clr, 0);
}

static void
wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
{
	wa_mcr_write_clr_set(wal, reg, clr, 0);
}
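/*
 * Summary of the unmasked helpers above; all of them verify the
 * 'clear | set' bits on readback:
 *
 *	wa_write(wal, reg, val)          - overwrite the full register
 *	wa_write_or(wal, reg, bits)      - rmw, set 'bits', keep the rest
 *	wa_write_clr(wal, reg, bits)     - rmw, clear 'bits', keep the rest
 *	wa_write_clr_set(wal, reg, c, s) - rmw, clear 'c', then set 's'
 */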
/*
 * WA operations on "masked register". A masked register has the upper 16 bits
 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
 * portion of the register without an rmw: you simply write in the upper 16 bits
 * the mask of bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */
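/*
 * Worked example (illustrative): given _MASKED_BIT_ENABLE(x) == (x << 16 | x),
 * enabling BIT(2) on a masked register writes 0x00040004: the upper half
 * selects which bits the write affects, the lower half supplies their new
 * values. _MASKED_BIT_DISABLE(BIT(2)) writes 0x00040000: same mask, value
 * bit clear. Bits outside the mask keep their current contents without any
 * read-modify-write cycle.
 */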
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
		    u32 mask, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
			u32 mask, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}
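/*
 * Worked example (illustrative): wa_masked_field_set(wal, reg, 0x30, 0x20)
 * writes _MASKED_FIELD(0x30, 0x20) == 0x00300020, replacing only the
 * two-bit field covered by mask 0x30 with the value 0x20 and verifying
 * those bits on readback.
 */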
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}
static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
			 DOP_CLOCK_GATING_DISABLE);

	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
			 GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
				 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 FLOW_CONTROL_ENABLE |
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			 GEN9_ENABLE_YV12_BUGFIX |
			 GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
				 GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}
static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
	wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* WaEnableFloatBlendOptimization:icl */
	wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
		   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
		   0 /* write-only, so skip validation */,
		   true);

	/* WaDisableGPGPUMidThreadPreemption:icl */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
			 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0, /* write-only register; skip validation */
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on the DG2 platform.
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
	wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
			     REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
	wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
			     FF_MODE2_TDS_TIMER_128);
}
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/*
	 * Wa_1409142259:tgl,dg1,adl-p
	 * Wa_1409347922:tgl,dg1,adl-p
	 * Wa_1409252684:tgl,dg1,adl-p
	 * Wa_1409217633:tgl,dg1,adl-p
	 * Wa_1409207793:tgl,dg1,adl-p
	 * Wa_1409178076:tgl,dg1,adl-p
	 * Wa_1408979724:tgl,dg1,adl-p
	 * Wa_14010443199:tgl,rkl,dg1,adl-p
	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/*
	 * Wa_16011163337 - GS_TIMER
	 *
	 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
	 * need to program it even on those that don't explicitly list that
	 * workaround.
	 *
	 * Note that the programming of GEN12_FF_MODE2 is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084.
	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
	 * value when read from the CPU.
	 *
	 * The default value for this register is zero for all fields.
	 * So instead of doing an RMW we should just write the desired values
	 * for TDS and GS timers. Note that since the readback can't be trusted,
	 * the clear mask is just set to ~0 to make sure other bits are not
	 * inadvertently set. For the same reason read verification is ignored.
	 */
	wa_add(wal,
	       GEN12_FF_MODE2,
	       ~0,
	       FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
	       0, false);

	if (!IS_DG1(i915)) {
		/* Wa_1806527549 */
		wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);

		wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
	}
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}

static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	dg2_ctx_gt_tuning_init(engine, wal);

	/* Wa_16013271637:dg2 */
	wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
			 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

	/* Wa_14014947963:dg2 */
	wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);

	/* Wa_18018764978:dg2 */
	wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);

	/* Wa_18019271663:dg2 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	dg2_ctx_gt_tuning_init(engine, wal);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
		wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}

static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	xelpg_ctx_gt_tuning_init(engine, wal);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014947963 */
		wa_masked_field_set(wal, VF_PREEMPTION,
				    PREEMPTION_VERTEX_COUNT, 0x4000);

		/* Wa_16013271637 */
		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);

		/* Wa_18018764978 */
		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
	}

	/* Wa_18019271663 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
					 struct i915_wa_list *wal)
{
	/*
	 * This is a "fake" workaround defined by software to ensure we
	 * maintain reliable, backward-compatible behavior for userspace with
	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
	 *
	 * The per-context setting of MI_MODE[12] determines whether the bits
	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
	 * in the traditional manner or whether they should instead use a new
	 * tgl+ meaning that breaks backward compatibility, but allows nesting
	 * into 3rd-level batchbuffers. When this new capability was first
	 * added in TGL, it remained off by default unless a context
	 * intentionally opted in to the new behavior. However Xe_HPG now
	 * flips this on by default and requires that we explicitly opt out if
	 * we don't want the new behavior.
	 *
	 * From a SW perspective, we want to maintain the backward-compatible
	 * behavior for userspace, so we'll apply a fake workaround to set it
	 * back to the legacy behavior on platforms where the hardware default
	 * is to break compatibility. At the moment there is no Linux
	 * userspace that utilizes third-level batchbuffers, so this will avoid
	 * userspace from needing to make any changes; using the legacy
	 * meaning is the correct thing to do. If/when we have userspace
	 * consumers that want to utilize third-level batch nesting, we can
	 * provide a context parameter to allow them to opt in.
	 */
	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}
static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	u8 mocs;

	/*
	 * Some blitter commands do not have a field for MOCS, those
	 * commands will use the MOCS index pointed to by BLIT_CCTL.
	 * The BLIT_CCTL registers need to be programmed to un-cached.
	 */
	if (engine->class == COPY_ENGINE_CLASS) {
		mocs = engine->gt->mocs.uc_index;
		wa_write_clr_set(wal,
				 BLIT_CCTL(engine->mmio_base),
				 BLIT_CCTL_MASK,
				 BLIT_CCTL_MOCS(mocs, mocs));
	}
}

/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
 * defined by the hardware team, but general context registers instead.
 * Adding that context register programming to the context workaround
 * list allows us to use the wa framework for proper application and
 * validation.
 */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
			  struct i915_wa_list *wal)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		fakewa_disable_nestedbb_mode(engine, wal);

	gen12_ctx_gt_mocs_init(engine, wal);
}
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	wa_init_start(wal, engine->gt, name, engine->name);

	/* Applies to all engines */
	/*
	 * Fake workarounds are not the actual workaround but
	 * programming of context registers using the workaround framework.
	 */
	if (GRAPHICS_VER(i915) >= 12)
		gen12_ctx_gt_fake_wa_init(engine, wal);

	if (engine->class != RENDER_CLASS)
		goto done;

	if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
		xelpg_ctx_workarounds_init(engine, wal);
	else if (IS_PONTEVECCHIO(i915))
		; /* noop; none at this time */
	else if (IS_DG2(i915))
		dg2_ctx_workarounds_init(engine, wal);
	else if (IS_XEHPSDV(i915))
		; /* noop; none at this time */
	else if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 6)
		gen6_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) < 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

done:
	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct intel_uncore *uncore = rq->engine->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(wal->gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val;

		/* Skip reading the register if it's not really needed */
		if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
			val = wa->set;
		} else {
			val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
			val &= ~wa->clr;
			val |= wa->set;
		}

		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = val;
	}
	*cs++ = MI_NOOP;

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(wal->gt, flags);

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
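/*
 * The command stream emitted above has the following shape (illustrative):
 *
 *	MI_LOAD_REGISTER_IMM(count)
 *	    reg0 offset, reg0 value
 *	    reg1 offset, reg1 value
 *	    ...
 *	MI_NOOP		(pads the +2 dwords reserved with the LRI header)
 *
 * so the whole context workaround list is (re)applied from the ring with
 * a single LRI packet, bracketed by the two barrier flushes.
 */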
static void
gen4_gt_workarounds_init(struct intel_gt *gt,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(gt, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}

static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(gt, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}

static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}

static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}

static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */, true);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}
static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
	unsigned int slice, subslice;
	u32 mcr, mcr_mask;

	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

	/*
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 */
	slice = ffs(sseu->slice_mask) - 1;
	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
	GEM_BUG_ON(!subslice);
	subslice--;

	/*
	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
	 */
	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
	gen9_wa_init_mcr(i915, wal);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);
}

static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void __set_mcr_steering(struct i915_wa_list *wal,
			       i915_reg_t steering_reg,
			       unsigned int slice, unsigned int subslice)
{
	u32 mcr, mcr_mask;

	mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;

	wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}
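/*
 * Example (illustrative): __set_mcr_steering(wal, GEN8_MCR_SELECTOR, 1, 2)
 * would route subsequent unicast MCR accesses to group/slice 1,
 * instance/subslice 2. Only the slice and subslice fields are replaced;
 * the other bits of the steering register are left untouched by the
 * clr/set pair.
 */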
static void debug_dump_steering(struct intel_gt *gt)
{
	struct drm_printer p = drm_debug_printer("MCR Steering:");

	if (drm_debug_enabled(DRM_UT_DRIVER))
		intel_gt_mcr_report_steering(&p, gt, false);
}

static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
			 unsigned int slice, unsigned int subslice)
{
	__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);

	gt->default_steering.groupid = slice;
	gt->default_steering.instanceid = subslice;

	debug_dump_steering(gt);
}
static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned int subslice;

	GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);

	/*
	 * Although a platform may have subslices, we need to always steer
	 * reads to the lowest instance that isn't fused off. When Render
	 * Power Gating is enabled, grabbing forcewake will only power up a
	 * single subslice (the "minconfig") if there isn't a real workload
	 * that needs to be run; this means that if we steer register reads to
	 * one of the higher subslices, we run the risk of reading back 0's or
	 * random garbage.
	 */
	subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));

	/*
	 * If the subslice we picked above also steers us to a valid L3 bank,
	 * then we can just rely on the default steering and won't need to
	 * worry about explicitly re-steering L3BANK reads later.
	 */
	if (gt->info.l3bank_mask & BIT(subslice))
		gt->steering_table[L3BANK] = NULL;

	__add_mcr_wa(gt, wal, 0, subslice);
}
static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned long slice, subslice = 0, slice_mask = 0;
	u32 lncf_mask = 0;
	int i;

	/*
	 * On Xe_HP the steering increases in complexity. There are now several
	 * more units that require steering and we're not guaranteed to be able
	 * to find a common setting for all of them. These are:
	 * - GSLICE (fusable)
	 * - DSS (sub-unit within gslice; fusable)
	 * - L3 Bank (fusable)
	 * - MSLICE (fusable)
	 * - LNCF (sub-unit within mslice; always present if mslice is present)
	 *
	 * We'll do our default/implicit steering based on GSLICE (in the
	 * sliceid field) and DSS (in the subsliceid field). If we can
	 * find overlap between the valid MSLICE and/or LNCF values with
	 * a suitable GSLICE, then we can just re-use the default value and
	 * skip any explicit steering at runtime.
	 *
	 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
	 * a valid sliceid value. DSS steering is the only type of steering
	 * that utilizes the 'subsliceid' bits.
	 *
	 * Also note that, even though the steering domain is called "GSlice"
	 * and it is encoded in the register using the gslice format, the spec
	 * says that the combined (geometry | compute) fuse should be used to
	 * select the steering.
	 */

	/* Find the potential gslice candidates */
	slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
						       GEN_DSS_PER_GSLICE);

	/*
	 * Find the potential LNCF candidates. Either LNCF within a valid
	 * mslice is fine.
	 */
	for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
		lncf_mask |= (0x3 << (i * 2));

	/*
	 * Are there any sliceid values that work for both GSLICE and LNCF
	 * steering?
	 */
	if (slice_mask & lncf_mask) {
		slice_mask &= lncf_mask;
		gt->steering_table[LNCF] = NULL;
	}

	/* How about sliceid values that also work for MSLICE steering? */
	if (slice_mask & gt->info.mslice_mask) {
		slice_mask &= gt->info.mslice_mask;
		gt->steering_table[MSLICE] = NULL;
	}

	if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
		gt->steering_table[GAM] = NULL;

	slice = __ffs(slice_mask);
	subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
		GEN_DSS_PER_GSLICE;

	__add_mcr_wa(gt, wal, slice, subslice);

	/*
	 * SQIDI ranges are special because they use different steering
	 * registers than everything else we work with. On XeHP SDV and
	 * DG2-G10, any value in the steering registers will work fine since
	 * all instances are present, but DG2-G11 only has SQIDI instances at
	 * ID's 2 and 3, so we need to steer to one of those. For simplicity
	 * we'll just steer to a hardcoded "2" since that value will work
	 * everywhere.
	 */
	__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
	__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);

	/*
	 * On DG2, GAM registers have a dedicated steering control register
	 * and must always be programmed to a hardcoded groupid of "1."
	 */
	if (IS_DG2(gt->i915))
		__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
}
static void
pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	unsigned int dss;

	/*
	 * Setup implicit steering for COMPUTE and DSS ranges to the first
	 * non-fused-off DSS. All other types of MCR registers will be
	 * explicitly steered.
	 */
	dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
	__add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
}
static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	icl_wa_init_mcr(gt, wal);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_clr_set(wal,
			 GEN11_GACB_PERF_CTRL,
			 GEN11_HASH_CTRL_MASK,
			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/*
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/*
	 * Wa_1408615072:icl,ehl  (vsunit)
	 * Wa_1407596294:icl,ehl  (hsunit)
	 */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

	/* Wa_1407352427:icl,ehl */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
		    PSDUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl,ehl */
	wa_mcr_write_or(wal,
			GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
			GWUNIT_CLKGATE_DIS);

	/* Wa_1607087056:icl,ehl,jsl */
	if (IS_ICELAKE(i915) ||
	    ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
	     IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
		wa_write_or(wal,
			    GEN11_SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/*
	 * This is not a documented workaround, but rather an optimization
	 * to reduce sampler power.
	 */
	wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}
/*
 * Though there are per-engine instances of these registers,
 * they retain their value through engine resets and should
 * only be provided on the GT workaround list rather than
 * the engine-specific workaround list.
 */
static void
wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->class != VIDEO_DECODE_CLASS ||
		    (engine->instance % 2))
			continue;

		wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
			    IECPUNIT_CLKGATE_DIS);
	}
}
static void
gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	icl_wa_init_mcr(gt, wal);

	/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
	wa_14011060649(gt, wal);

	/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
	wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);

	/*
	 * Wa_14015795083
	 *
	 * Firmware on some gen12 platforms locks the MISCCPCTL register,
	 * preventing i915 from modifying it for this workaround. Skip the
	 * readback verification for this workaround on debug builds; if the
	 * workaround doesn't stick due to firmware behavior, it's not an error
	 * that we want CI to flag.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);
}

static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(gt, wal);

	/* Wa_1409420604:dg1 */
	wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
			CPSSUNIT_CLKGATE_DIS);

	/* Wa_1408615072:dg1 */
	/* Empirical testing shows this register is unaffected by engine reset. */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}
static void
xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	xehp_init_mcr(gt, wal);

	/* Wa_1409757795:xehpsdv */
	wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);

	/* Wa_18011725039:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
		wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
		wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
	}

	/* Wa_16011155590:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    TSGUNIT_CLKGATE_DIS);

	/* Wa_14011780169:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
		wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
			    GAMTLBVDBOX7_CLKGATE_DIS |
			    GAMTLBVDBOX6_CLKGATE_DIS |
			    GAMTLBVDBOX5_CLKGATE_DIS |
			    GAMTLBVDBOX4_CLKGATE_DIS |
			    GAMTLBVDBOX3_CLKGATE_DIS |
			    GAMTLBVDBOX2_CLKGATE_DIS |
			    GAMTLBVDBOX1_CLKGATE_DIS |
			    GAMTLBVDBOX0_CLKGATE_DIS |
			    GAMTLBKCR_CLKGATE_DIS |
			    GAMTLBGUC_CLKGATE_DIS |
			    GAMTLBBLT_CLKGATE_DIS);
		wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
			    GAMTLBGFXA1_CLKGATE_DIS |
			    GAMTLBCOMPA0_CLKGATE_DIS |
			    GAMTLBCOMPA1_CLKGATE_DIS |
			    GAMTLBCOMPB0_CLKGATE_DIS |
			    GAMTLBCOMPB1_CLKGATE_DIS |
			    GAMTLBCOMPC0_CLKGATE_DIS |
			    GAMTLBCOMPC1_CLKGATE_DIS |
			    GAMTLBCOMPD0_CLKGATE_DIS |
			    GAMTLBCOMPD1_CLKGATE_DIS |
			    GAMTLBMERT_CLKGATE_DIS |
			    GAMTLBVEBOX3_CLKGATE_DIS |
			    GAMTLBVEBOX2_CLKGATE_DIS |
			    GAMTLBVEBOX1_CLKGATE_DIS |
			    GAMTLBVEBOX0_CLKGATE_DIS);
	}

	/* Wa_16012725990:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);

	/* Wa_14011060649:xehpsdv */
	wa_14011060649(gt, wal);

	/* Wa_14012362059:xehpsdv */
	wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_14014368820:xehpsdv */
	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);

	/* Wa_14010670810:xehpsdv */
	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	xehp_init_mcr(gt, wal);

	/* Wa_14011060649:dg2 */
	wa_14011060649(gt, wal);

	if (IS_DG2_G10(gt->i915)) {
		/* Wa_22010523718:dg2 */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    CG3DDISCFEG_CLKGATE_DIS);

		/* Wa_14011006942:dg2 */
		wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
				DSS_ROUTER_CLKGATE_DIS);
	}

	/* Wa_14014830051:dg2 */
	wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

	/*
	 * Wa_14015795083
	 * Skip verification for possibly locked register.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);

	/* Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_1509235366:dg2 */
	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);

	/* Wa_14010648519:dg2 */
	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	pvc_init_mcr(gt, wal);

	/* Wa_14015795083 */
	wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);

	/* Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_16016694945 */
	wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Wa_14018778641 / Wa_18018781329 */
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_22016670082 */
	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014830051 */
		wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

		/* Wa_14015795083 */
		wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
	}

	/*
	 * Unlike older platforms, we no longer set up implicit steering here;
	 * all MCR accesses are explicitly steered.
	 */
	debug_dump_steering(gt);
}

static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/*
	 * Note that although these registers are MCR on the primary
	 * GT, the media GT's versions are regular singleton registers.
	 */
	wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);

	debug_dump_steering(gt);
}
/*
 * The bspec performance guide has recommended MMIO tuning settings. These
 * aren't truly "workarounds" but we want to program them through the
 * workaround infrastructure to make sure they're (re)applied at the proper
 * times.
 *
 * The programming in this function is for settings that persist through
 * engine resets and also are not part of any engine's register state context.
 * I.e., settings that only need to be re-applied in the event of a full GT
 * reset.
 */
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
	}

	if (IS_PONTEVECCHIO(gt->i915)) {
		wa_mcr_write(wal, XEHPC_L3SCRUB,
			     SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
		wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
	}

	if (IS_DG2(gt->i915)) {
		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
	}
}
static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	gt_tuning_settings(gt, wal);

	if (gt->type == GT_MEDIA) {
		if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
			xelpmp_gt_workarounds_init(gt, wal);
		else
			MISSING_CASE(MEDIA_VER_FULL(i915));

		return;
	}

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
		xelpg_gt_workarounds_init(gt, wal);
	else if (IS_PONTEVECCHIO(i915))
		pvc_gt_workarounds_init(gt, wal);
	else if (IS_DG2(i915))
		dg2_gt_workarounds_init(gt, wal);
	else if (IS_XEHPSDV(i915))
		xehpsdv_gt_workarounds_init(gt, wal);
	else if (IS_DG1(i915))
		dg1_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_gt_workarounds_init(gt, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_gt_workarounds_init(gt, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(gt, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(gt, wal);
	else if (IS_BROXTON(i915))
		gen9_gt_workarounds_init(gt, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(gt, wal);
	else if (IS_HASWELL(i915))
		hsw_gt_workarounds_init(gt, wal);
	else if (IS_VALLEYVIEW(i915))
		vlv_gt_workarounds_init(gt, wal);
	else if (IS_IVYBRIDGE(i915))
		ivb_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 6)
		snb_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 5)
		ilk_gt_workarounds_init(gt, wal);
	else if (IS_G4X(i915))
		g4x_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 4)
		gen4_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) <= 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));
}

void intel_gt_init_workarounds(struct intel_gt *gt)
{
	struct i915_wa_list *wal = &gt->wa_list;

	wa_init_start(wal, gt, "GT", "global");
	gt_init_workarounds(gt, wal);
	wa_init_finish(wal);
}
static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
	  const char *name, const char *from)
{
	if ((cur ^ wa->set) & wa->read) {
		drm_err(&gt->i915->drm,
			"%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
			name, from, i915_mmio_reg_offset(wa->reg),
			cur, cur & wa->read, wa->set & wa->read);

		return false;
	}

	return true;
}
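/*
 * Worked example (illustrative): for a workaround with set == 0x0040 and
 * read == 0x00c0, a readback of cur == 0x0080 fails verification since
 * (cur ^ set) & read == 0x00c0: bit 6 was lost and bit 7 appeared
 * unexpectedly. Bits outside 'read' never affect the verdict.
 */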
static void wa_list_apply(const struct i915_wa_list *wal)
{
	struct intel_gt *gt = wal->gt;
	struct intel_uncore *uncore = gt->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val, old = 0;

		/* open-coded rmw due to steering */
		if (wa->clr)
			old = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
		val = (old & ~wa->clr) | wa->set;
		if (val != old || !wa->clr) {
			if (wa->is_mcr)
				intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
			else
				intel_uncore_write_fw(uncore, wa->reg, val);
		}

		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
			u32 val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);

			wa_verify(gt, wa, val, wal->name, "application");
		}
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(&gt->wa_list);
}
static bool wa_list_verify(struct intel_gt *gt,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct intel_uncore *uncore = gt->uncore;
	struct i915_wa *wa;
	enum forcewake_domains fw;
	unsigned long flags;
	unsigned int i;
	bool ok = true;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg),
				wal->name, from);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt, &gt->wa_list, from);
}
static bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}
static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.is_mcr = 1,
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.mcr_reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void
whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
{
	whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
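/*
 * Each whitelist entry lands in a RING_FORCE_TO_NONPRIV slot, with the
 * access flags OR'ed into the low bits of the register offset. E.g. the
 * builders below use
 *
 *	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
 *			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
 *			  RING_FORCE_TO_NONPRIV_RANGE_4);
 *
 * to expose a read-only range of 4 consecutive registers starting at
 * PS_INVOCATION_COUNT to userspace.
 */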
static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}
1959 static void skl_whitelist_build(struct intel_engine_cs *engine)
1961 struct i915_wa_list *w = &engine->whitelist;
1963 if (engine->class != RENDER_CLASS)
1966 gen9_whitelist_build(w);
1968 /* WaDisableLSQCROPERFforOCL:skl */
1969 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1972 static void bxt_whitelist_build(struct intel_engine_cs *engine)
1974 if (engine->class != RENDER_CLASS)
1977 gen9_whitelist_build(&engine->whitelist);
1980 static void kbl_whitelist_build(struct intel_engine_cs *engine)
1982 struct i915_wa_list *w = &engine->whitelist;
1984 if (engine->class != RENDER_CLASS)
1987 gen9_whitelist_build(w);
1989 /* WaDisableLSQCROPERFforOCL:kbl */
1990 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1993 static void glk_whitelist_build(struct intel_engine_cs *engine)
1995 struct i915_wa_list *w = &engine->whitelist;
1997 if (engine->class != RENDER_CLASS)
2000 gen9_whitelist_build(w);
2002 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
2003 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2006 static void cfl_whitelist_build(struct intel_engine_cs *engine)
2008 struct i915_wa_list *w = &engine->whitelist;
2010 if (engine->class != RENDER_CLASS)
2013 gen9_whitelist_build(w);
2016 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
2018 * This covers 4 registers which are next to one another:
2019 * - PS_INVOCATION_COUNT
2020 * - PS_INVOCATION_COUNT_UDW
2021 * - PS_DEPTH_COUNT
2022 * - PS_DEPTH_COUNT_UDW
2024 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2025 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2026 RING_FORCE_TO_NONPRIV_RANGE_4);
2029 static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
2031 struct i915_wa_list *w = &engine->whitelist;
2033 if (engine->class != RENDER_CLASS)
2034 whitelist_reg_ext(w,
2035 RING_CTX_TIMESTAMP(engine->mmio_base),
2036 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2039 static void cml_whitelist_build(struct intel_engine_cs *engine)
2041 allow_read_ctx_timestamp(engine);
2043 cfl_whitelist_build(engine);
2046 static void icl_whitelist_build(struct intel_engine_cs *engine)
2048 struct i915_wa_list *w = &engine->whitelist;
2050 allow_read_ctx_timestamp(engine);
2052 switch (engine->class) {
2054 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
2055 whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
2057 /* WaAllowUMDToModifySamplerMode:icl */
2058 whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
2060 /* WaEnableStateCacheRedirectToCS:icl */
2061 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2064 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
2066 * This covers 4 registers which are next to one another:
2067 * - PS_INVOCATION_COUNT
2068 * - PS_INVOCATION_COUNT_UDW
2069 * - PS_DEPTH_COUNT
2070 * - PS_DEPTH_COUNT_UDW
2072 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2073 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2074 RING_FORCE_TO_NONPRIV_RANGE_4);
2077 case VIDEO_DECODE_CLASS:
2078 /* hucStatusRegOffset */
2079 whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
2080 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2081 /* hucUKernelHdrInfoRegOffset */
2082 whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
2083 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2084 /* hucStatus2RegOffset */
2085 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
2086 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2094 static void tgl_whitelist_build(struct intel_engine_cs *engine)
2096 struct i915_wa_list *w = &engine->whitelist;
2098 allow_read_ctx_timestamp(engine);
2100 switch (engine->class) {
2103 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
2106 * This covers 4 registers which are next to one another:
2107 * - PS_INVOCATION_COUNT
2108 * - PS_INVOCATION_COUNT_UDW
2109 * - PS_DEPTH_COUNT
2110 * - PS_DEPTH_COUNT_UDW
2112 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2113 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2114 RING_FORCE_TO_NONPRIV_RANGE_4);
2118 * Wa_14012131227:dg1
2119 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
2121 whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2123 /* Wa_1806527549:tgl */
2124 whitelist_reg(w, HIZ_CHICKEN);
2126 /* Required by recommended tuning setting (not a workaround) */
2127 whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
2135 static void dg2_whitelist_build(struct intel_engine_cs *engine)
2137 struct i915_wa_list *w = &engine->whitelist;
2139 switch (engine->class) {
2141 /* Required by recommended tuning setting (not a workaround) */
2142 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2150 static void blacklist_trtt(struct intel_engine_cs *engine)
2152 struct i915_wa_list *w = &engine->whitelist;
2155 * Prevent read/write access to [0x4400, 0x4600) which covers
2156 * the TRTT range across all engines. Note that normally userspace
2157 * cannot access the other engines' TRTT control, but for simplicity
2158 * we cover the entire range on each engine.
2160 whitelist_reg_ext(w, _MMIO(0x4400),
2161 RING_FORCE_TO_NONPRIV_DENY |
2162 RING_FORCE_TO_NONPRIV_RANGE_64);
2163 whitelist_reg_ext(w, _MMIO(0x4500),
2164 RING_FORCE_TO_NONPRIV_DENY |
2165 RING_FORCE_TO_NONPRIV_RANGE_64);
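/*
 * Worked example of the range math (assuming RANGE_64 covers 64
 * consecutive dwords, i.e. 64 * 4 = 0x100 bytes of MMIO space): the
 * two deny entries above span
 *
 *	[0x4400, 0x4500) and [0x4500, 0x4600)
 *
 * blocking the whole [0x4400, 0x4600) TRTT window with only two of the
 * limited nonpriv slots.
 */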
2168 static void pvc_whitelist_build(struct intel_engine_cs *engine)
2170 /* Wa_16014440446:pvc */
2171 blacklist_trtt(engine);
2174 static void xelpg_whitelist_build(struct intel_engine_cs *engine)
2176 struct i915_wa_list *w = &engine->whitelist;
2178 switch (engine->class) {
2180 /* Required by recommended tuning setting (not a workaround) */
2181 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2189 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
2191 struct drm_i915_private *i915 = engine->i915;
2192 struct i915_wa_list *w = &engine->whitelist;
2194 wa_init_start(w, engine->gt, "whitelist", engine->name);
2196 if (engine->gt->type == GT_MEDIA)
2198 else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
2199 xelpg_whitelist_build(engine);
2200 else if (IS_PONTEVECCHIO(i915))
2201 pvc_whitelist_build(engine);
2202 else if (IS_DG2(i915))
2203 dg2_whitelist_build(engine);
2204 else if (IS_XEHPSDV(i915))
2206 else if (GRAPHICS_VER(i915) == 12)
2207 tgl_whitelist_build(engine);
2208 else if (GRAPHICS_VER(i915) == 11)
2209 icl_whitelist_build(engine);
2210 else if (IS_COMETLAKE(i915))
2211 cml_whitelist_build(engine);
2212 else if (IS_COFFEELAKE(i915))
2213 cfl_whitelist_build(engine);
2214 else if (IS_GEMINILAKE(i915))
2215 glk_whitelist_build(engine);
2216 else if (IS_KABYLAKE(i915))
2217 kbl_whitelist_build(engine);
2218 else if (IS_BROXTON(i915))
2219 bxt_whitelist_build(engine);
2220 else if (IS_SKYLAKE(i915))
2221 skl_whitelist_build(engine);
2222 else if (GRAPHICS_VER(i915) <= 8)
2225 MISSING_CASE(GRAPHICS_VER(i915));
2230 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
2232 const struct i915_wa_list *wal = &engine->whitelist;
2233 struct intel_uncore *uncore = engine->uncore;
2234 const u32 base = engine->mmio_base;
2241 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
2242 intel_uncore_write(uncore,
2243 RING_FORCE_TO_NONPRIV(base, i),
2244 i915_mmio_reg_offset(wa->reg));
2246 /* And clear the rest just in case of garbage */
2247 for (; i < RING_MAX_NONPRIV_SLOTS; i++)
2248 intel_uncore_write(uncore,
2249 RING_FORCE_TO_NONPRIV(base, i),
2250 i915_mmio_reg_offset(RING_NOPID(base)));
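/*
 * Illustrative end state for a two-entry whitelist: slots 0-1 hold the
 * flagged offsets, every remaining slot is parked on the harmless
 * RING_NOPID register so no stale entry survives:
 *
 *	RING_FORCE_TO_NONPRIV(base, 0) <- offset0 | flags0
 *	RING_FORCE_TO_NONPRIV(base, 1) <- offset1 | flags1
 *	RING_FORCE_TO_NONPRIV(base, 2..RING_MAX_NONPRIV_SLOTS - 1)
 *				       <- i915_mmio_reg_offset(RING_NOPID(base))
 */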
2254 * engine_fake_wa_init(), a placeholder to program the registers
2255 * which are not part of an official workaround defined by the
2256 * hardware team.
2257 * Programming those registers inside the workaround framework
2258 * allows us to reuse its application and verification machinery.
2261 engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2266 * RING_CMD_CCTL specifies the default MOCS entry that will be used
2267 * by the command streamer when executing commands that don't have
2268 * a way to explicitly specify a MOCS setting. The default should
2269 * usually reference whichever MOCS entry corresponds to uncached
2270 * behavior, although use of a WB cached entry is recommended by the
2271 * spec in certain circumstances on specific platforms.
2273 if (GRAPHICS_VER(engine->i915) >= 12) {
2274 mocs_r = engine->gt->mocs.uc_index;
2275 mocs_w = engine->gt->mocs.uc_index;
2277 if (HAS_L3_CCS_READ(engine->i915) &&
2278 engine->class == COMPUTE_CLASS) {
2279 mocs_r = engine->gt->mocs.wb_index;
2282 * Even on the few platforms where MOCS 0 is a
2283 * legitimate table entry, it's never the correct
2284 * setting to use here; we can assume the MOCS init
2285 * just forgot to initialize wb_index.
2287 drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
2290 wa_masked_field_set(wal,
2291 RING_CMD_CCTL(engine->mmio_base),
2293 CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
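/*
 * A minimal sketch, assuming a hypothetical MOCS table whose uncached
 * entry is index 3: the masked-field write above then boils down to
 *
 *	mocs_r = mocs_w = 3;
 *	CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r);	packs both defaults
 *
 * so commands that carry no explicit MOCS value fall back to uncached
 * behaviour (modulo the L3-CCS read exception handled above).
 */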
2298 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2300 struct drm_i915_private *i915 = engine->i915;
2301 struct intel_gt *gt = engine->gt;
2303 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2304 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
2305 /* Wa_22014600077 */
2306 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2307 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
2310 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2311 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2314 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2315 SC_DISABLE_POWER_OPTIMIZATION_EBB);
2318 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2320 /* Wa_22012856258 */
2321 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2322 GEN12_DISABLE_READ_SUPPRESSION);
2327 * Wa_22010960976:dg2
2328 * Wa_14013347512:dg2
2330 wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
2331 LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
2334 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
2336 /* Wa_14015150844 */
2337 wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
2338 _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
2342 if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) {
2343 /* Wa_22014600077:dg2 */
2344 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2345 _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
2346 0 /* Wa_14012342262 write-only reg, so skip verification */,
2350 if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2351 IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2353 * Wa_1606700617:tgl,dg1,adl-p
2354 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
2355 * Wa_14010826681:tgl,dg1,rkl,adl-p
2356 * Wa_18019627453:dg2
2359 GEN9_CS_DEBUG_MODE1,
2360 FF_DOP_CLOCK_GATE_DISABLE);
2363 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
2364 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2365 /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
2366 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
2369 * Wa_1407928979:tgl A*
2370 * Wa_18011464164:tgl[B0+],dg1[B0+]
2371 * Wa_22010931296:tgl[B0+],dg1[B0+]
2372 * Wa_14010919138:rkl,dg1,adl-s,adl-p
2374 wa_write_or(wal, GEN7_FF_THREAD_MODE,
2375 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2377 /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
2378 wa_mcr_masked_en(wal,
2383 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2384 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2386 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2387 GEN12_PUSH_CONST_DEREF_HOLD_DIS);
2389 /* Wa_14010229206 */
2390 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
2393 if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
2397 * On TGL and RKL there are multiple entries for this WA in the
2398 * BSpec; some indicate this is an A0-only WA, others indicate
2399 * it applies to all steppings so we trust the "all steppings."
2402 RING_PSMI_CTL(RENDER_RING_BASE),
2403 GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
2404 GEN8_RC_SEMA_IDLE_MSG_DISABLE);
2407 if (GRAPHICS_VER(i915) == 11) {
2408 /* This is not a Wa. Enable for better image quality */
2411 _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
2415 * Formerly known as WaGAPZPriorityScheme
2419 GEN11_ARBITRATION_PRIO_ORDER_MASK);
2423 * Formerly known as WaL3BankAddressHashing
2425 wa_write_clr_set(wal,
2427 GEN11_HASH_CTRL_EXCL_MASK,
2428 GEN11_HASH_CTRL_EXCL_BIT0);
2429 wa_write_clr_set(wal,
2431 GEN11_BANK_HASH_ADDR_EXCL_MASK,
2432 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
2436 * Formerly known as WaDisableCleanEvicts
2438 wa_mcr_write_or(wal,
2440 GEN11_LQSC_CLEAN_EVICT_DISABLE);
2442 /* Wa_1606682166:icl */
2445 GEN7_DISABLE_SAMPLER_PREFETCH);
2447 /* Wa_1409178092:icl */
2448 wa_mcr_write_clr_set(wal,
2450 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
2453 /* WaEnable32PlaneMode:icl */
2454 wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
2455 GEN11_ENABLE_32_PLANE_MODE);
2458 * Wa_1408767742:icl[a2..forever],ehl[all]
2459 * Wa_1605460711:icl[a0..c0]
2462 GEN7_FF_THREAD_MODE,
2463 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2465 /* Wa_22010271021 */
2467 GEN9_CS_DEBUG_MODE1,
2468 FF_DOP_CLOCK_GATE_DISABLE);
2472 * Intel platforms that support fine-grained preemption (i.e., gen9 and
2473 * beyond) allow the kernel-mode driver to choose between two different
2474 * options for controlling preemption granularity and behavior.
2476 * Option 1 (hardware default):
2477 * Preemption settings are controlled in a global manner via
2478 * kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
2479 * and settings chosen by the kernel-mode driver will apply to all
2480 * userspace clients.
2483 * Preemption settings are controlled on a per-context basis via
2484 * register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
2485 * context switch and is writable by userspace (e.g., via
2486 * MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
2487 * which allows different userspace drivers/clients to select
2488 * different settings, or to change those settings on the fly in
2489 * response to runtime needs. This option was known by name
2490 * "FtrPerCtxtPreemptionGranularityControl" at one time, although
2491 * that name is somewhat misleading as other non-granularity
2492 * preemption settings are also impacted by this decision.
2494 * On Linux, our policy has always been to let userspace drivers
2495 * control preemption granularity/settings (Option 2). This was
2496 * originally mandatory on gen9 to prevent ABI breakage (old gen9
2497 * userspace developed before object-level preemption was enabled would
2498 * not behave well if i915 were to go with Option 1 and enable that
2499 * preemption in a global manner). On gen9 each context would have
2500 * object-level preemption disabled by default (see
2501 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
2502 * userspace drivers could opt-in to object-level preemption as they
2503 * saw fit. For post-gen9 platforms, we continue to utilize Option 2;
2504 * even though it is no longer necessary for ABI compatibility when
2505 * enabling a new platform, it does ensure that userspace will be able
2506 * to implement any workarounds that show up requiring temporary
2507 * adjustments to preemption behavior at runtime.
2509 * Notes/Workarounds:
2510 * - Wa_14015141709: On DG2 and early steppings of MTL,
2511 * CS_CHICKEN1[0] does not disable object-level preemption as
2512 * it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
2513 * using Option 1). Effectively this means userspace is unable
2514 * to disable object-level preemption on these platforms/steppings
2515 * despite the setting here.
2517 * - Wa_16013994831: May require that userspace program
2518 * CS_CHICKEN1[10] when certain runtime conditions are true.
2519 * Userspace requires Option 2 to be in effect for their update of
2520 * CS_CHICKEN1[10] to be effective.
2522 * Other workarounds may appear in the future that will also require
2523 * Option 2 behavior to allow proper userspace implementation.
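/*
 * Hedged illustration of Option 2 from the userspace side: with
 * per-context control in effect, a client batch can flip its own
 * CS_CHICKEN1 bits with a masked MI_LOAD_REGISTER_IMM, e.g. for the
 * Wa_16013994831 bit mentioned above (dword layout only, register
 * offset per the comment above):
 *
 *	batch[n + 0] = MI_LOAD_REGISTER_IMM(1);
 *	batch[n + 1] = 0x2580;			CS_CHICKEN1
 *	batch[n + 2] = _MASKED_BIT_ENABLE(BIT(10));
 *
 * The value is saved/restored with the context image, so it affects
 * only that client.
 */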
2525 if (GRAPHICS_VER(i915) >= 9)
2527 GEN7_FF_SLICE_CS_CHICKEN1,
2528 GEN9_FFSC_PERCTX_PREEMPT_CTRL);
2530 if (IS_SKYLAKE(i915) ||
2531 IS_KABYLAKE(i915) ||
2532 IS_COFFEELAKE(i915) ||
2533 IS_COMETLAKE(i915)) {
2534 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
2537 GEN9_GAPS_TSV_CREDIT_DISABLE);
2540 if (IS_BROXTON(i915)) {
2541 /* WaDisablePooledEuLoadBalancingFix:bxt */
2543 FF_SLICE_CS_CHICKEN2,
2544 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
2547 if (GRAPHICS_VER(i915) == 9) {
2548 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
2550 GEN9_CSFE_CHICKEN1_RCS,
2551 GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
2553 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
2554 wa_mcr_write_or(wal,
2556 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
2558 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
2559 if (IS_GEN9_LP(i915))
2560 wa_mcr_write_clr_set(wal,
2562 L3_PRIO_CREDITS_MASK,
2563 L3_GENERAL_PRIO_CREDITS(62) |
2564 L3_HIGH_PRIO_CREDITS(2));
2566 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
2567 wa_mcr_write_or(wal,
2569 GEN8_LQSC_FLUSH_COHERENT_LINES);
2571 /* Disable atomics in L3 to prevent unrecoverable hangs */
2572 wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
2573 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2574 wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
2575 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2576 wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
2577 EVICTION_PERF_FIX_ENABLE, 0);
2580 if (IS_HASWELL(i915)) {
2581 /* WaSampleCChickenBitEnable:hsw */
2583 HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
2587 /* enable HiZ Raw Stall Optimization */
2588 HIZ_RAW_STALL_OPT_DISABLE);
2591 if (IS_VALLEYVIEW(i915)) {
2592 /* WaDisableEarlyCull:vlv */
2595 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2598 * WaVSThreadDispatchOverride:ivb,vlv
2600 * This actually overrides the dispatch
2601 * mode for all thread types.
2603 wa_write_clr_set(wal,
2604 GEN7_FF_THREAD_MODE,
2606 GEN7_FF_TS_SCHED_HW |
2607 GEN7_FF_VS_SCHED_HW |
2608 GEN7_FF_DS_SCHED_HW);
2610 /* WaPsdDispatchEnable:vlv */
2611 /* WaDisablePSDDualDispatchEnable:vlv */
2613 GEN7_HALF_SLICE_CHICKEN1,
2614 GEN7_MAX_PS_THREAD_DEP |
2615 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2618 if (IS_IVYBRIDGE(i915)) {
2619 /* WaDisableEarlyCull:ivb */
2622 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2624 if (0) { /* causes HiZ corruption on ivb:gt1 */
2625 /* enable HiZ Raw Stall Optimization */
2628 HIZ_RAW_STALL_OPT_DISABLE);
2632 * WaVSThreadDispatchOverride:ivb,vlv
2634 * This actually overrides the dispatch
2635 * mode for all thread types.
2637 wa_write_clr_set(wal,
2638 GEN7_FF_THREAD_MODE,
2640 GEN7_FF_TS_SCHED_HW |
2641 GEN7_FF_VS_SCHED_HW |
2642 GEN7_FF_DS_SCHED_HW);
2644 /* WaDisablePSDDualDispatchEnable:ivb */
2645 if (IS_IVB_GT1(i915))
2647 GEN7_HALF_SLICE_CHICKEN1,
2648 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2651 if (GRAPHICS_VER(i915) == 7) {
2652 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
2654 RING_MODE_GEN7(RENDER_RING_BASE),
2655 GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
2657 /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
2658 wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
2661 * BSpec says this must be set, even though
2662 * WaDisable4x2SubspanOptimization:ivb,hsw
2663 * WaDisable4x2SubspanOptimization isn't listed for VLV.
2667 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
2670 * BSpec recommends 8x4 when MSAA is used,
2671 * however in practice 16x4 seems fastest.
2673 * Note that PS/WM thread counts depend on the WIZ hashing
2674 * disable bit, which we don't touch here, but it's good
2675 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2677 wa_masked_field_set(wal,
2679 GEN6_WIZ_HASHING_MASK,
2680 GEN6_WIZ_HASHING_16x4);
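/*
 * A short aside on the masked-register convention assumed by the
 * wa_masked_*() helpers used throughout: these registers treat the
 * upper 16 bits of a write as a write-enable mask for the lower 16, so
 * the WIZ hashing update above reaches the hardware roughly as
 *
 *	(GEN6_WIZ_HASHING_MASK << 16) | GEN6_WIZ_HASHING_16x4
 *
 * updating only the hashing field, with no read-modify-write needed.
 */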
2683 if (IS_GRAPHICS_VER(i915, 6, 7))
2685 * We need to disable the AsyncFlip performance optimisations in
2686 * order to use MI_WAIT_FOR_EVENT within the CS. It should
2687 * already be programmed to '1' on all products.
2689 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
2692 RING_MI_MODE(RENDER_RING_BASE),
2693 ASYNC_FLIP_PERF_DISABLE);
2695 if (GRAPHICS_VER(i915) == 6) {
2697 * Required for the hardware to program scanline values for
2698 * waiting.
2699 * WaEnableFlushTlbInvalidationMode:snb
2703 GFX_TLB_INVALIDATE_EXPLICIT);
2705 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
2708 _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
2712 /* WaStripsFansDisableFastClipPerformanceFix:snb */
2713 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
2716 * "This bit must be set if 3DSTATE_CLIP clip mode is set
2717 * to normal and 3DSTATE_SF number of SF output attributes
2718 * is more than 16."
2719 */
2720 _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
2723 * BSpec recommends 8x4 when MSAA is used,
2724 * however in practice 16x4 seems fastest.
2726 * Note that PS/WM thread counts depend on the WIZ hashing
2727 * disable bit, which we don't touch here, but it's good
2728 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2730 wa_masked_field_set(wal,
2732 GEN6_WIZ_HASHING_MASK,
2733 GEN6_WIZ_HASHING_16x4);
2735 /* WaDisable_RenderCache_OperationalFlush:snb */
2736 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
2739 * From the Sandybridge PRM, volume 1 part 3, page 24:
2740 * "If this bit is set, STCunit will have LRA as replacement
2741 * policy. [...] This bit must be reset. LRA replacement
2742 * policy is not supported."
2746 CM0_STC_EVICT_DISABLE_LRA_SNB);
2749 if (IS_GRAPHICS_VER(i915, 4, 6))
2750 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
2751 wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
2752 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
2753 /* XXX bit doesn't stick on Broadwater */
2754 IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
2756 if (GRAPHICS_VER(i915) == 4)
2758 * Disable CONSTANT_BUFFER before it is loaded from the context
2759 * image. Once it is loaded, it is executed and the stored
2760 * address may no longer be valid, leading to a GPU hang.
2762 * This imposes the requirement that userspace reload their
2763 * CONSTANT_BUFFER on every batch, fortunately a requirement
2764 * they are already accustomed to from before contexts were
2767 wa_add(wal, ECOSKPD(RENDER_RING_BASE),
2768 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
2769 0 /* XXX bit doesn't stick on Broadwater */,
2774 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2776 struct drm_i915_private *i915 = engine->i915;
2778 /* WaKBLVECSSemaphoreWaitPoll:kbl */
2779 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
2781 RING_SEMA_WAIT_POLL(engine->mmio_base),
2787 ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2789 if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
2790 /* Wa_14014999345:pvc */
2791 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
2796 * The BSpec performance guide has recommended MMIO tuning settings. These
2797 * aren't truly "workarounds" but we want to program them with the same
2798 * workaround infrastructure to ensure that they're automatically added to
2799 * the GuC save/restore lists, re-applied at the right times, and checked for
2800 * any conflicting programming requested by real workarounds.
2802 * Programming settings should be added here only if their registers are not
2803 * part of an engine's register state context. If a register is part of a
2804 * context, then any tuning settings should be programmed in an appropriate
2805 * function invoked by __intel_engine_init_ctx_wa().
2808 add_render_compute_tuning_settings(struct intel_gt *gt,
2809 struct i915_wa_list *wal)
2811 struct drm_i915_private *i915 = gt->i915;
2813 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
2814 wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
2817 * This tuning setting proves beneficial only on ATS-M designs; the
2818 * default "age based" setting is optimal on regular DG2 and other
2821 if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
2822 wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
2823 THREAD_EX_ARB_MODE_RR_AFTER_DEP);
2825 if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
2826 wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
2830 * The workarounds in this function apply to shared registers in
2831 * the general render reset domain that aren't tied to a
2832 * specific engine. Since all render+compute engines get reset
2833 * together, and the contents of these registers are lost during
2834 * the shared render domain reset, we'll define such workarounds
2835 * here and then add them to just a single RCS or CCS engine's
2836 * workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag).
2839 general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2841 struct drm_i915_private *i915 = engine->i915;
2842 struct intel_gt *gt = engine->gt;
2844 add_render_compute_tuning_settings(gt, wal);
2846 if (GRAPHICS_VER(i915) >= 11) {
2847 /* This is not a Wa (although referred to as
2848 * WaSetInidrectStateOverride in places); it allows
2849 * applications that reference sampler states through
2850 * the BindlessSamplerStateBaseAddress to have their
2851 * border color relative to DynamicStateBaseAddress
2852 * rather than BindlessSamplerStateBaseAddress.
2854 * Otherwise SAMPLER_STATE border colors have to be
2855 * copied in multiple heaps (DynamicStateBaseAddress &
2856 * BindlessSamplerStateBaseAddress)
2860 wa_mcr_masked_en(wal,
2862 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
2865 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
2866 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
2867 /* Wa_14017856879 */
2868 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
2870 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2871 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2876 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2877 MTL_DISABLE_SAMPLER_SC_OOO);
2879 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2880 /* Wa_22015279794 */
2881 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2882 DISABLE_PREFETCH_INTO_IC);
2884 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2885 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2887 /* Wa_22013037850 */
2888 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
2889 DISABLE_128B_EVICTION_COMMAND_UDW);
2891 /* Wa_18017747507 */
2892 wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
2895 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2896 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2897 IS_PONTEVECCHIO(i915) ||
2899 /* Wa_22014226127 */
2900 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
2903 if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
2904 /* Wa_14015227452:dg2,pvc */
2905 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
2907 /* Wa_16015675438:dg2,pvc */
2908 wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
2913 * Wa_16011620976:dg2_g11
2914 * Wa_22015475538:dg2
2916 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
2919 if (IS_DG2_G11(i915)) {
2921 * Wa_22012826095:dg2
2922 * Wa_22013059131:dg2
2924 wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
2926 REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
2928 /* Wa_22013059131:dg2 */
2929 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
2930 FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
2935 * Note that register 0xE420 is write-only and cannot be read
2936 * back for verification on DG2 (due to Wa_14012342262), so
2937 * we need to explicitly skip the readback.
2939 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2940 _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
2941 0 /* write-only, so skip validation */,
2945 if (IS_DG2_G10(i915) || IS_DG2_G12(i915)) {
2946 /* Wa_18028616096 */
2947 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
2950 if (IS_XEHPSDV(i915)) {
2952 wa_mcr_masked_en(wal,
2954 SYSTOLIC_DOP_CLOCK_GATING_DIS);
2957 wa_mcr_masked_en(wal,
2959 GEN12_DISABLE_GRF_CLEAR);
2961 /* Wa_14010449647:xehpsdv */
2962 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
2963 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2968 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2970 if (GRAPHICS_VER(engine->i915) < 4)
2973 engine_fake_wa_init(engine, wal);
2976 * These are common workarounds that just need to be applied
2977 * to a single RCS/CCS engine's workaround list since
2978 * they're reset as part of the general render domain reset.
2980 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
2981 general_render_compute_wa_init(engine, wal);
2983 if (engine->class == COMPUTE_CLASS)
2984 ccs_engine_wa_init(engine, wal);
2985 else if (engine->class == RENDER_CLASS)
2986 rcs_engine_wa_init(engine, wal);
2988 xcs_engine_wa_init(engine, wal);
2991 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
2993 struct i915_wa_list *wal = &engine->wa_list;
2995 wa_init_start(wal, engine->gt, "engine", engine->name);
2996 engine_init_workarounds(engine, wal);
2997 wa_init_finish(wal);
3000 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
3002 wa_list_apply(&engine->wa_list);
3005 static const struct i915_range mcr_ranges_gen8[] = {
3006 { .start = 0x5500, .end = 0x55ff },
3007 { .start = 0x7000, .end = 0x7fff },
3008 { .start = 0x9400, .end = 0x97ff },
3009 { .start = 0xb000, .end = 0xb3ff },
3010 { .start = 0xe000, .end = 0xe7ff },
3014 static const struct i915_range mcr_ranges_gen12[] = {
3015 { .start = 0x8150, .end = 0x815f },
3016 { .start = 0x9520, .end = 0x955f },
3017 { .start = 0xb100, .end = 0xb3ff },
3018 { .start = 0xde80, .end = 0xe8ff },
3019 { .start = 0x24a00, .end = 0x24a7f },
3023 static const struct i915_range mcr_ranges_xehp[] = {
3024 { .start = 0x4000, .end = 0x4aff },
3025 { .start = 0x5200, .end = 0x52ff },
3026 { .start = 0x5400, .end = 0x7fff },
3027 { .start = 0x8140, .end = 0x815f },
3028 { .start = 0x8c80, .end = 0x8dff },
3029 { .start = 0x94d0, .end = 0x955f },
3030 { .start = 0x9680, .end = 0x96ff },
3031 { .start = 0xb000, .end = 0xb3ff },
3032 { .start = 0xc800, .end = 0xcfff },
3033 { .start = 0xd800, .end = 0xd8ff },
3034 { .start = 0xdc00, .end = 0xffff },
3035 { .start = 0x17000, .end = 0x17fff },
3036 { .start = 0x24a00, .end = 0x24a7f },
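/*
 * These tables are assumed to end with a zeroed sentinel entry; the
 * scan in mcr_range() below runs until mcr_ranges[i].start == 0, e.g.
 *
 *	static const struct i915_range mcr_ranges_example[] = {
 *		{ .start = 0x5500, .end = 0x55ff },
 *		{},	sentinel, stops the scan
 *	};
 */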
3040 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
3042 const struct i915_range *mcr_ranges;
3045 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
3046 mcr_ranges = mcr_ranges_xehp;
3047 else if (GRAPHICS_VER(i915) >= 12)
3048 mcr_ranges = mcr_ranges_gen12;
3049 else if (GRAPHICS_VER(i915) >= 8)
3050 mcr_ranges = mcr_ranges_gen8;
3055 * Registers in these ranges are affected by the MCR selector
3056 * which only controls CPU initiated MMIO. Routing does not
3057 * work for CS access so we cannot verify them on this path.
3059 for (i = 0; mcr_ranges[i].start; i++)
3060 if (offset >= mcr_ranges[i].start &&
3061 offset <= mcr_ranges[i].end)
3068 wa_list_srm(struct i915_request *rq,
3069 const struct i915_wa_list *wal,
3070 struct i915_vma *vma)
3072 struct drm_i915_private *i915 = rq->i915;
3073 unsigned int i, count = 0;
3074 const struct i915_wa *wa;
3077 srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
3078 if (GRAPHICS_VER(i915) >= 8)
3081 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3082 if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
3086 cs = intel_ring_begin(rq, 4 * count);
3090 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3091 u32 offset = i915_mmio_reg_offset(wa->reg);
3093 if (mcr_range(i915, offset))
3098 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
3101 intel_ring_advance(rq, cs);
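/*
 * Sketch of the dword sequence emitted per register above (four dwords
 * each, matching the intel_ring_begin(rq, 4 * count) allocation; on
 * gen8+ the srm opcode's length field is bumped for the wider
 * address):
 *
 *	*cs++ = srm;		MI_STORE_REGISTER_MEM
 *	*cs++ = offset;		MMIO register to sample
 *	*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
 *	*cs++ = 0;		upper address bits (assumed zero for GGTT)
 *
 * Steered (MCR) registers are skipped because the CS cannot apply the
 * MCR selector; see mcr_range().
 */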
3106 static int engine_wa_list_verify(struct intel_context *ce,
3107 const struct i915_wa_list * const wal,
3110 const struct i915_wa *wa;
3111 struct i915_request *rq;
3112 struct i915_vma *vma;
3113 struct i915_gem_ww_ctx ww;
3121 vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
3122 wal->count * sizeof(u32));
3124 return PTR_ERR(vma);
3126 intel_engine_pm_get(ce->engine);
3127 i915_gem_ww_ctx_init(&ww, false);
3129 err = i915_gem_object_lock(vma->obj, &ww);
3131 err = intel_context_pin_ww(ce, &ww);
3135 err = i915_vma_pin_ww(vma, &ww, 0, 0,
3136 i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
3140 rq = i915_request_create(ce);
3146 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
3148 err = wa_list_srm(rq, wal, vma);
3150 i915_request_get(rq);
3152 i915_request_set_error_once(rq, err);
3153 i915_request_add(rq);
3158 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
3163 results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
3164 if (IS_ERR(results)) {
3165 err = PTR_ERR(results);
3170 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3171 if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
3174 if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
3178 i915_gem_object_unpin_map(vma->obj);
3181 i915_request_put(rq);
3183 i915_vma_unpin(vma);
3185 intel_context_unpin(ce);
3187 if (err == -EDEADLK) {
3188 err = i915_gem_ww_ctx_backoff(&ww);
3192 i915_gem_ww_ctx_fini(&ww);
3193 intel_engine_pm_put(ce->engine);
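/*
 * The function above follows the usual i915_gem_ww_ctx pattern: on
 * -EDEADLK all object locks are dropped and the whole pin/submit
 * sequence retried from the top. Schematically:
 *
 *	i915_gem_ww_ctx_init(&ww, false);
 * retry:
 *	err = i915_gem_object_lock(vma->obj, &ww);
 *	...
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */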
3198 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
3201 return engine_wa_list_verify(engine->kernel_context,
3206 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3207 #include "selftest_workarounds.c"