// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

#include "display/intel_fbc_regs.h"

/**
 * DOC: Hardware workarounds
 *
 * Hardware workarounds are register programming documented to be executed in
 * the driver that fall outside of the normal programming sequences for a
 * platform. There are some basic categories of workarounds, depending on
 * how/when they are applied:
 *
 * - Context workarounds: workarounds that touch registers that are
 *   saved/restored to/from the HW context image. The list is emitted (via Load
 *   Register Immediate commands) once when initializing the device and saved in
 *   the default context. That default context is then used on every context
 *   creation to have a "primed golden context", i.e. a context image that
 *   already contains the changes needed to all the registers.
 *
 *   Context workarounds should be implemented in the \*_ctx_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Engine workarounds: the list of these WAs is applied whenever the specific
 *   engine is reset. It's also possible that a set of engine classes share a
 *   common power domain and they are reset together. This happens on some
 *   platforms with render and compute engines. In this case (at least) one of
 *   them needs to keep the workaround programming: the approach taken in the
 *   driver is to tie those workarounds to the first compute/render engine that
 *   is registered. When executing with GuC submission, engine resets are
 *   outside of kernel driver control, hence the list of registers involved is
 *   written once, on engine initialization, and then passed to GuC, which
 *   saves/restores their values before/after the reset takes place. See
 *   ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
 *
 *   Workarounds for registers specific to RCS and CCS should be implemented in
 *   rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
 *   registers belonging to BCS, VCS or VECS should be implemented in
 *   xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
 *   engine's MMIO range but that are part of the common RCS/CCS reset domain
 *   should be implemented in general_render_compute_wa_init(). The settings
 *   about the CCS load balancing should be added in ccs_engine_wa_mode().
 *
 * - GT workarounds: the list of these WAs is applied whenever these registers
 *   revert to their default values: on GPU reset, suspend/resume [1]_, etc.
 *
 *   GT workarounds should be implemented in the \*_gt_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Register whitelist: some workarounds need to be implemented in userspace,
 *   but need to touch privileged registers. The whitelist in the kernel
 *   instructs the hardware to allow the access to happen. From the kernel side,
 *   this is just a special case of a MMIO workaround (as we write the list of
 *   these to-be-whitelisted registers to some special HW registers).
 *
 *   Register whitelisting should be done in the \*_whitelist_build() variants
 *   respective to the targeted platforms.
 *
 * - Workaround batchbuffers: buffers that get executed automatically by the
 *   hardware on every HW context restore. These buffers are created and
 *   programmed in the default context so the hardware always goes through those
 *   programming sequences when switching contexts. The support for workaround
 *   batchbuffers is enabled by these hardware mechanisms:
 *
 *   #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
 *      context, pointing the hardware to jump to that location when that offset
 *      is reached in the context restore. The workaround batchbuffer in the
 *      driver currently uses this mechanism for all platforms.
 *
 *   #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
 *      pointing the hardware to a buffer to continue executing after the
 *      engine registers are restored in a context restore sequence. This is
 *      currently not used in the driver.
 *
 * - Other: There are WAs that, due to their nature, cannot be applied from a
 *   central place. Those are peppered around the rest of the code, as needed.
 *   Workarounds related to the display IP are the main example.
 *
 * .. [1] Technically, some registers are power-context saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things, so it's the approach taken in the driver.
 */
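
/*
 * Example (an illustrative sketch only; the function and register names
 * below are hypothetical, not taken from bspec): a platform hook builds its
 * list with the wa_* helpers defined further down, e.g.
 *
 *	static void foo_gt_workarounds_init(struct intel_gt *gt,
 *					    struct i915_wa_list *wal)
 *	{
 *		wa_masked_en(wal, FOO_CHICKEN_REG, FOO_CHICKEN_BIT);
 *	}
 *
 * wa_init_start()/wa_init_finish() bracket the list construction, and the
 * list is later applied and checked against the hardware (see
 * wa_list_apply() and wa_verify() below).
 */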

static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
                          const char *name, const char *engine_name)
        wal->engine_name = engine_name;

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
        /* Trim unused entries. */
        if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
                struct i915_wa *list = kmemdup(wal->list,
                                               wal->count * sizeof(*list),

        gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
               wal->wa_count, wal->name, wal->engine_name);

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
        enum forcewake_domains fw = 0;

        for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
                fw |= intel_uncore_forcewake_for_reg(uncore,

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
        unsigned int addr = i915_mmio_reg_offset(wa->reg);
        struct drm_i915_private *i915 = wal->gt->i915;
        unsigned int start = 0, end = wal->count;
        const unsigned int grow = WA_LIST_CHUNK;

        GEM_BUG_ON(!is_power_of_2(grow));

        if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
                struct i915_wa *list;

                list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
                        drm_err(&i915->drm, "No space for workaround init!\n");

                memcpy(list, wal->list, sizeof(*wa) * wal->count);
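
        /*
         * Binary-search the (offset-sorted) list: if the register already
         * has an entry, the new workaround is merged into it below rather
         * than added as a duplicate.
         */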
        while (start < end) {
                unsigned int mid = start + (end - start) / 2;

                if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
                        start = mid + 1;
                } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
                        end = mid;
                } else {
                        wa_ = &wal->list[mid];

                        if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
                                drm_err(&i915->drm,
                                        "Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
                                        i915_mmio_reg_offset(wa_->reg),

                                wa_->set &= ~wa->clr;

                        wa_->read |= wa->read;

        wa_ = &wal->list[wal->count++];
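
        /*
         * The new entry is appended at the end; the swap loop below bubbles
         * it down so the list stays sorted by register offset, keeping the
         * binary search above valid.
         */
        *wa_ = *wa;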
        while (wa_-- > wal->list) {
                GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
                           i915_mmio_reg_offset(wa_[1].reg));
                if (i915_mmio_reg_offset(wa_[1].reg) >
                    i915_mmio_reg_offset(wa_[0].reg))

                swap(wa_[1], wa_[0]);

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
                   u32 clear, u32 set, u32 read_mask, bool masked_reg)
        struct i915_wa wa = {
                .masked_reg = masked_reg,

static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
                       u32 clear, u32 set, u32 read_mask, bool masked_reg)
        struct i915_wa wa = {
                .masked_reg = masked_reg,

static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
        wa_add(wal, reg, clear, set, clear | set, false);

static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
        wa_mcr_add(wal, reg, clear, set, clear | set, false);

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
        wa_write_clr_set(wal, reg, ~0, set);

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
        wa_write_clr_set(wal, reg, set, set);

static void
wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
        wa_mcr_write_clr_set(wal, reg, set, set);

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
        wa_write_clr_set(wal, reg, clr, 0);

static void
wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
        wa_mcr_write_clr_set(wal, reg, clr, 0);
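
/*
 * Summary of the non-masked helpers above (a reading aid, not new API):
 * wa_write() overwrites the whole register (clear mask ~0), wa_write_or()
 * only sets bits (clear == set), wa_write_clr() only clears bits, and
 * wa_write_clr_set() is the general read-modify-write form. In every case
 * the bits verified on readback default to clear | set.
 */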

/*
 * WA operations on "masked register". A masked register has the upper 16 bits
 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
 * portion of the register without a rmw: you simply write in the upper 16 bits
 * the mask of bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */
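
/*
 * For example (a sketch of the encoding, using the _MASKED_* helpers from
 * the i915 register definitions): _MASKED_BIT_ENABLE(BIT(3)) expands to
 * 0x00080008 -- bit 19 arms the mask, bit 3 carries the value -- while
 * _MASKED_BIT_DISABLE(BIT(3)) expands to 0x00080000. Either write touches
 * only bit 3 of the register, with no read-modify-write cycle.
 */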

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
        wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);

static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
        wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
        wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);

static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
        wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
                    u32 mask, u32 val)
        wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);

static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
                        u32 mask, u32 val)
        wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);

static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
        wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
        wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
        wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

        /* WaDisableAsyncFlipPerfMode:bdw,chv */
        wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

        /* WaDisablePartialInstShootdown:bdw,chv */
        wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Use Force Non-Coherent whenever executing a 3D context. This is a
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
        /* WaForceEnableNonCoherent:bdw,chv */
        /* WaHdcDisableFetchWhenMasked:bdw,chv */
        wa_masked_en(wal, HDC_CHICKEN0,
                     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
                     HDC_FORCE_NON_COHERENT);

        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
         * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
         * polygons in the same 8x4 pixel/sample area to be processed without
         * stalling waiting for the earlier ones to write to Hierarchical Z
         * buffer."
         *
         * This optimization is off by default for BDW and CHV; turn it on.
         */
        wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

        /* Wa4x4STCOptimizationDisable:bdw,chv */
        wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        wa_masked_field_set(wal, GEN7_GT_MODE,
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        struct drm_i915_private *i915 = engine->i915;

        gen8_ctx_workarounds_init(engine, wal);

        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* WaDisableDopClockGating:bdw
         *
         * Also see the related UCGTCL1 write in bdw_init_clock_gating()
         * to disable EUTC clock gating.
         */
        wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
                         DOP_CLOCK_GATING_DISABLE);

        wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
                         GEN8_SAMPLER_POWER_BYPASS_DIS);

        wa_masked_en(wal, HDC_CHICKEN0,
                     /* WaForceContextSaveRestoreNonCoherent:bdw */
                     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
                     (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        gen8_ctx_workarounds_init(engine, wal);

        /* WaDisableThreadStallDopClockGating:chv */
        wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* Improve HiZ throughput on CHV. */
        wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
        struct drm_i915_private *i915 = engine->i915;

        /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
         *
         * Must match Display Engine. See
         * WaCompressedResourceDisplayNewHashMode.
         */
        wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                     GEN9_PBE_COMPRESSED_HASH_SELECTION);
        wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
                         GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);

        /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
        wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
                         FLOW_CONTROL_ENABLE |
                         PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
        /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
        wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
                         GEN9_ENABLE_YV12_BUGFIX |
                         GEN9_ENABLE_GPGPU_PREEMPTION);

        /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
        wa_masked_en(wal, CACHE_MODE_1,
                     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

        /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
        wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);

        /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
        wa_masked_en(wal, HDC_CHICKEN0,
                     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

        /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
         * both tied to WaForceContextSaveRestoreNonCoherent
         * in some hsds for skl. We keep the tie for all gen9. The
         * documentation is a bit hazy and so we want to get common behaviour,
         * even though there is no clear evidence we would need both on kbl/bxt.
         * This area has been a source of system hangs so we play it safe
         * and mimic the skl regardless of what bspec says.
         *
         * Use Force Non-Coherent whenever executing a 3D context. This
         * is a workaround for a possible hang in the unlikely event
         * a TLB invalidation occurs during a PSD flush.
         */
        /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
        wa_masked_en(wal, HDC_CHICKEN0,
                     HDC_FORCE_NON_COHERENT);

        /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
        if (IS_SKYLAKE(i915) ||
            IS_COFFEELAKE(i915) ||
                wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
                                 GEN8_SAMPLER_POWER_BYPASS_DIS);

        /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
        wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

        /*
         * Supporting preemption with fine-granularity requires changes in the
         * batch buffer programming. Since we can't break old userspace, we
         * need to set our default preemption level to a safe value. Userspace
         * is still able to use more fine-grained preemption levels, since in
         * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
         * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
         * not real HW workarounds, but merely a way to start using preemption
         * while maintaining the old contract with userspace.
         */

        /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
        wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

        /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
        wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

        /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
        if (IS_GEN9_LP(i915))
                wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
                                struct i915_wa_list *wal)
        struct intel_gt *gt = engine->gt;
        u8 vals[3] = { 0, 0, 0 };

        for (i = 0; i < 3; i++) {
                /*
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
                if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
                        continue;

                /*
                 * subslice_7eu[i] != 0 (because of the check above) and
                 * ss_max == 4 (maximum number of subslices possible per slice)
                 */
                ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;

        if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
                return;

        /* Tune IZ hashing. See intel_device_info_runtime_init() */
        wa_masked_field_set(wal, GEN7_GT_MODE,
                            GEN9_IZ_HASHING_MASK(2) |
                            GEN9_IZ_HASHING_MASK(1) |
                            GEN9_IZ_HASHING_MASK(0),
                            GEN9_IZ_HASHING(2, vals[2]) |
                            GEN9_IZ_HASHING(1, vals[1]) |
                            GEN9_IZ_HASHING(0, vals[0]));

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        gen9_ctx_workarounds_init(engine, wal);
        skl_tune_iz_hashing(engine, wal);

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        gen9_ctx_workarounds_init(engine, wal);

        /* WaDisableThreadStallDopClockGating:bxt */
        wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
                         STALL_DOP_GATING_DISABLE);

        /* WaToEnableHwFixForPushConstHWBug:bxt */
        wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        struct drm_i915_private *i915 = engine->i915;

        gen9_ctx_workarounds_init(engine, wal);

        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
                wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                             GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableSbeCacheDispatchPortSharing:kbl */
        wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
                         GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        gen9_ctx_workarounds_init(engine, wal);

        /* WaToEnableHwFixForPushConstHWBug:glk */
        wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        gen9_ctx_workarounds_init(engine, wal);

        /* WaToEnableHwFixForPushConstHWBug:cfl */
        wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
                     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableSbeCacheDispatchPortSharing:cfl */
        wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
                         GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        /* Wa_1406697149 (WaDisableBankHangMode:icl) */
        wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);

        /* WaForceEnableNonCoherent:icl
         * This is not the same workaround as in early Gen9 platforms, where
         * lacking this could cause system hangs, but coherency performance
         * overhead is high and only a few compute workloads really need it
         * (the register is whitelisted in hardware now, so UMDs can opt in
         * for coherency if they have a good reason).
         */
        wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

        /* WaEnableFloatBlendOptimization:icl */
        wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
                   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
                   0 /* write-only, so skip validation */,
                   true);

        /* WaDisableGPGPUMidThreadPreemption:icl */
        wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

        /* allow headerless messages for preemptible GPGPU context */
        wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
                         GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

        /* Wa_1604278689:icl,ehl */
        wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
        wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,

        /* Wa_1406306137:icl,ehl */
        wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on the DG2 platform.
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
                                   struct i915_wa_list *wal)
        wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
        wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
                             REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
        wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
                             FF_MODE2_TDS_TIMER_128);

static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
                                       struct i915_wa_list *wal)
        struct drm_i915_private *i915 = engine->i915;

        /*
         * Wa_1409142259:tgl,dg1,adl-p
         * Wa_1409347922:tgl,dg1,adl-p
         * Wa_1409252684:tgl,dg1,adl-p
         * Wa_1409217633:tgl,dg1,adl-p
         * Wa_1409207793:tgl,dg1,adl-p
         * Wa_1409178076:tgl,dg1,adl-p
         * Wa_1408979724:tgl,dg1,adl-p
         * Wa_14010443199:tgl,rkl,dg1,adl-p
         * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
         * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
         */
        wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
                     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

        /* WaDisableGPGPUMidThreadPreemption:gen12 */
        wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

        /*
         * Wa_16011163337 - GS_TIMER
         *
         * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
         * need to program it even on those that don't explicitly list that
         * workaround.
         *
         * Note that the programming of GEN12_FF_MODE2 is further modified
         * according to the FF_MODE2 guidance given by Wa_1608008084.
         * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
         * value when read from the CPU.
         *
         * The default value for this register is zero for all fields.
         * So instead of doing a RMW we should just write the desired values
         * for TDS and GS timers. Note that since the readback can't be trusted,
         * the clear mask is just set to ~0 to make sure other bits are not
         * inadvertently set. For the same reason read verification is ignored.
         */
        wa_add(wal, GEN12_FF_MODE2,
               ~0,
               FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
               0, false);
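
        /*
         * Reading the wa_add() arguments above against its prototype (clear,
         * set, read_mask, masked_reg): clear == ~0 wipes the whole register,
         * set provides the two timer fields, and read_mask == 0 disables the
         * readback check that wa_verify() would otherwise perform -- exactly
         * what the Wa_1608008084 note above calls for.
         */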

        wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);

        wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);

static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        gen12_ctx_workarounds_init(engine, wal);

        wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
                      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

        wa_masked_en(wal, HIZ_CHICKEN,
                     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);

static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        dg2_ctx_gt_tuning_init(engine, wal);

        /* Wa_16013271637:dg2 */
        wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
                         MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

        /* Wa_14014947963:dg2 */
        wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);

        /* Wa_18018764978:dg2 */
        wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);

        /* Wa_18019271663:dg2 */
        wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

        /* Wa_14019877138:dg2 */
        wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);

static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
        struct intel_gt *gt = engine->gt;

        dg2_ctx_gt_tuning_init(engine, wal);

        /*
         * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
         * gen12_emit_indirect_ctx_rcs() rather than here on some early
         * steppings.
         */
        if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
              IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
                wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);

static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
                                       struct i915_wa_list *wal)
        struct intel_gt *gt = engine->gt;

        xelpg_ctx_gt_tuning_init(engine, wal);

        if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
            IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
                wa_masked_field_set(wal, VF_PREEMPTION,
                                    PREEMPTION_VERTEX_COUNT, 0x4000);

                wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
                                 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

                wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);

                wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);

        wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

        wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);

static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
                                         struct i915_wa_list *wal)
        /*
         * This is a "fake" workaround defined by software to ensure we
         * maintain reliable, backward-compatible behavior for userspace with
         * regards to how nested MI_BATCH_BUFFER_START commands are handled.
         *
         * The per-context setting of MI_MODE[12] determines whether the bits
         * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
         * in the traditional manner or whether they should instead use a new
         * tgl+ meaning that breaks backward compatibility, but allows nesting
         * into 3rd-level batchbuffers. When this new capability was first
         * added in TGL, it remained off by default unless a context
         * intentionally opted in to the new behavior. However Xe_HPG now
         * flips this on by default and requires that we explicitly opt out if
         * we don't want the new behavior.
         *
         * From a SW perspective, we want to maintain the backward-compatible
         * behavior for userspace, so we'll apply a fake workaround to set it
         * back to the legacy behavior on platforms where the hardware default
         * is to break compatibility. At the moment there is no Linux
         * userspace that utilizes third-level batchbuffers, so this will
         * avoid userspace having to make any changes; using the legacy
         * meaning is the correct thing to do. If/when we have userspace
         * consumers that want to utilize third-level batch nesting, we can
         * provide a context parameter to allow them to opt-in.
         */
        wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);

static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
                                   struct i915_wa_list *wal)
        /*
         * Some blitter commands do not have a field for MOCS, those
         * commands will use the MOCS index pointed to by BLIT_CCTL.
         * The BLIT_CCTL registers need to be programmed to un-cached.
         */
        if (engine->class == COPY_ENGINE_CLASS) {
                mocs = engine->gt->mocs.uc_index;
                wa_write_clr_set(wal,
                                 BLIT_CCTL(engine->mmio_base),
                                 BLIT_CCTL_MOCS(mocs, mocs));

/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
 * defined by the hardware team, but rather general context registers.
 * Adding this context register programming to the context workaround list
 * allows us to use the wa framework for proper application and validation.
 */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
                          struct i915_wa_list *wal)
        if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
                fakewa_disable_nestedbb_mode(engine, wal);

        gen12_ctx_gt_mocs_init(engine, wal);

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
                           struct i915_wa_list *wal,
                           const char *name)
        struct drm_i915_private *i915 = engine->i915;

        wa_init_start(wal, engine->gt, name, engine->name);

        /* Applies to all engines */
        /*
         * Fake workarounds are not actual workarounds, but programming of
         * context registers using the workaround framework.
         */
        if (GRAPHICS_VER(i915) >= 12)
                gen12_ctx_gt_fake_wa_init(engine, wal);

        if (engine->class != RENDER_CLASS)

        if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
                xelpg_ctx_workarounds_init(engine, wal);
        else if (IS_DG2(i915))
                dg2_ctx_workarounds_init(engine, wal);
        else if (IS_DG1(i915))
                dg1_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 12)
                gen12_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 11)
                icl_ctx_workarounds_init(engine, wal);
        else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
                cfl_ctx_workarounds_init(engine, wal);
        else if (IS_GEMINILAKE(i915))
                glk_ctx_workarounds_init(engine, wal);
        else if (IS_KABYLAKE(i915))
                kbl_ctx_workarounds_init(engine, wal);
        else if (IS_BROXTON(i915))
                bxt_ctx_workarounds_init(engine, wal);
        else if (IS_SKYLAKE(i915))
                skl_ctx_workarounds_init(engine, wal);
        else if (IS_CHERRYVIEW(i915))
                chv_ctx_workarounds_init(engine, wal);
        else if (IS_BROADWELL(i915))
                bdw_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 7)
                gen7_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 6)
                gen6_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) < 8)
                ;
        else
                MISSING_CASE(GRAPHICS_VER(i915));

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
        __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");

int intel_engine_emit_ctx_wa(struct i915_request *rq)
        struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
        struct intel_uncore *uncore = rq->engine->uncore;
        enum forcewake_domains fw;

        ret = rq->engine->emit_flush(rq, EMIT_BARRIER);

        cs = intel_ring_begin(rq, (wal->count * 2 + 2));
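
        /*
         * Ring space sketch (assuming the layout below matches what the
         * elided code emits): one dword for the MI_LOAD_REGISTER_IMM header,
         * two dwords (offset + value) per workaround, and one trailing dword
         * to keep the emission an even number of dwords.
         */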

        fw = wal_get_fw_for_rmw(uncore, wal);

        intel_gt_mcr_lock(wal->gt, &flags);
        spin_lock(&uncore->lock);
        intel_uncore_forcewake_get__locked(uncore, fw);

        *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
                /* Skip reading the register if it's not really needed */
                if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
                        intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
                        intel_uncore_read_fw(uncore, wa->reg);

                *cs++ = i915_mmio_reg_offset(wa->reg);

        intel_uncore_forcewake_put__locked(uncore, fw);
        spin_unlock(&uncore->lock);
        intel_gt_mcr_unlock(wal->gt, flags);

        intel_ring_advance(rq, cs);

        ret = rq->engine->emit_flush(rq, EMIT_BARRIER);

static void
gen4_gt_workarounds_init(struct intel_gt *gt,
                         struct i915_wa_list *wal)
        /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
        wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);

static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        gen4_gt_workarounds_init(gt, wal);

        /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
        wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);

static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        g4x_gt_workarounds_init(gt, wal);

        wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);

static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)

static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
        wa_masked_dis(wal,
                      GEN7_COMMON_SLICE_CHICKEN1,
                      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

        /* WaApplyL3ControlAndL3ChickenMode:ivb */
        wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
        wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

        /* WaForceL3Serialization:ivb */
        wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        /* WaForceL3Serialization:vlv */
        wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

        /*
         * WaIncreaseL3CreditsForVLVB0:vlv
         * This is the hardware default actually.
         */
        wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        /* L3 caching of data atomics doesn't work -- disable it. */
        wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

        wa_add(wal,
               HSW_ROW_CHICKEN3, 0,
               _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
               0 /* XXX does this reg exist? */, true);

        /* WaVSRefCountFullforceMissDisable:hsw */
        wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);

static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
        const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
        unsigned int slice, subslice;

        GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

        /*
         * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
         * Before any MMIO read into slice/subslice specific registers, MCR
         * packet control register needs to be programmed to point to any
         * enabled s/ss pair. Otherwise, incorrect values will be returned.
         * This means each subsequent MMIO read will be forwarded to a
         * specific s/ss combination, but this is OK since these registers
         * are consistent across s/ss in almost all cases. On the rare
         * occasions, such as INSTDONE, where this value is dependent
         * on s/ss combo, the read should be done with read_subslice_reg.
         */
        slice = ffs(sseu->slice_mask) - 1;
        GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
        subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
        GEM_BUG_ON(!subslice);

        /*
         * We use GEN8_MCR..() macros to calculate the |mcr| value for
         * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
         */
        mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
        mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

        drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

        wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);

static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        struct drm_i915_private *i915 = gt->i915;

        /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
        gen9_wa_init_mcr(i915, wal);

        /* WaDisableKillLogic:bxt,skl,kbl */
        if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))

        if (HAS_LLC(i915)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                 *
                 * Must match Display Engine. See
                 * WaCompressedResourceDisplayNewHashMode.
                 */
                            MMCD_PCLA | MMCD_HOTSPOT_EN);

        /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
                    BDW_DISABLE_HDC_INVALIDATION);

static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        gen9_gt_workarounds_init(gt, wal);

        /* WaDisableGafsUnitClkGating:skl */
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:skl */
        if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
                wa_write_or(wal,
                            GEN9_GAMT_ECO_REG_RW_IA,
                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        gen9_gt_workarounds_init(gt, wal);

        /* WaDisableDynamicCreditSharing:kbl */
        if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
                            GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

        /* WaDisableGafsUnitClkGating:kbl */
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:kbl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        gen9_gt_workarounds_init(gt, wal);

static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        gen9_gt_workarounds_init(gt, wal);

        /* WaDisableGafsUnitClkGating:cfl */
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:cfl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

static void __set_mcr_steering(struct i915_wa_list *wal,
                               i915_reg_t steering_reg,
                               unsigned int slice, unsigned int subslice)
        mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
        mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;

        wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);

static void debug_dump_steering(struct intel_gt *gt)
        struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER,

        if (drm_debug_enabled(DRM_UT_DRIVER))
                intel_gt_mcr_report_steering(&p, gt, false);

static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
                         unsigned int slice, unsigned int subslice)
        __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);

        gt->default_steering.groupid = slice;
        gt->default_steering.instanceid = subslice;

        debug_dump_steering(gt);

static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
        const struct sseu_dev_info *sseu = &gt->info.sseu;
        unsigned int subslice;

        GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
        GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);

        /*
         * Although a platform may have subslices, we need to always steer
         * reads to the lowest instance that isn't fused off. When Render
         * Power Gating is enabled, grabbing forcewake will only power up a
         * single subslice (the "minconfig") if there isn't a real workload
         * that needs to be run; this means that if we steer register reads to
         * one of the higher subslices, we run the risk of reading back 0's or
         * garbage.
         */
        subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));

        /*
         * If the subslice we picked above also steers us to a valid L3 bank,
         * then we can just rely on the default steering and won't need to
         * worry about explicitly re-steering L3BANK reads later.
         */
        if (gt->info.l3bank_mask & BIT(subslice))
                gt->steering_table[L3BANK] = NULL;

        __add_mcr_wa(gt, wal, 0, subslice);

static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
        const struct sseu_dev_info *sseu = &gt->info.sseu;
        unsigned long slice, subslice = 0, slice_mask = 0;

        /*
         * On Xe_HP the steering increases in complexity. There are now several
         * more units that require steering and we're not guaranteed to be able
         * to find a common setting for all of them. These are:
         * - GSLICE (fusable)
         * - DSS (sub-unit within gslice; fusable)
         * - L3 Bank (fusable)
         * - MSLICE (fusable)
         * - LNCF (sub-unit within mslice; always present if mslice is present)
         *
         * We'll do our default/implicit steering based on GSLICE (in the
         * sliceid field) and DSS (in the subsliceid field). If we can
         * find overlap between the valid MSLICE and/or LNCF values with
         * a suitable GSLICE, then we can just re-use the default value and
         * skip any explicit steering at runtime.
         *
         * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
         * a valid sliceid value. DSS steering is the only type of steering
         * that utilizes the 'subsliceid' bits.
         *
         * Also note that, even though the steering domain is called "GSlice"
         * and it is encoded in the register using the gslice format, the spec
         * says that the combined (geometry | compute) fuse should be used to
         * select the steering.
         */

        /* Find the potential gslice candidates */
        slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
                                                       GEN_DSS_PER_GSLICE);

        /*
         * Find the potential LNCF candidates. Either LNCF within a valid
         * mslice is fine.
         */
        for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
                lncf_mask |= (0x3 << (i * 2));
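
        /*
         * Reading aid for the shift above: the 0x3 << (i * 2) reserves two
         * LNCF steering IDs per present mslice, i.e. LNCF instances come in
         * pairs per mslice (consistent with LNCF being a sub-unit within
         * mslice, per the comment at the top of this function).
         */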

        /*
         * Are there any sliceid values that work for both GSLICE and LNCF
         * steering?
         */
        if (slice_mask & lncf_mask) {
                slice_mask &= lncf_mask;
                gt->steering_table[LNCF] = NULL;

        /* How about sliceid values that also work for MSLICE steering? */
        if (slice_mask & gt->info.mslice_mask) {
                slice_mask &= gt->info.mslice_mask;
                gt->steering_table[MSLICE] = NULL;

        slice = __ffs(slice_mask);
        subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
                   GEN_DSS_PER_GSLICE;

        __add_mcr_wa(gt, wal, slice, subslice);

        /*
         * SQIDI ranges are special because they use different steering
         * registers than everything else we work with. On XeHP SDV and
         * DG2-G10, any value in the steering registers will work fine since
         * all instances are present, but DG2-G11 only has SQIDI instances at
         * ID's 2 and 3, so we need to steer to one of those. For simplicity
         * we'll just steer to a hardcoded "2" since that value will work
         * everywhere.
         */
        __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
        __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);

        /*
         * On DG2, GAM registers have a dedicated steering control register
         * and must always be programmed to a hardcoded groupid of "1."
         */
        if (IS_DG2(gt->i915))
                __set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);

static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        struct drm_i915_private *i915 = gt->i915;

        icl_wa_init_mcr(gt, wal);

        /* WaModifyGamTlbPartitioning:icl */
        wa_write_clr_set(wal,
                         GEN11_GACB_PERF_CTRL,
                         GEN11_HASH_CTRL_MASK,
                         GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

        /* Wa_1405766107:icl
         * Formerly known as WaCL2SFHalfMaxAlloc
         */
                    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
                    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

        /*
         * Formerly known as WaDisCtxReload
         */
        wa_write_or(wal,
                    GEN8_GAMW_ECO_DEV_RW_IA,
                    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

        /* Wa_1406463099:icl
         * Formerly known as WaGamTlbPendError
         */
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);

        /*
         * Wa_1408615072:icl,ehl (vsunit)
         * Wa_1407596294:icl,ehl (hsunit)
         */
        wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

        /* Wa_1407352427:icl,ehl */
        wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
                    PSDUNIT_CLKGATE_DIS);

        /* Wa_1406680159:icl,ehl */
        wa_mcr_write_or(wal,
                        GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
                        GWUNIT_CLKGATE_DIS);

        /* Wa_1607087056:icl,ehl,jsl */
        if (IS_ICELAKE(i915) ||
            ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
             IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
                wa_write_or(wal,
                            GEN11_SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

        /*
         * This is not a documented workaround, but rather an optimization
         * to reduce sampler power.
         */
        wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);

/*
 * Though there are per-engine instances of these registers,
 * they retain their value through engine resets and should
 * only be provided on the GT workaround list rather than
 * the engine-specific workaround list.
 */
static void
wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
        struct intel_engine_cs *engine;

        for_each_engine(engine, gt, id) {
                if (engine->class != VIDEO_DECODE_CLASS ||
                    (engine->instance % 2))
                        continue;

                wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
                            IECPUNIT_CLKGATE_DIS);

static void
gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        icl_wa_init_mcr(gt, wal);

        /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
        wa_14011060649(gt, wal);

        /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
        wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);

        /*
         * Firmware on some gen12 platforms locks the MISCCPCTL register,
         * preventing i915 from modifying it for this workaround. Skip the
         * readback verification for this workaround on debug builds; if the
         * workaround doesn't stick due to firmware behavior, it's not an error
         * that we want CI to flag.
         */
        wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,

static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        gen12_gt_workarounds_init(gt, wal);

        /* Wa_1409420604:dg1 */
        wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
                        CPSSUNIT_CLKGATE_DIS);

        /* Wa_1408615072:dg1 */
        /* Empirical testing shows this register is unaffected by engine reset. */
        wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);

static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        xehp_init_mcr(gt, wal);

        /* Wa_14011060649:dg2 */
        wa_14011060649(gt, wal);

        if (IS_DG2_G10(gt->i915)) {
                /* Wa_22010523718:dg2 */
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                            CG3DDISCFEG_CLKGATE_DIS);

                /* Wa_14011006942:dg2 */
                wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
                                DSS_ROUTER_CLKGATE_DIS);

        /* Wa_14014830051:dg2 */
        wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

        /*
         * Skip verification for a possibly locked register.
         */
        wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,

        /* Wa_18018781329 */
        wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
        wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
        wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
        wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);

        /* Wa_1509235366:dg2 */
        wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
                        INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);

        /* Wa_14010648519:dg2 */
        wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);

static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        /* Wa_14018575942 / Wa_18018781329 */
        wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
        wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);

        /* Wa_22016670082 */
        wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

        if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
            IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
                /* Wa_14014830051 */
                wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

                /* Wa_14015795083 */
                wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);

        /*
         * Unlike older platforms, we no longer set up implicit steering here;
         * all MCR accesses are explicitly steered.
         */
        debug_dump_steering(gt);

static void
wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
        struct intel_engine_cs *engine;

        for_each_engine(engine, gt, id)
                if (engine->class == VIDEO_DECODE_CLASS)
                        wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
                                    MFXPIPE_CLKGATE_DIS);

static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
        wa_16021867713(gt, wal);

        /*
         * Note that although these registers are MCR on the primary
         * GT, the media GT's versions are regular singleton registers.
         */
        wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);

        /* Wa_22016670082 */
        wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

        debug_dump_steering(gt);

/*
 * The bspec performance guide has recommended MMIO tuning settings. These
 * aren't truly "workarounds" but we want to program them through the
 * workaround infrastructure to make sure they're (re)applied at the proper
 * times.
 *
 * The programming in this function is for settings that persist through
 * engine resets and also are not part of any engine's register state context.
 * I.e., settings that only need to be re-applied in the event of a full GT
 * reset.
 */
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
        if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
                wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
                wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);

        if (IS_DG2(gt->i915)) {
                wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
                wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);

static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
        struct drm_i915_private *i915 = gt->i915;

        gt_tuning_settings(gt, wal);

        if (gt->type == GT_MEDIA) {
                if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
                        xelpmp_gt_workarounds_init(gt, wal);
                else
                        MISSING_CASE(MEDIA_VER_FULL(i915));

                return;
        }

        if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
                xelpg_gt_workarounds_init(gt, wal);
        else if (IS_DG2(i915))
                dg2_gt_workarounds_init(gt, wal);
        else if (IS_DG1(i915))
                dg1_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 12)
                gen12_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 11)
                icl_gt_workarounds_init(gt, wal);
        else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
                cfl_gt_workarounds_init(gt, wal);
        else if (IS_GEMINILAKE(i915))
                glk_gt_workarounds_init(gt, wal);
        else if (IS_KABYLAKE(i915))
                kbl_gt_workarounds_init(gt, wal);
        else if (IS_BROXTON(i915))
                gen9_gt_workarounds_init(gt, wal);
        else if (IS_SKYLAKE(i915))
                skl_gt_workarounds_init(gt, wal);
        else if (IS_HASWELL(i915))
                hsw_gt_workarounds_init(gt, wal);
        else if (IS_VALLEYVIEW(i915))
                vlv_gt_workarounds_init(gt, wal);
        else if (IS_IVYBRIDGE(i915))
                ivb_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 6)
                snb_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 5)
                ilk_gt_workarounds_init(gt, wal);
        else if (IS_G4X(i915))
                g4x_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) == 4)
                gen4_gt_workarounds_init(gt, wal);
        else if (GRAPHICS_VER(i915) <= 8)
                ;
        else
                MISSING_CASE(GRAPHICS_VER(i915));

void intel_gt_init_workarounds(struct intel_gt *gt)
        struct i915_wa_list *wal = &gt->wa_list;

        wa_init_start(wal, gt, "GT", "global");
        gt_init_workarounds(gt, wal);
        wa_init_finish(wal);

static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
          const char *name, const char *from)
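        /*
         * Only the bits named in wa->read are validated: if any of them
         * differ from the expected wa->set value, the workaround has been
         * lost (e.g. clobbered by a reset or by firmware).
         */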
        if ((cur ^ wa->set) & wa->read) {
                gt_err(gt,
                       "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
                       name, from, i915_mmio_reg_offset(wa->reg),
                       cur, cur & wa->read, wa->set & wa->read);

static void wa_list_apply(const struct i915_wa_list *wal)
        struct intel_gt *gt = wal->gt;
        struct intel_uncore *uncore = gt->uncore;
        enum forcewake_domains fw;
        unsigned long flags;

        fw = wal_get_fw_for_rmw(uncore, wal);

        intel_gt_mcr_lock(gt, &flags);
        spin_lock(&uncore->lock);
        intel_uncore_forcewake_get__locked(uncore, fw);

        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
                /* open-coded rmw due to steering */
                        intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
                        intel_uncore_read_fw(uncore, wa->reg);
                val = (old & ~wa->clr) | wa->set;
                if (val != old || !wa->clr) {
                                intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
                                intel_uncore_write_fw(uncore, wa->reg, val);

                if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
                        u32 val = wa->is_mcr ?
                                  intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
                                  intel_uncore_read_fw(uncore, wa->reg);

                        wa_verify(gt, wa, val, wal->name, "application");

        intel_uncore_forcewake_put__locked(uncore, fw);
        spin_unlock(&uncore->lock);
        intel_gt_mcr_unlock(gt, flags);

void intel_gt_apply_workarounds(struct intel_gt *gt)
        wa_list_apply(&gt->wa_list);

static bool wa_list_verify(struct intel_gt *gt,
                           const struct i915_wa_list *wal,
                           const char *from)
        struct intel_uncore *uncore = gt->uncore;
        enum forcewake_domains fw;
        unsigned long flags;

        fw = wal_get_fw_for_rmw(uncore, wal);

        intel_gt_mcr_lock(gt, &flags);
        spin_lock(&uncore->lock);
        intel_uncore_forcewake_get__locked(uncore, fw);

        for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
                ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
                                intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
                                intel_uncore_read_fw(uncore, wa->reg),
                                wal->name, from);

        intel_uncore_forcewake_put__locked(uncore, fw);
        spin_unlock(&uncore->lock);
        intel_gt_mcr_unlock(gt, flags);

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
        return wa_list_verify(gt, &gt->wa_list, from);

static bool is_nonpriv_flags_valid(u32 flags)
        /* Check only valid flag bits are set */
        if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
                return false;

        /* NB: Only 3 out of 4 enum values are valid for the access field */
        if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
            RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
                return false;

static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
        struct i915_wa wa = {

        if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))

        if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))

        wa.reg.reg |= flags;

static void
whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
        struct i915_wa wa = {

        if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))

        if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))

        wa.mcr_reg.reg |= flags;

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
        whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);

static void
whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
        whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);

static void gen9_whitelist_build(struct i915_wa_list *w)
        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
        whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

        /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
        whitelist_reg(w, GEN8_CS_CHICKEN1);

        /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
        whitelist_reg(w, GEN8_HDC_CHICKEN1);

        /* WaSendPushConstantsFromMMIO:skl,bxt */
        whitelist_reg(w, COMMON_SLICE_CHICKEN2);

static void skl_whitelist_build(struct intel_engine_cs *engine)
        struct i915_wa_list *w = &engine->whitelist;

        if (engine->class != RENDER_CLASS)

        gen9_whitelist_build(w);

        /* WaDisableLSQCROPERFforOCL:skl */
        whitelist_mcr_reg(w, GEN8_L3SQCREG4);

static void bxt_whitelist_build(struct intel_engine_cs *engine)
        if (engine->class != RENDER_CLASS)

        gen9_whitelist_build(&engine->whitelist);

static void kbl_whitelist_build(struct intel_engine_cs *engine)
        struct i915_wa_list *w = &engine->whitelist;

        if (engine->class != RENDER_CLASS)

        gen9_whitelist_build(w);

        /* WaDisableLSQCROPERFforOCL:kbl */
        whitelist_mcr_reg(w, GEN8_L3SQCREG4);

static void glk_whitelist_build(struct intel_engine_cs *engine)
        struct i915_wa_list *w = &engine->whitelist;

        if (engine->class != RENDER_CLASS)

        gen9_whitelist_build(w);

        /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
        whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

static void cfl_whitelist_build(struct intel_engine_cs *engine)
        struct i915_wa_list *w = &engine->whitelist;

        if (engine->class != RENDER_CLASS)

        gen9_whitelist_build(w);

        /*
         * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
         *
         * This covers 4 registers which are next to one another:
         *   - PS_INVOCATION_COUNT
         *   - PS_INVOCATION_COUNT_UDW
         *   - PS_DEPTH_COUNT
         *   - PS_DEPTH_COUNT_UDW
         */
        whitelist_reg_ext(w, PS_INVOCATION_COUNT,
                          RING_FORCE_TO_NONPRIV_ACCESS_RD |
                          RING_FORCE_TO_NONPRIV_RANGE_4);

static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
        struct i915_wa_list *w = &engine->whitelist;

        if (engine->class != RENDER_CLASS)
                whitelist_reg_ext(w,
                                  RING_CTX_TIMESTAMP(engine->mmio_base),
                                  RING_FORCE_TO_NONPRIV_ACCESS_RD);

static void cml_whitelist_build(struct intel_engine_cs *engine)
        allow_read_ctx_timestamp(engine);

        cfl_whitelist_build(engine);

static void icl_whitelist_build(struct intel_engine_cs *engine)
        struct i915_wa_list *w = &engine->whitelist;

        allow_read_ctx_timestamp(engine);

        switch (engine->class) {
        case RENDER_CLASS:
                /* WaAllowUMDToModifyHalfSliceChicken7:icl */
                whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);

                /* WaAllowUMDToModifySamplerMode:icl */
                whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);

                /* WaEnableStateCacheRedirectToCS:icl */
                whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

                /*
                 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
                 *
                 * This covers 4 registers which are next to one another:
                 *   - PS_INVOCATION_COUNT
                 *   - PS_INVOCATION_COUNT_UDW
                 *   - PS_DEPTH_COUNT
                 *   - PS_DEPTH_COUNT_UDW
                 */
                whitelist_reg_ext(w, PS_INVOCATION_COUNT,
                                  RING_FORCE_TO_NONPRIV_ACCESS_RD |
                                  RING_FORCE_TO_NONPRIV_RANGE_4);

        case VIDEO_DECODE_CLASS:
                /* hucStatusRegOffset */
                whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
                                  RING_FORCE_TO_NONPRIV_ACCESS_RD);
                /* hucUKernelHdrInfoRegOffset */
                whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
                                  RING_FORCE_TO_NONPRIV_ACCESS_RD);
                /* hucStatus2RegOffset */
                whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
                                  RING_FORCE_TO_NONPRIV_ACCESS_RD);

static void tgl_whitelist_build(struct intel_engine_cs *engine)
        struct i915_wa_list *w = &engine->whitelist;

        allow_read_ctx_timestamp(engine);

        switch (engine->class) {
        case RENDER_CLASS:
                /*
                 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
                 *
                 * This covers 4 registers which are next to one another:
                 *   - PS_INVOCATION_COUNT
                 *   - PS_INVOCATION_COUNT_UDW
                 *   - PS_DEPTH_COUNT
                 *   - PS_DEPTH_COUNT_UDW
                 */
                whitelist_reg_ext(w, PS_INVOCATION_COUNT,
                                  RING_FORCE_TO_NONPRIV_ACCESS_RD |
                                  RING_FORCE_TO_NONPRIV_RANGE_4);

                /*
                 * Wa_14012131227:dg1
                 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
                 */
                whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

                /* Wa_1806527549:tgl */
                whitelist_reg(w, HIZ_CHICKEN);

                /* Required by recommended tuning setting (not a workaround) */
                whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* Required by recommended tuning setting (not a workaround) */
		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);

		break;
	default:
		break;
	}
}
static void xelpg_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* Required by recommended tuning setting (not a workaround) */
		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);

		break;
	default:
		break;
	}
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, engine->gt, "whitelist", engine->name);

	if (engine->gt->type == GT_MEDIA)
		; /* none yet */
	else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
		xelpg_whitelist_build(engine);
	else if (IS_DG2(i915))
		dg2_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 12)
		tgl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 11)
		icl_whitelist_build(engine);
	else if (IS_COMETLAKE(i915))
		cml_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) <= 8)
		; /* nothing to whitelist */
	else
		MISSING_CASE(GRAPHICS_VER(i915));

	wa_init_finish(w);
}
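/*
 * Whitelisting is implemented via the RING_FORCE_TO_NONPRIV slots: each
 * slot written below holds the MMIO offset of a to-be-whitelisted register
 * (with the access/range control bits folded into the value). Slots beyond
 * wal->count are pointed at the harmless RING_NOPID register so that no
 * stale entries remain.
 */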
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	/* RING_MAX_NONPRIV_SLOTS is per-engine but at least 12 globally */
	GEM_BUG_ON(wal->count > RING_MAX_NONPRIV_SLOTS);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}
/*
 * engine_fake_wa_init(), a placeholder to program registers which are not
 * part of an official workaround defined by the hardware team. Programming
 * such registers through the workaround framework lets us reuse its
 * machinery for proper application and verification.
 */
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	u8 mocs_w, mocs_r;

	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be used
	 * by the command streamer when executing commands that don't have
	 * a way to explicitly specify a MOCS setting. The default should
	 * usually reference whichever MOCS entry corresponds to uncached
	 * behavior, although use of a WB cached entry is recommended by the
	 * spec in certain circumstances on specific platforms.
	 */
	if (GRAPHICS_VER(engine->i915) >= 12) {
		mocs_r = engine->gt->mocs.uc_index;
		mocs_w = engine->gt->mocs.uc_index;

		if (HAS_L3_CCS_READ(engine->i915) &&
		    engine->class == COMPUTE_CLASS) {
			mocs_r = engine->gt->mocs.wb_index;

			/*
			 * Even on the few platforms where MOCS 0 is a
			 * legitimate table entry, it's never the correct
			 * setting to use here; we can assume the MOCS init
			 * just forgot to initialize wb_index.
			 */
			drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
		}

		wa_masked_field_set(wal,
				    RING_CMD_CCTL(engine->mmio_base),
				    CMD_CCTL_MOCS_MASK,
				    CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
	}
}
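/*
 * Because the override above sits on the regular engine workaround list,
 * it is picked up by the same wa_list_apply()/engine_wa_list_verify()
 * machinery as any real workaround.
 */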
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_gt *gt = engine->gt;

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_22014600077 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 SC_DISABLE_POWER_OPTIMIZATION_EBB);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22012856258 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_DISABLE_READ_SUPPRESSION);
	}
	if (IS_DG2(i915)) {
		/*
		 * Wa_22010960976:dg2
		 * Wa_14013347512:dg2
		 */
		wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
				  LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
	}

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
	    IS_DG2(i915)) {
		/* Wa_14015150844 */
		wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
			   _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
			   0, true);
	}

	if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/*
		 * Wa_1606700617:tgl,dg1,adl-p
		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
		 * Wa_14010826681:tgl,dg1,rkl,adl-p
		 * Wa_18019627453:dg2
		 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);

		/*
		 * Wa_1407928979:tgl A*
		 * Wa_18011464164:tgl[B0+],dg1[B0+]
		 * Wa_22010931296:tgl[B0+],dg1[B0+]
		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
		 */
		wa_write_or(wal, GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 ENABLE_SMALLPL);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_PUSH_CONST_DEREF_HOLD_DIS);

		/* Wa_14010229206 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
	}

	if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
		/*
		 * On TGL and RKL there are multiple entries for this WA in the
		 * BSpec; some indicate this is an A0-only WA, others indicate
		 * it applies to all steppings so we trust the "all steppings."
		 */
		wa_masked_en(wal,
			     RING_PSMI_CTL(RENDER_RING_BASE),
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
	}
	if (GRAPHICS_VER(i915) == 11) {
		/* This is not a Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/* Formerly known as WaGAPZPriorityScheme */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/* Formerly known as WaL3BankAddressHashing */
		wa_write_clr_set(wal,
				 GEN8_GARBCNTL,
				 GEN11_HASH_CTRL_EXCL_MASK,
				 GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_clr_set(wal,
				 GEN11_GLBLINVL,
				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/* Formerly known as WaDisableCleanEvicts */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_mcr_write_clr_set(wal,
				     GEN11_SCRATCH2,
				     GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				     0);

		/* WaEnable32PlaneMode:icl */
		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
			     GEN11_ENABLE_32_PLANE_MODE);

		/*
		 * Wa_1408767742:icl[a2..forever],ehl[all]
		 * Wa_1605460711:icl[a0..c0]
		 */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_22010271021 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}
	/*
	 * Intel platforms that support fine-grained preemption (i.e., gen9 and
	 * beyond) allow the kernel-mode driver to choose between two different
	 * options for controlling preemption granularity and behavior.
	 *
	 * Option 1 (hardware default):
	 *   Preemption settings are controlled in a global manner via
	 *   kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
	 *   and settings chosen by the kernel-mode driver will apply to all
	 *   userspace clients.
	 *
	 * Option 2 (requires register whitelisting):
	 *   Preemption settings are controlled on a per-context basis via
	 *   register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
	 *   context switch and is writable by userspace (e.g., via
	 *   MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
	 *   which allows different userspace drivers/clients to select
	 *   different settings, or to change those settings on the fly in
	 *   response to runtime needs. This option was known by name
	 *   "FtrPerCtxtPreemptionGranularityControl" at one time, although
	 *   that name is somewhat misleading as other non-granularity
	 *   preemption settings are also impacted by this decision.
	 *
	 * On Linux, our policy has always been to let userspace drivers
	 * control preemption granularity/settings (Option 2). This was
	 * originally mandatory on gen9 to prevent ABI breakage (old gen9
	 * userspace developed before object-level preemption was enabled would
	 * not behave well if i915 were to go with Option 1 and enable that
	 * preemption in a global manner). On gen9 each context would have
	 * object-level preemption disabled by default (see
	 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
	 * userspace drivers could opt-in to object-level preemption as they
	 * saw fit. For post-gen9 platforms, we continue to utilize Option 2;
	 * even though it is no longer necessary for ABI compatibility when
	 * enabling a new platform, it does ensure that userspace will be able
	 * to implement any workarounds that show up requiring temporary
	 * adjustments to preemption behavior at runtime.
	 *
	 * Notes/Workarounds:
	 *  - Wa_14015141709: On DG2 and early steppings of MTL,
	 *      CS_CHICKEN1[0] does not disable object-level preemption as
	 *      it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
	 *      using Option 1). Effectively this means userspace is unable
	 *      to disable object-level preemption on these platforms/steppings
	 *      despite the setting here.
	 *
	 *  - Wa_16013994831: May require that userspace program
	 *      CS_CHICKEN1[10] when certain runtime conditions are true.
	 *      Userspace requires Option 2 to be in effect for their update of
	 *      CS_CHICKEN1[10] to be effective.
	 *
	 * Other workarounds may appear in the future that will also require
	 * Option 2 behavior to allow proper userspace implementation.
	 */
	if (GRAPHICS_VER(i915) >= 9)
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_mcr_write_or(wal,
				GEN8_GARBCNTL,
				GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (GRAPHICS_VER(i915) == 9) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_mcr_write_or(wal,
				BDW_SCRATCH1,
				GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_mcr_write_clr_set(wal,
					     GEN8_L3SQCREG1,
					     L3_PRIO_CREDITS_MASK,
					     L3_GENERAL_PRIO_CREDITS(62) |
					     L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN8_LQSC_FLUSH_COHERENT_LINES);

		/* Disable atomics in L3 to prevent unrecoverable hangs */
		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
				     GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
				     EVICTION_PERF_FIX_ENABLE, 0);
	}
	if (IS_HASWELL(i915)) {
		/* WaSampleCChickenBitEnable:hsw */
		wa_masked_en(wal,
			     HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);

		wa_masked_dis(wal,
			      CACHE_MODE_0_GEN7,
			      /* enable HiZ Raw Stall Optimization */
			      HIZ_RAW_STALL_OPT_DISABLE);
	}

	if (IS_VALLEYVIEW(i915)) {
		/* WaDisableEarlyCull:vlv */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaPsdDispatchEnable:vlv */
		/* WaDisablePSDDualDispatchEnable:vlv */
		wa_masked_en(wal,
			     GEN7_HALF_SLICE_CHICKEN1,
			     GEN7_MAX_PS_THREAD_DEP |
			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}
	if (IS_IVYBRIDGE(i915)) {
		/* WaDisableEarlyCull:ivb */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		if (0) { /* causes HiZ corruption on ivb:gt1 */
			/* enable HiZ Raw Stall Optimization */
			wa_masked_dis(wal,
				      CACHE_MODE_0_GEN7,
				      HIZ_RAW_STALL_OPT_DISABLE);
		}

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaDisablePSDDualDispatchEnable:ivb */
		if (IS_IVB_GT1(i915))
			wa_masked_en(wal,
				     GEN7_HALF_SLICE_CHICKEN1,
				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (GRAPHICS_VER(i915) == 7) {
		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
		wa_masked_en(wal,
			     RING_MODE_GEN7(RENDER_RING_BASE),
			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);

		/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
		wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);

		/*
		 * BSpec says this must be set, even though
		 * WaDisable4x2SubspanOptimization isn't listed for VLV:
		 * WaDisable4x2SubspanOptimization:ivb,hsw
		 */
		wa_masked_en(wal,
			     CACHE_MODE_1,
			     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN7_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);
	}
	if (IS_GRAPHICS_VER(i915, 6, 7))
		/*
		 * We need to disable the AsyncFlip performance optimisations in
		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
		 * already be programmed to '1' on all products.
		 *
		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
		 */
		wa_masked_en(wal,
			     RING_MI_MODE(RENDER_RING_BASE),
			     ASYNC_FLIP_PERF_DISABLE);

	if (GRAPHICS_VER(i915) == 6) {
		/*
		 * Required for the hardware to program scanline values for
		 * waiting.
		 * WaEnableFlushTlbInvalidationMode:snb
		 */
		wa_masked_en(wal,
			     GFX_MODE,
			     GFX_TLB_INVALIDATE_EXPLICIT);

		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
		wa_masked_en(wal,
			     _3D_CHICKEN,
			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);

		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
			     /*
			      * BSpec says:
			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
			      * to normal and 3DSTATE_SF number of SF output attributes
			      * is more than 16."
			      */
			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN6_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);

		/* WaDisable_RenderCache_OperationalFlush:snb */
		wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);

		/*
		 * From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		wa_masked_dis(wal,
			      CACHE_MODE_0,
			      CM0_STC_EVICT_DISABLE_LRA_SNB);
	}

	if (IS_GRAPHICS_VER(i915, 4, 6))
		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
		wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
		       /* XXX bit doesn't stick on Broadwater */
		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
	if (GRAPHICS_VER(i915) == 4)
		/*
		 * Disable CONSTANT_BUFFER before it is loaded from the context
		 * image. As soon as it is loaded, it is executed and the
		 * stored address may no longer be valid, leading to a GPU
		 * hang.
		 *
		 * This imposes the requirement that userspace reload their
		 * CONSTANT_BUFFER on every batch, fortunately a requirement
		 * they are already accustomed to from before contexts were
		 * enabled.
		 */
		wa_add(wal, ECOSKPD(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
		       0 /* XXX bit doesn't stick on Broadwater */,
		       true);
}
static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}

	/* Wa_16018031267, Wa_16018063123 */
	if (NEEDS_FASTCOLOR_BLT_WABB(engine))
		wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
				    XEHP_BLITTER_SCHEDULING_MODE_MASK,
				    XEHP_BLITTER_ROUND_ROBIN_MODE);
}
static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	/* boilerplate for any CCS engine workaround */
}
/*
 * The bspec performance guide has recommended MMIO tuning settings. These
 * aren't truly "workarounds" but we want to program them with the same
 * workaround infrastructure to ensure that they're automatically added to
 * the GuC save/restore lists, re-applied at the right times, and checked for
 * any conflicting programming requested by real workarounds.
 *
 * Programming settings should be added here only if their registers are not
 * part of an engine's register state context. If a register is part of a
 * context, then any tuning settings should be programmed in an appropriate
 * function invoked by __intel_engine_init_ctx_wa().
 */
static void
add_render_compute_tuning_settings(struct intel_gt *gt,
				   struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
		wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);

	/*
	 * This tuning setting proves beneficial only on ATS-M designs; the
	 * default "age based" setting is optimal on regular DG2 and other
	 * platforms.
	 */
	if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
		wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
					THREAD_EX_ARB_MODE_RR_AFTER_DEP);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
		wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
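/*
 * ccs_engine_wa_mode() below is only invoked for the engine carrying the
 * I915_ENGINE_FIRST_RENDER_COMPUTE flag (see engine_init_workarounds()),
 * since RCU_MODE/CCS_MODE affect the CCS slices as a whole rather than a
 * single engine instance.
 */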
static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u32 mode;

	if (!IS_DG2(gt->i915))
		return;

	/*
	 * Wa_14019159160: This workaround, along with others, leads to
	 * significant challenges in utilizing load balancing among the
	 * CCS slices. Consequently, an architectural decision has been
	 * made to completely disable automatic CCS load balancing.
	 */
	wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);

	/*
	 * After having disabled automatic load balancing we need to
	 * assign all slices to a single CCS. We will call it CCS mode 1.
	 */
	mode = intel_gt_apply_ccs_mode(gt);
	wa_masked_en(wal, XEHP_CCS_MODE, mode);
}
/*
 * The workarounds in this function apply to shared registers in
 * the general render reset domain that aren't tied to a
 * specific engine. Since all render+compute engines get reset
 * together, and the contents of these registers are lost during
 * the shared render domain reset, we'll define such workarounds
 * here and then add them to just a single RCS or CCS engine's
 * workaround list (whichever engine has the
 * I915_ENGINE_FIRST_RENDER_COMPUTE flag).
 */
static void
general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_gt *gt = engine->gt;

	add_render_compute_tuning_settings(gt, wal);

	if (GRAPHICS_VER(i915) >= 11) {
		/*
		 * This is not a Wa (although referred to as
		 * WaSetInidrectStateOverride in places); it allows
		 * applications that reference sampler states through
		 * the BindlessSamplerStateBaseAddress to have their
		 * border color relative to DynamicStateBaseAddress
		 * rather than BindlessSamplerStateBaseAddress.
		 *
		 * Otherwise SAMPLER_STATE border colors have to be
		 * copied in multiple heaps (DynamicStateBaseAddress &
		 * BindlessSamplerStateBaseAddress)
		 */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
	}
	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
	    IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
		/* Wa_14017856879 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);

		/* Wa_14020495402 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 MTL_DISABLE_SAMPLER_SC_OOO);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
		/* Wa_22015279794 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 DISABLE_PREFETCH_INTO_IC);
	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22013037850 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
				DISABLE_128B_EVICTION_COMMAND_UDW);

		/* Wa_18017747507 */
		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
	}

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_22014226127 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
	}

	if (IS_DG2(i915)) {
		/* Wa_14015227452:dg2,pvc */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);

		/*
		 * Wa_16011620976:dg2_g11
		 * Wa_22015475538:dg2
		 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);

		/* Wa_18028616096 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
	}
	if (IS_DG2_G11(i915)) {
		/*
		 * Wa_22012826095:dg2
		 * Wa_22013059131:dg2
		 */
		wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
				     MAXREQS_PER_BANK,
				     REG_FIELD_PREP(MAXREQS_PER_BANK, 2));

		/* Wa_22013059131:dg2 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
				FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
	}
	if (IS_DG2(i915)) {
		/*
		 * Note that register 0xE420 is write-only and cannot be read
		 * back for verification on DG2 (due to Wa_14012342262), so
		 * we need to explicitly skip the readback.
		 */
		wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
			   _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
			   0 /* write-only, so skip validation */,
			   true);
	}
}
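/*
 * Top-level engine workaround construction: fake workarounds first, then
 * the render/compute domain-shared list on the first render or compute
 * engine, and finally the per-class list for this engine.
 */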
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (GRAPHICS_VER(engine->i915) < 4)
		return;

	engine_fake_wa_init(engine, wal);

	/*
	 * These are common workarounds that just need to be applied
	 * to a single RCS/CCS engine's workaround list since
	 * they're reset as part of the general render domain reset.
	 */
	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
		general_render_compute_wa_init(engine, wal);
		ccs_engine_wa_mode(engine, wal);
	}

	if (engine->class == COMPUTE_CLASS)
		ccs_engine_wa_init(engine, wal);
	else if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	wa_init_start(wal, engine->gt, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(&engine->wa_list);
}
static const struct i915_range mcr_ranges_gen8[] = {
	{ .start = 0x5500, .end = 0x55ff },
	{ .start = 0x7000, .end = 0x7fff },
	{ .start = 0x9400, .end = 0x97ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xe000, .end = 0xe7ff },
	{},
};
static const struct i915_range mcr_ranges_gen12[] = {
	{ .start = 0x8150, .end = 0x815f },
	{ .start = 0x9520, .end = 0x955f },
	{ .start = 0xb100, .end = 0xb3ff },
	{ .start = 0xde80, .end = 0xe8ff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};
static const struct i915_range mcr_ranges_xehp[] = {
	{ .start = 0x4000, .end = 0x4aff },
	{ .start = 0x5200, .end = 0x52ff },
	{ .start = 0x5400, .end = 0x7fff },
	{ .start = 0x8140, .end = 0x815f },
	{ .start = 0x8c80, .end = 0x8dff },
	{ .start = 0x94d0, .end = 0x955f },
	{ .start = 0x9680, .end = 0x96ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xc800, .end = 0xcfff },
	{ .start = 0xd800, .end = 0xd8ff },
	{ .start = 0xdc00, .end = 0xffff },
	{ .start = 0x17000, .end = 0x17fff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};
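/*
 * Each table above is terminated by an empty sentinel entry; mcr_range()
 * below walks entries until it finds a zero .start.
 */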
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct i915_range *mcr_ranges;
	int i;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
		mcr_ranges = mcr_ranges_xehp;
	else if (GRAPHICS_VER(i915) >= 12)
		mcr_ranges = mcr_ranges_gen12;
	else if (GRAPHICS_VER(i915) >= 8)
		mcr_ranges = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	for (i = 0; mcr_ranges[i].start; i++)
		if (offset >= mcr_ranges[i].start &&
		    offset <= mcr_ranges[i].end)
			return true;

	return false;
}
static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(i915) >= 8)
		srm++; /* GEN8+ takes a 64-bit address: one extra length dword */

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}
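/*
 * Each register read back by wa_list_srm() lands at index i of the scratch
 * buffer, mirroring wal->list, so the verification loop below can simply
 * compare results[i] against the expected value for workaround i.
 */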
static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}
int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}
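/*
 * The verification paths above are exercised by the live selftests pulled
 * in below.
 */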
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif