drivers/gpu/drm/i915/gt/intel_workarounds.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2014-2018 Intel Corporation
5  */
6
7 #include "i915_drv.h"
8 #include "intel_context.h"
9 #include "intel_gt.h"
10 #include "intel_workarounds.h"
11
12 /**
13  * DOC: Hardware workarounds
14  *
15  * This file is intended as a central place to implement most [1]_ of the
16  * required workarounds for hardware to work as originally intended. They fall
17  * in five basic categories depending on how/when they are applied:
18  *
19  * - Workarounds that touch registers that are saved/restored to/from the HW
20  *   context image. The list is emitted (via Load Register Immediate commands)
21  *   every time a new context is created.
22  * - GT workarounds. The list of these WAs is applied whenever these registers
23  *   revert to default values (on GPU reset, suspend/resume [2]_, etc..).
24  * - Display workarounds. The list is applied during display clock-gating
25  *   initialization.
26  * - Workarounds that whitelist a privileged register, so that UMDs can manage
27  *   them directly. This is just a special case of an MMIO workaround (as we
28  *   write the list of these to-be-whitelisted registers to some special HW
29  *   registers).
30  * - Workaround batchbuffers, that get executed automatically by the hardware
31  *   on every HW context restore.
32  *
33  * .. [1] Please notice that there are other WAs that, due to their nature,
34  *    cannot be applied from a central place. Those are peppered around the rest
35  *    of the code, as needed.
36  *
37  * .. [2] Technically, some registers are powercontext saved & restored, so they
38  *    survive a suspend/resume. In practice, writing them again is not too
39  *    costly and simplifies things. We can revisit this in the future.
40  *
41  * Layout
42  * ''''''
43  *
44  * Keep things in this file ordered by WA type, as per the above (context, GT,
45  * display, register whitelist, batchbuffer). Then, inside each type, keep the
46  * following order:
47  *
48  * - Infrastructure functions and macros
49  * - WAs per platform in standard gen/chrono order
50  * - Public functions to init or apply the given workaround type.
51  */
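/*
 * Rough call-flow sketch (non-exhaustive, for orientation only):
 *
 *   intel_engine_init_ctx_wa()       - build the per-engine context WA list
 *   intel_engine_emit_ctx_wa()       - emit that list via LRI into a request
 *   intel_gt_init_workarounds()      - build the GT WA list
 *   intel_gt_apply_workarounds()     - apply it with MMIO read-modify-writes
 *   intel_engine_init_whitelist()    - build the per-engine whitelist
 *   intel_engine_apply_whitelist()   - program the RING_FORCE_TO_NONPRIV slots
 *   intel_engine_init_workarounds()  - build the per-engine MMIO WA list
 *   intel_engine_apply_workarounds() - apply it with MMIO read-modify-writes
 */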
52
53 static void wa_init_start(struct i915_wa_list *wal, const char *name)
54 {
55         wal->name = name;
56 }
57
58 #define WA_LIST_CHUNK (1 << 4)
59
60 static void wa_init_finish(struct i915_wa_list *wal)
61 {
62         /* Trim unused entries. */
63         if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
64                 struct i915_wa *list = kmemdup(wal->list,
65                                                wal->count * sizeof(*list),
66                                                GFP_KERNEL);
67
68                 if (list) {
69                         kfree(wal->list);
70                         wal->list = list;
71                 }
72         }
73
74         if (!wal->count)
75                 return;
76
77         DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
78                          wal->wa_count, wal->name);
79 }
80
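/*
 * Add @wa to @wal, keeping the list sorted by MMIO offset. The backing
 * array grows in WA_LIST_CHUNK-sized steps (wa_init_finish() trims the
 * unused tail). If an entry for the same register already exists, the new
 * value and mask are merged into it, warning when previously programmed
 * bits get overwritten in the process.
 */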
81 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
82 {
83         unsigned int addr = i915_mmio_reg_offset(wa->reg);
84         unsigned int start = 0, end = wal->count;
85         const unsigned int grow = WA_LIST_CHUNK;
86         struct i915_wa *wa_;
87
88         GEM_BUG_ON(!is_power_of_2(grow));
89
90         if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
91                 struct i915_wa *list;
92
93                 list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
94                                      GFP_KERNEL);
95                 if (!list) {
96                         DRM_ERROR("No space for workaround init!\n");
97                         return;
98                 }
99
100                 if (wal->list)
101                         memcpy(list, wal->list, sizeof(*wa) * wal->count);
102
103                 wal->list = list;
104         }
105
106         while (start < end) {
107                 unsigned int mid = start + (end - start) / 2;
108
109                 if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
110                         start = mid + 1;
111                 } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
112                         end = mid;
113                 } else {
114                         wa_ = &wal->list[mid];
115
116                         if ((wa->mask & ~wa_->mask) == 0) {
117                                 DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
118                                           i915_mmio_reg_offset(wa_->reg),
119                                           wa_->mask, wa_->val);
120
121                                 wa_->val &= ~wa->mask;
122                         }
123
124                         wal->wa_count++;
125                         wa_->val |= wa->val;
126                         wa_->mask |= wa->mask;
127                         wa_->read |= wa->read;
128                         return;
129                 }
130         }
131
132         wal->wa_count++;
133         wa_ = &wal->list[wal->count++];
134         *wa_ = *wa;
135
136         while (wa_-- > wal->list) {
137                 GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
138                            i915_mmio_reg_offset(wa_[1].reg));
139                 if (i915_mmio_reg_offset(wa_[1].reg) >
140                     i915_mmio_reg_offset(wa_[0].reg))
141                         break;
142
143                 swap(wa_[1], wa_[0]);
144         }
145 }
146
147 static void
148 wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
149                    u32 val)
150 {
151         struct i915_wa wa = {
152                 .reg  = reg,
153                 .mask = mask,
154                 .val  = val,
155                 .read = mask,
156         };
157
158         _wa_add(wal, &wa);
159 }
160
161 static void
162 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
163 {
164         wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
165 }
166
167 static void
168 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
169 {
170         wa_write_masked_or(wal, reg, ~0, val);
171 }
172
173 static void
174 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
175 {
176         wa_write_masked_or(wal, reg, val, val);
177 }
178
179 static void
180 ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
181 {
182         struct i915_wa wa = {
183                 .reg  = reg,
184                 .mask = mask,
185                 .val  = val,
186                 /* Bonkers HW, skip verifying */
187         };
188
189         _wa_add(wal, &wa);
190 }
191
192 #define WA_SET_BIT_MASKED(addr, mask) \
193         wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
194
195 #define WA_CLR_BIT_MASKED(addr, mask) \
196         wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
197
198 #define WA_SET_FIELD_MASKED(addr, mask, value) \
199         wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
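/*
 * Note on the *_MASKED helpers above: "masked" registers carry a
 * write-enable mask in their upper 16 bits, so a single write can update
 * individual bits without a read-modify-write cycle. As a rough sketch
 * (see the _MASKED_* macros in i915_reg.h for the authoritative
 * definitions):
 *
 *   _MASKED_BIT_ENABLE(BIT(0))  ~= (BIT(0) << 16) | BIT(0)
 *   _MASKED_BIT_DISABLE(BIT(0)) ~= (BIT(0) << 16)
 */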
200
201 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
202                                       struct i915_wa_list *wal)
203 {
204         WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
205
206         /* WaDisableAsyncFlipPerfMode:bdw,chv */
207         WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
208
209         /* WaDisablePartialInstShootdown:bdw,chv */
210         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
211                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
212
213         /* Use Force Non-Coherent whenever executing a 3D context. This is a
214          * workaround for a possible hang in the unlikely event a TLB
215          * invalidation occurs during a PSD flush.
216          */
217         /* WaForceEnableNonCoherent:bdw,chv */
218         /* WaHdcDisableFetchWhenMasked:bdw,chv */
219         WA_SET_BIT_MASKED(HDC_CHICKEN0,
220                           HDC_DONOT_FETCH_MEM_WHEN_MASKED |
221                           HDC_FORCE_NON_COHERENT);
222
223         /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
224          * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
225          *  polygons in the same 8x4 pixel/sample area to be processed without
226          *  stalling waiting for the earlier ones to write to Hierarchical Z
227          *  buffer."
228          *
229          * This optimization is off by default for BDW and CHV; turn it on.
230          */
231         WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
232
233         /* Wa4x4STCOptimizationDisable:bdw,chv */
234         WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
235
236         /*
237          * BSpec recommends 8x4 when MSAA is used,
238          * however in practice 16x4 seems fastest.
239          *
240          * Note that PS/WM thread counts depend on the WIZ hashing
241          * disable bit, which we don't touch here, but it's good
242          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
243          */
244         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
245                             GEN6_WIZ_HASHING_MASK,
246                             GEN6_WIZ_HASHING_16x4);
247 }
248
249 static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
250                                      struct i915_wa_list *wal)
251 {
252         struct drm_i915_private *i915 = engine->i915;
253
254         gen8_ctx_workarounds_init(engine, wal);
255
256         /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
257         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
258
259         /* WaDisableDopClockGating:bdw
260          *
261          * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
262          * to disable EUTC clock gating.
263          */
264         WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
265                           DOP_CLOCK_GATING_DISABLE);
266
267         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
268                           GEN8_SAMPLER_POWER_BYPASS_DIS);
269
270         WA_SET_BIT_MASKED(HDC_CHICKEN0,
271                           /* WaForceContextSaveRestoreNonCoherent:bdw */
272                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
273                           /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
274                           (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
275 }
276
277 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
278                                      struct i915_wa_list *wal)
279 {
280         gen8_ctx_workarounds_init(engine, wal);
281
282         /* WaDisableThreadStallDopClockGating:chv */
283         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
284
285         /* Improve HiZ throughput on CHV. */
286         WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
287 }
288
289 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
290                                       struct i915_wa_list *wal)
291 {
292         struct drm_i915_private *i915 = engine->i915;
293
294         if (HAS_LLC(i915)) {
295                 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
296                  *
297                  * Must match Display Engine. See
298                  * WaCompressedResourceDisplayNewHashMode.
299                  */
300                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
301                                   GEN9_PBE_COMPRESSED_HASH_SELECTION);
302                 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
303                                   GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
304         }
305
306         /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
307         /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
308         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
309                           FLOW_CONTROL_ENABLE |
310                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
311
312         /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
313         if (!IS_COFFEELAKE(i915))
314                 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
315                                   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
316
317         /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
318         /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
319         WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
320                           GEN9_ENABLE_YV12_BUGFIX |
321                           GEN9_ENABLE_GPGPU_PREEMPTION);
322
323         /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
324         /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
325         WA_SET_BIT_MASKED(CACHE_MODE_1,
326                           GEN8_4x4_STC_OPTIMIZATION_DISABLE |
327                           GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
328
329         /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
330         WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
331                           GEN9_CCS_TLB_PREFETCH_ENABLE);
332
333         /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
334         WA_SET_BIT_MASKED(HDC_CHICKEN0,
335                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
336                           HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
337
338         /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
339          * both tied to WaForceContextSaveRestoreNonCoherent
340          * in some hsds for skl. We keep the tie for all gen9. The
341          * documentation is a bit hazy and so we want to get common behaviour,
342          * even though there is no clear evidence we would need both on kbl/bxt.
343          * This area has been a source of system hangs, so we play it safe
344          * and mimic the skl regardless of what bspec says.
345          *
346          * Use Force Non-Coherent whenever executing a 3D context. This
347          * is a workaround for a possible hang in the unlikely event
348          * a TLB invalidation occurs during a PSD flush.
349          */
350
351         /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
352         WA_SET_BIT_MASKED(HDC_CHICKEN0,
353                           HDC_FORCE_NON_COHERENT);
354
355         /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
356         if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
357                 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
358                                   GEN8_SAMPLER_POWER_BYPASS_DIS);
359
360         /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
361         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
362
363         /*
364          * Supporting preemption with fine-granularity requires changes in the
365          * batch buffer programming. Since we can't break old userspace, we
366          * need to set our default preemption level to a safe value. Userspace is
367          * still able to use more fine-grained preemption levels, since in
368          * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
369          * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
370          * not real HW workarounds, but merely a way to start using preemption
371          * while maintaining old contract with userspace.
372          */
373
374         /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
375         WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
376
377         /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
378         WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
379                             GEN9_PREEMPT_GPGPU_LEVEL_MASK,
380                             GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
381
382         /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
383         if (IS_GEN9_LP(i915))
384                 WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
385 }
386
387 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
388                                 struct i915_wa_list *wal)
389 {
390         struct drm_i915_private *i915 = engine->i915;
391         u8 vals[3] = { 0, 0, 0 };
392         unsigned int i;
393
394         for (i = 0; i < 3; i++) {
395                 u8 ss;
396
397                 /*
398                  * Only consider slices where one, and only one, subslice has 7
399                  * EUs
400                  */
401                 if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
402                         continue;
403
404                 /*
405                  * subslice_7eu[i] != 0 (because of the check above) and
406                  * ss_max == 4 (maximum number of subslices possible per slice)
407                  *
408                  * ->    0 <= ss <= 3;
409                  */
410                 ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
411                 vals[i] = 3 - ss;
412         }
413
414         if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
415                 return;
416
417         /* Tune IZ hashing. See intel_device_info_runtime_init() */
418         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
419                             GEN9_IZ_HASHING_MASK(2) |
420                             GEN9_IZ_HASHING_MASK(1) |
421                             GEN9_IZ_HASHING_MASK(0),
422                             GEN9_IZ_HASHING(2, vals[2]) |
423                             GEN9_IZ_HASHING(1, vals[1]) |
424                             GEN9_IZ_HASHING(0, vals[0]));
425 }
426
427 static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
428                                      struct i915_wa_list *wal)
429 {
430         gen9_ctx_workarounds_init(engine, wal);
431         skl_tune_iz_hashing(engine, wal);
432 }
433
434 static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
435                                      struct i915_wa_list *wal)
436 {
437         gen9_ctx_workarounds_init(engine, wal);
438
439         /* WaDisableThreadStallDopClockGating:bxt */
440         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
441                           STALL_DOP_GATING_DISABLE);
442
443         /* WaToEnableHwFixForPushConstHWBug:bxt */
444         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
445                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
446 }
447
448 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
449                                      struct i915_wa_list *wal)
450 {
451         struct drm_i915_private *i915 = engine->i915;
452
453         gen9_ctx_workarounds_init(engine, wal);
454
455         /* WaToEnableHwFixForPushConstHWBug:kbl */
456         if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
457                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
458                                   GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
459
460         /* WaDisableSbeCacheDispatchPortSharing:kbl */
461         WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
462                           GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
463 }
464
465 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
466                                      struct i915_wa_list *wal)
467 {
468         gen9_ctx_workarounds_init(engine, wal);
469
470         /* WaToEnableHwFixForPushConstHWBug:glk */
471         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
472                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
473 }
474
475 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
476                                      struct i915_wa_list *wal)
477 {
478         gen9_ctx_workarounds_init(engine, wal);
479
480         /* WaToEnableHwFixForPushConstHWBug:cfl */
481         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
482                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
483
484         /* WaDisableSbeCacheDispatchPortSharing:cfl */
485         WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
486                           GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
487 }
488
489 static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
490                                      struct i915_wa_list *wal)
491 {
492         struct drm_i915_private *i915 = engine->i915;
493
494         /* WaForceContextSaveRestoreNonCoherent:cnl */
495         WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
496                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
497
498         /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
499         if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
500                 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
501
502         /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
503         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
504                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
505
506         /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
507         if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
508                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
509                                   GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
510
511         /* WaPushConstantDereferenceHoldDisable:cnl */
512         WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
513
514         /* FtrEnableFastAnisoL1BankingFix:cnl */
515         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
516
517         /* WaDisable3DMidCmdPreemption:cnl */
518         WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
519
520         /* WaDisableGPGPUMidCmdPreemption:cnl */
521         WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
522                             GEN9_PREEMPT_GPGPU_LEVEL_MASK,
523                             GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
524
525         /* WaDisableEarlyEOT:cnl */
526         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
527 }
528
529 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
530                                      struct i915_wa_list *wal)
531 {
532         struct drm_i915_private *i915 = engine->i915;
533
534         /* WaDisableBankHangMode:icl */
535         wa_write(wal,
536                  GEN8_L3CNTLREG,
537                  intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
538                  GEN8_ERRDETBCTRL);
539
540         /* Wa_1604370585:icl (pre-prod)
541          * Formerly known as WaPushConstantDereferenceHoldDisable
542          */
543         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
544                 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
545                                   PUSH_CONSTANT_DEREF_DISABLE);
546
547         /* WaForceEnableNonCoherent:icl
548          * This is not the same workaround as in early Gen9 platforms, where
549          * lacking this could cause system hangs, but coherency performance
550          * overhead is high and only a few compute workloads really need it
551          * (the register is whitelisted in hardware now, so UMDs can opt in
552          * for coherency if they have a good reason).
553          */
554         WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
555
556         /* Wa_2006611047:icl (pre-prod)
557          * Formerly known as WaDisableImprovedTdlClkGating
558          */
559         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
560                 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
561                                   GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
562
563         /* Wa_2006665173:icl (pre-prod) */
564         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
565                 WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
566                                   GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
567
568         /* WaEnableFloatBlendOptimization:icl */
569         wa_write_masked_or(wal,
570                            GEN10_CACHE_MODE_SS,
571                            0, /* write-only, so skip validation */
572                            _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
573
574         /* WaDisableGPGPUMidThreadPreemption:icl */
575         WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
576                             GEN9_PREEMPT_GPGPU_LEVEL_MASK,
577                             GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
578
579         /* allow headerless messages for preemptible GPGPU context */
580         WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
581                           GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
582 }
583
584 static void
585 __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
586                            struct i915_wa_list *wal,
587                            const char *name)
588 {
589         struct drm_i915_private *i915 = engine->i915;
590
591         if (engine->class != RENDER_CLASS)
592                 return;
593
594         wa_init_start(wal, name);
595
596         if (IS_GEN(i915, 11))
597                 icl_ctx_workarounds_init(engine, wal);
598         else if (IS_CANNONLAKE(i915))
599                 cnl_ctx_workarounds_init(engine, wal);
600         else if (IS_COFFEELAKE(i915))
601                 cfl_ctx_workarounds_init(engine, wal);
602         else if (IS_GEMINILAKE(i915))
603                 glk_ctx_workarounds_init(engine, wal);
604         else if (IS_KABYLAKE(i915))
605                 kbl_ctx_workarounds_init(engine, wal);
606         else if (IS_BROXTON(i915))
607                 bxt_ctx_workarounds_init(engine, wal);
608         else if (IS_SKYLAKE(i915))
609                 skl_ctx_workarounds_init(engine, wal);
610         else if (IS_CHERRYVIEW(i915))
611                 chv_ctx_workarounds_init(engine, wal);
612         else if (IS_BROADWELL(i915))
613                 bdw_ctx_workarounds_init(engine, wal);
614         else if (INTEL_GEN(i915) < 8)
615                 return;
616         else
617                 MISSING_CASE(INTEL_GEN(i915));
618
619         wa_init_finish(wal);
620 }
621
622 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
623 {
624         __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
625 }
626
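/*
 * Emit the context workaround list into @rq as a single
 * MI_LOAD_REGISTER_IMM block: one (offset, value) pair per workaround,
 * plus a trailing MI_NOOP to keep the emitted dword count even, all
 * bracketed by flushes. As an illustration, a two-entry list becomes
 * roughly:
 *
 *   MI_LOAD_REGISTER_IMM(2), reg0, val0, reg1, val1, MI_NOOP
 */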
627 int intel_engine_emit_ctx_wa(struct i915_request *rq)
628 {
629         struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
630         struct i915_wa *wa;
631         unsigned int i;
632         u32 *cs;
633         int ret;
634
635         if (wal->count == 0)
636                 return 0;
637
638         ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
639         if (ret)
640                 return ret;
641
642         cs = intel_ring_begin(rq, (wal->count * 2 + 2));
643         if (IS_ERR(cs))
644                 return PTR_ERR(cs);
645
646         *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
647         for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
648                 *cs++ = i915_mmio_reg_offset(wa->reg);
649                 *cs++ = wa->val;
650         }
651         *cs++ = MI_NOOP;
652
653         intel_ring_advance(rq, cs);
654
655         ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
656         if (ret)
657                 return ret;
658
659         return 0;
660 }
661
662 static void
663 gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
664 {
665         /* WaDisableKillLogic:bxt,skl,kbl */
666         if (!IS_COFFEELAKE(i915))
667                 wa_write_or(wal,
668                             GAM_ECOCHK,
669                             ECOCHK_DIS_TLB);
670
671         if (HAS_LLC(i915)) {
672                 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
673                  *
674                  * Must match Display Engine. See
675                  * WaCompressedResourceDisplayNewHashMode.
676                  */
677                 wa_write_or(wal,
678                             MMCD_MISC_CTRL,
679                             MMCD_PCLA | MMCD_HOTSPOT_EN);
680         }
681
682         /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
683         wa_write_or(wal,
684                     GAM_ECOCHK,
685                     BDW_DISABLE_HDC_INVALIDATION);
686 }
687
688 static void
689 skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
690 {
691         gen9_gt_workarounds_init(i915, wal);
692
693         /* WaDisableGafsUnitClkGating:skl */
694         wa_write_or(wal,
695                     GEN7_UCGCTL4,
696                     GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
697
698         /* WaInPlaceDecompressionHang:skl */
699         if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
700                 wa_write_or(wal,
701                             GEN9_GAMT_ECO_REG_RW_IA,
702                             GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
703 }
704
705 static void
706 bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
707 {
708         gen9_gt_workarounds_init(i915, wal);
709
710         /* WaInPlaceDecompressionHang:bxt */
711         wa_write_or(wal,
712                     GEN9_GAMT_ECO_REG_RW_IA,
713                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
714 }
715
716 static void
717 kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
718 {
719         gen9_gt_workarounds_init(i915, wal);
720
721         /* WaDisableDynamicCreditSharing:kbl */
722         if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
723                 wa_write_or(wal,
724                             GAMT_CHKN_BIT_REG,
725                             GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
726
727         /* WaDisableGafsUnitClkGating:kbl */
728         wa_write_or(wal,
729                     GEN7_UCGCTL4,
730                     GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
731
732         /* WaInPlaceDecompressionHang:kbl */
733         wa_write_or(wal,
734                     GEN9_GAMT_ECO_REG_RW_IA,
735                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
736 }
737
738 static void
739 glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
740 {
741         gen9_gt_workarounds_init(i915, wal);
742 }
743
744 static void
745 cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
746 {
747         gen9_gt_workarounds_init(i915, wal);
748
749         /* WaDisableGafsUnitClkGating:cfl */
750         wa_write_or(wal,
751                     GEN7_UCGCTL4,
752                     GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
753
754         /* WaInPlaceDecompressionHang:cfl */
755         wa_write_or(wal,
756                     GEN9_GAMT_ECO_REG_RW_IA,
757                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
758 }
759
760 static void
761 wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
762 {
763         const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
764         u32 mcr_slice_subslice_mask;
765
766         /*
767          * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
768          * L3Banks could be fused off in a single-slice scenario. If that is
769          * the case, we might need to program MCR select to a valid L3Bank
770          * by default, to make sure we correctly read certain registers
771          * later on (in the range 0xB100 - 0xB3FF).
772          * This might be incompatible with
773          * WaProgramMgsrForCorrectSliceSpecificMmioReads.
774          * Fortunately, this should not happen in production hardware, so
775          * we only assert that this is the case (instead of implementing
776          * something more complex that requires checking the range of every
777          * MMIO read).
778          */
779         if (INTEL_GEN(i915) >= 10 &&
780             is_power_of_2(sseu->slice_mask)) {
781                 /*
782                  * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
783                  * enabled subslice, no need to redirect MCR packet
784                  */
785                 u32 slice = fls(sseu->slice_mask);
786                 u32 fuse3 =
787                         intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
788                 u8 ss_mask = sseu->subslice_mask[slice];
789
790                 u8 enabled_mask = (ss_mask | ss_mask >>
791                                    GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
792                 u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
793
794                 /*
795                  * Production silicon should have matched L3Bank and
796                  * subslice enabled
797                  */
798                 WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
799         }
800
801         if (INTEL_GEN(i915) >= 11)
802                 mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
803                                           GEN11_MCR_SUBSLICE_MASK;
804         else
805                 mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
806                                           GEN8_MCR_SUBSLICE_MASK;
807         /*
808          * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
809          * Before any MMIO read into slice/subslice specific registers, MCR
810          * packet control register needs to be programmed to point to any
811          * enabled s/ss pair. Otherwise, incorrect values will be returned.
812          * This means each subsequent MMIO read will be forwarded to a
813          * specific s/ss combination, but this is OK since these registers
814          * are consistent across s/ss in almost all cases. On the rare
815          * occasions, such as INSTDONE, where this value is dependent
816          * on s/ss combo, the read should be done with read_subslice_reg.
817          */
818         wa_write_masked_or(wal,
819                            GEN8_MCR_SELECTOR,
820                            mcr_slice_subslice_mask,
821                            intel_calculate_mcr_s_ss_select(i915));
822 }
823
824 static void
825 cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
826 {
827         wa_init_mcr(i915, wal);
828
829         /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
830         if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
831                 wa_write_or(wal,
832                             GAMT_CHKN_BIT_REG,
833                             GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
834
835         /* WaInPlaceDecompressionHang:cnl */
836         wa_write_or(wal,
837                     GEN9_GAMT_ECO_REG_RW_IA,
838                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
839 }
840
841 static void
842 icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
843 {
844         wa_init_mcr(i915, wal);
845
846         /* WaInPlaceDecompressionHang:icl */
847         wa_write_or(wal,
848                     GEN9_GAMT_ECO_REG_RW_IA,
849                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
850
851         /* WaModifyGamTlbPartitioning:icl */
852         wa_write_masked_or(wal,
853                            GEN11_GACB_PERF_CTRL,
854                            GEN11_HASH_CTRL_MASK,
855                            GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
856
857         /* Wa_1405766107:icl
858          * Formerly known as WaCL2SFHalfMaxAlloc
859          */
860         wa_write_or(wal,
861                     GEN11_LSN_UNSLCVC,
862                     GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
863                     GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
864
865         /* Wa_220166154:icl
866          * Formerly known as WaDisCtxReload
867          */
868         wa_write_or(wal,
869                     GEN8_GAMW_ECO_DEV_RW_IA,
870                     GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
871
872         /* Wa_1405779004:icl (pre-prod) */
873         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
874                 wa_write_or(wal,
875                             SLICE_UNIT_LEVEL_CLKGATE,
876                             MSCUNIT_CLKGATE_DIS);
877
878         /* Wa_1406680159:icl */
879         wa_write_or(wal,
880                     SUBSLICE_UNIT_LEVEL_CLKGATE,
881                     GWUNIT_CLKGATE_DIS);
882
883         /* Wa_1406838659:icl (pre-prod) */
884         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
885                 wa_write_or(wal,
886                             INF_UNIT_LEVEL_CLKGATE,
887                             CGPSF_CLKGATE_DIS);
888
889         /* Wa_1406463099:icl
890          * Formerly known as WaGamTlbPendError
891          */
892         wa_write_or(wal,
893                     GAMT_CHKN_BIT_REG,
894                     GAMT_CHKN_DISABLE_L3_COH_PIPE);
895 }
896
897 static void
898 gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
899 {
900         if (IS_GEN(i915, 11))
901                 icl_gt_workarounds_init(i915, wal);
902         else if (IS_CANNONLAKE(i915))
903                 cnl_gt_workarounds_init(i915, wal);
904         else if (IS_COFFEELAKE(i915))
905                 cfl_gt_workarounds_init(i915, wal);
906         else if (IS_GEMINILAKE(i915))
907                 glk_gt_workarounds_init(i915, wal);
908         else if (IS_KABYLAKE(i915))
909                 kbl_gt_workarounds_init(i915, wal);
910         else if (IS_BROXTON(i915))
911                 bxt_gt_workarounds_init(i915, wal);
912         else if (IS_SKYLAKE(i915))
913                 skl_gt_workarounds_init(i915, wal);
914         else if (INTEL_GEN(i915) <= 8)
915                 return;
916         else
917                 MISSING_CASE(INTEL_GEN(i915));
918 }
919
920 void intel_gt_init_workarounds(struct drm_i915_private *i915)
921 {
922         struct i915_wa_list *wal = &i915->gt_wa_list;
923
924         wa_init_start(wal, "GT");
925         gt_init_workarounds(i915, wal);
926         wa_init_finish(wal);
927 }
928
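/*
 * Compute the union of forcewake domains needed to read-modify-write
 * every register in @wal, so that wa_list_apply() can grab them all once
 * up front instead of per register.
 */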
929 static enum forcewake_domains
930 wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
931 {
932         enum forcewake_domains fw = 0;
933         struct i915_wa *wa;
934         unsigned int i;
935
936         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
937                 fw |= intel_uncore_forcewake_for_reg(uncore,
938                                                      wa->reg,
939                                                      FW_REG_READ |
940                                                      FW_REG_WRITE);
941
942         return fw;
943 }
944
945 static bool
946 wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
947 {
948         if ((cur ^ wa->val) & wa->read) {
949                 DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
950                           name, from, i915_mmio_reg_offset(wa->reg),
951                           cur, cur & wa->read,
952                           wa->val, wa->mask);
953
954                 return false;
955         }
956
957         return true;
958 }
959
960 static void
961 wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
962 {
963         enum forcewake_domains fw;
964         unsigned long flags;
965         struct i915_wa *wa;
966         unsigned int i;
967
968         if (!wal->count)
969                 return;
970
971         fw = wal_get_fw_for_rmw(uncore, wal);
972
973         spin_lock_irqsave(&uncore->lock, flags);
974         intel_uncore_forcewake_get__locked(uncore, fw);
975
976         for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
977                 intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
978                 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
979                         wa_verify(wa,
980                                   intel_uncore_read_fw(uncore, wa->reg),
981                                   wal->name, "application");
982         }
983
984         intel_uncore_forcewake_put__locked(uncore, fw);
985         spin_unlock_irqrestore(&uncore->lock, flags);
986 }
987
988 void intel_gt_apply_workarounds(struct intel_gt *gt)
989 {
990         wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
991 }
992
993 static bool wa_list_verify(struct intel_uncore *uncore,
994                            const struct i915_wa_list *wal,
995                            const char *from)
996 {
997         struct i915_wa *wa;
998         unsigned int i;
999         bool ok = true;
1000
1001         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1002                 ok &= wa_verify(wa,
1003                                 intel_uncore_read(uncore, wa->reg),
1004                                 wal->name, from);
1005
1006         return ok;
1007 }
1008
1009 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1010 {
1011         return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
1012 }
1013
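/*
 * Queue a register for the RING_FORCE_TO_NONPRIV whitelist. The access
 * flags (e.g. RING_FORCE_TO_NONPRIV_RD or a RANGE modifier) are OR'ed
 * into bits of the register offset that are not used for addressing, and
 * the combined value is later programmed into a whitelist slot by
 * intel_engine_apply_whitelist(). At most RING_MAX_NONPRIV_SLOTS entries
 * fit per engine.
 */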
1014 static void
1015 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
1016 {
1017         struct i915_wa wa = {
1018                 .reg = reg
1019         };
1020
1021         if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1022                 return;
1023
1024         wa.reg.reg |= flags;
1025         _wa_add(wal, &wa);
1026 }
1027
1028 static void
1029 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
1030 {
1031         whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
1032 }
1033
1034 static void gen9_whitelist_build(struct i915_wa_list *w)
1035 {
1036         /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1037         whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
1038
1039         /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
1040         whitelist_reg(w, GEN8_CS_CHICKEN1);
1041
1042         /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
1043         whitelist_reg(w, GEN8_HDC_CHICKEN1);
1044 }
1045
1046 static void skl_whitelist_build(struct intel_engine_cs *engine)
1047 {
1048         struct i915_wa_list *w = &engine->whitelist;
1049
1050         if (engine->class != RENDER_CLASS)
1051                 return;
1052
1053         gen9_whitelist_build(w);
1054
1055         /* WaDisableLSQCROPERFforOCL:skl */
1056         whitelist_reg(w, GEN8_L3SQCREG4);
1057 }
1058
1059 static void bxt_whitelist_build(struct intel_engine_cs *engine)
1060 {
1061         if (engine->class != RENDER_CLASS)
1062                 return;
1063
1064         gen9_whitelist_build(&engine->whitelist);
1065 }
1066
1067 static void kbl_whitelist_build(struct intel_engine_cs *engine)
1068 {
1069         struct i915_wa_list *w = &engine->whitelist;
1070
1071         if (engine->class != RENDER_CLASS)
1072                 return;
1073
1074         gen9_whitelist_build(w);
1075
1076         /* WaDisableLSQCROPERFforOCL:kbl */
1077         whitelist_reg(w, GEN8_L3SQCREG4);
1078 }
1079
1080 static void glk_whitelist_build(struct intel_engine_cs *engine)
1081 {
1082         struct i915_wa_list *w = &engine->whitelist;
1083
1084         if (engine->class != RENDER_CLASS)
1085                 return;
1086
1087         gen9_whitelist_build(w);
1088
1089         /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1090         whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1091 }
1092
1093 static void cfl_whitelist_build(struct intel_engine_cs *engine)
1094 {
1095         struct i915_wa_list *w = &engine->whitelist;
1096
1097         if (engine->class != RENDER_CLASS)
1098                 return;
1099
1100         gen9_whitelist_build(w);
1101
1102         /*
1103          * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
1104          *
1105          * This covers 4 registers which are next to one another:
1106          *   - PS_INVOCATION_COUNT
1107          *   - PS_INVOCATION_COUNT_UDW
1108          *   - PS_DEPTH_COUNT
1109          *   - PS_DEPTH_COUNT_UDW
1110          */
1111         whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1112                           RING_FORCE_TO_NONPRIV_RD |
1113                           RING_FORCE_TO_NONPRIV_RANGE_4);
1114 }
1115
1116 static void cnl_whitelist_build(struct intel_engine_cs *engine)
1117 {
1118         struct i915_wa_list *w = &engine->whitelist;
1119
1120         if (engine->class != RENDER_CLASS)
1121                 return;
1122
1123         /* WaEnablePreemptionGranularityControlByUMD:cnl */
1124         whitelist_reg(w, GEN8_CS_CHICKEN1);
1125 }
1126
1127 static void icl_whitelist_build(struct intel_engine_cs *engine)
1128 {
1129         struct i915_wa_list *w = &engine->whitelist;
1130
1131         switch (engine->class) {
1132         case RENDER_CLASS:
1133                 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
1134                 whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
1135
1136                 /* WaAllowUMDToModifySamplerMode:icl */
1137                 whitelist_reg(w, GEN10_SAMPLER_MODE);
1138
1139                 /* WaEnableStateCacheRedirectToCS:icl */
1140                 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1141
1142                 /*
1143                  * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
1144                  *
1145                  * This covers 4 registers which are next to one another:
1146                  *   - PS_INVOCATION_COUNT
1147                  *   - PS_INVOCATION_COUNT_UDW
1148                  *   - PS_DEPTH_COUNT
1149                  *   - PS_DEPTH_COUNT_UDW
1150                  */
1151                 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1152                                   RING_FORCE_TO_NONPRIV_RD |
1153                                   RING_FORCE_TO_NONPRIV_RANGE_4);
1154                 break;
1155
1156         case VIDEO_DECODE_CLASS:
1157                 /* hucStatusRegOffset */
1158                 whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
1159                                   RING_FORCE_TO_NONPRIV_RD);
1160                 /* hucUKernelHdrInfoRegOffset */
1161                 whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
1162                                   RING_FORCE_TO_NONPRIV_RD);
1163                 /* hucStatus2RegOffset */
1164                 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
1165                                   RING_FORCE_TO_NONPRIV_RD);
1166                 break;
1167
1168         default:
1169                 break;
1170         }
1171 }
1172
1173 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
1174 {
1175         struct drm_i915_private *i915 = engine->i915;
1176         struct i915_wa_list *w = &engine->whitelist;
1177
1178         wa_init_start(w, "whitelist");
1179
1180         if (IS_GEN(i915, 11))
1181                 icl_whitelist_build(engine);
1182         else if (IS_CANNONLAKE(i915))
1183                 cnl_whitelist_build(engine);
1184         else if (IS_COFFEELAKE(i915))
1185                 cfl_whitelist_build(engine);
1186         else if (IS_GEMINILAKE(i915))
1187                 glk_whitelist_build(engine);
1188         else if (IS_KABYLAKE(i915))
1189                 kbl_whitelist_build(engine);
1190         else if (IS_BROXTON(i915))
1191                 bxt_whitelist_build(engine);
1192         else if (IS_SKYLAKE(i915))
1193                 skl_whitelist_build(engine);
1194         else if (INTEL_GEN(i915) <= 8)
1195                 return;
1196         else
1197                 MISSING_CASE(INTEL_GEN(i915));
1198
1199         wa_init_finish(w);
1200 }
1201
1202 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
1203 {
1204         const struct i915_wa_list *wal = &engine->whitelist;
1205         struct intel_uncore *uncore = engine->uncore;
1206         const u32 base = engine->mmio_base;
1207         struct i915_wa *wa;
1208         unsigned int i;
1209
1210         if (!wal->count)
1211                 return;
1212
1213         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1214                 intel_uncore_write(uncore,
1215                                    RING_FORCE_TO_NONPRIV(base, i),
1216                                    i915_mmio_reg_offset(wa->reg));
1217
1218         /* And clear the rest just in case of garbage */
1219         for (; i < RING_MAX_NONPRIV_SLOTS; i++)
1220                 intel_uncore_write(uncore,
1221                                    RING_FORCE_TO_NONPRIV(base, i),
1222                                    i915_mmio_reg_offset(RING_NOPID(base)));
1223 }
1224
1225 static void
1226 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1227 {
1228         struct drm_i915_private *i915 = engine->i915;
1229
1230         if (IS_GEN(i915, 11)) {
1231                 /* This is not a Wa. Enable for better image quality */
1232                 wa_masked_en(wal,
1233                              _3D_CHICKEN3,
1234                              _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
1235
1236                 /* WaPipelineFlushCoherentLines:icl */
1237                 ignore_wa_write_or(wal,
1238                                    GEN8_L3SQCREG4,
1239                                    GEN8_LQSC_FLUSH_COHERENT_LINES,
1240                                    GEN8_LQSC_FLUSH_COHERENT_LINES);
1241
1242                 /*
1243                  * Wa_1405543622:icl
1244                  * Formerly known as WaGAPZPriorityScheme
1245                  */
1246                 wa_write_or(wal,
1247                             GEN8_GARBCNTL,
1248                             GEN11_ARBITRATION_PRIO_ORDER_MASK);
1249
1250                 /*
1251                  * Wa_1604223664:icl
1252                  * Formerly known as WaL3BankAddressHashing
1253                  */
1254                 wa_write_masked_or(wal,
1255                                    GEN8_GARBCNTL,
1256                                    GEN11_HASH_CTRL_EXCL_MASK,
1257                                    GEN11_HASH_CTRL_EXCL_BIT0);
1258                 wa_write_masked_or(wal,
1259                                    GEN11_GLBLINVL,
1260                                    GEN11_BANK_HASH_ADDR_EXCL_MASK,
1261                                    GEN11_BANK_HASH_ADDR_EXCL_BIT0);
1262
1263                 /*
1264                  * Wa_1405733216:icl
1265                  * Formerly known as WaDisableCleanEvicts
1266                  */
1267                 ignore_wa_write_or(wal,
1268                                    GEN8_L3SQCREG4,
1269                                    GEN11_LQSC_CLEAN_EVICT_DISABLE,
1270                                    GEN11_LQSC_CLEAN_EVICT_DISABLE);
1271
1272                 /* WaForwardProgressSoftReset:icl */
1273                 wa_write_or(wal,
1274                             GEN10_SCRATCH_LNCF2,
1275                             PMFLUSHDONE_LNICRSDROP |
1276                             PMFLUSH_GAPL3UNBLOCK |
1277                             PMFLUSHDONE_LNEBLK);
1278
1279                 /* Wa_1406609255:icl (pre-prod) */
1280                 if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
1281                         wa_write_or(wal,
1282                                     GEN7_SARCHKMD,
1283                                     GEN7_DISABLE_DEMAND_PREFETCH);
1284
1285                 /* Wa_1606682166:icl */
1286                 wa_write_or(wal,
1287                             GEN7_SARCHKMD,
1288                             GEN7_DISABLE_SAMPLER_PREFETCH);
1289         }
1290
1291         if (IS_GEN_RANGE(i915, 9, 11)) {
1292                 /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
1293                 wa_masked_en(wal,
1294                              GEN7_FF_SLICE_CS_CHICKEN1,
1295                              GEN9_FFSC_PERCTX_PREEMPT_CTRL);
1296         }
1297
1298         if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
1299                 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
1300                 wa_write_or(wal,
1301                             GEN8_GARBCNTL,
1302                             GEN9_GAPS_TSV_CREDIT_DISABLE);
1303         }
1304
1305         if (IS_BROXTON(i915)) {
1306                 /* WaDisablePooledEuLoadBalancingFix:bxt */
1307                 wa_masked_en(wal,
1308                              FF_SLICE_CS_CHICKEN2,
1309                              GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1310         }
1311
1312         if (IS_GEN(i915, 9)) {
1313                 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
1314                 wa_masked_en(wal,
1315                              GEN9_CSFE_CHICKEN1_RCS,
1316                              GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
1317
1318                 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
1319                 wa_write_or(wal,
1320                             BDW_SCRATCH1,
1321                             GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
1322
1323                 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
1324                 if (IS_GEN9_LP(i915))
1325                         wa_write_masked_or(wal,
1326                                            GEN8_L3SQCREG1,
1327                                            L3_PRIO_CREDITS_MASK,
1328                                            L3_GENERAL_PRIO_CREDITS(62) |
1329                                            L3_HIGH_PRIO_CREDITS(2));
1330
1331                 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
1332                 wa_write_or(wal,
1333                             GEN8_L3SQCREG4,
1334                             GEN8_LQSC_FLUSH_COHERENT_LINES);
1335         }
1336 }
1337
1338 static void
1339 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1340 {
1341         struct drm_i915_private *i915 = engine->i915;
1342
1343         /* WaKBLVECSSemaphoreWaitPoll:kbl */
1344         if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
1345                 wa_write(wal,
1346                          RING_SEMA_WAIT_POLL(engine->mmio_base),
1347                          1);
1348         }
1349 }
1350
1351 static void
1352 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1353 {
1354         if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
1355                 return;
1356
1357         if (engine->id == RCS0)
1358                 rcs_engine_wa_init(engine, wal);
1359         else
1360                 xcs_engine_wa_init(engine, wal);
1361 }
1362
1363 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
1364 {
1365         struct i915_wa_list *wal = &engine->wa_list;
1366
1367         if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
1368                 return;
1369
1370         wa_init_start(wal, engine->name);
1371         engine_init_workarounds(engine, wal);
1372         wa_init_finish(wal);
1373 }
1374
1375 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
1376 {
1377         wa_list_apply(engine->uncore, &engine->wa_list);
1378 }
1379
1380 static struct i915_vma *
1381 create_scratch(struct i915_address_space *vm, int count)
1382 {
1383         struct drm_i915_gem_object *obj;
1384         struct i915_vma *vma;
1385         unsigned int size;
1386         int err;
1387
1388         size = round_up(count * sizeof(u32), PAGE_SIZE);
1389         obj = i915_gem_object_create_internal(vm->i915, size);
1390         if (IS_ERR(obj))
1391                 return ERR_CAST(obj);
1392
1393         i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
1394
1395         vma = i915_vma_instance(obj, vm, NULL);
1396         if (IS_ERR(vma)) {
1397                 err = PTR_ERR(vma);
1398                 goto err_obj;
1399         }
1400
1401         err = i915_vma_pin(vma, 0, 0,
1402                            i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
1403         if (err)
1404                 goto err_obj;
1405
1406         return vma;
1407
1408 err_obj:
1409         i915_gem_object_put(obj);
1410         return ERR_PTR(err);
1411 }
1412
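/*
 * Emit one MI_STORE_REGISTER_MEM per workaround so that the GPU dumps
 * the current value of each register into the scratch @vma (one dword
 * per entry, in list order); engine_wa_list_verify() then maps the
 * buffer and compares the results against the expected values. On gen8+
 * the command takes an extra dword for the 64-bit address, hence the
 * srm++ bumping the length field.
 */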
1413 static int
1414 wa_list_srm(struct i915_request *rq,
1415             const struct i915_wa_list *wal,
1416             struct i915_vma *vma)
1417 {
1418         const struct i915_wa *wa;
1419         unsigned int i;
1420         u32 srm, *cs;
1421
1422         srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1423         if (INTEL_GEN(rq->i915) >= 8)
1424                 srm++;
1425
1426         cs = intel_ring_begin(rq, 4 * wal->count);
1427         if (IS_ERR(cs))
1428                 return PTR_ERR(cs);
1429
1430         for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1431                 *cs++ = srm;
1432                 *cs++ = i915_mmio_reg_offset(wa->reg);
1433                 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
1434                 *cs++ = 0;
1435         }
1436         intel_ring_advance(rq, cs);
1437
1438         return 0;
1439 }
1440
1441 static int engine_wa_list_verify(struct intel_context *ce,
1442                                  const struct i915_wa_list * const wal,
1443                                  const char *from)
1444 {
1445         const struct i915_wa *wa;
1446         struct i915_request *rq;
1447         struct i915_vma *vma;
1448         unsigned int i;
1449         u32 *results;
1450         int err;
1451
1452         if (!wal->count)
1453                 return 0;
1454
1455         vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
1456         if (IS_ERR(vma))
1457                 return PTR_ERR(vma);
1458
1459         rq = intel_context_create_request(ce);
1460         if (IS_ERR(rq)) {
1461                 err = PTR_ERR(rq);
1462                 goto err_vma;
1463         }
1464
1465         err = wa_list_srm(rq, wal, vma);
1466         if (err)
1467                 goto err_vma;
1468
1469         i915_request_add(rq);
1470         if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1471                 err = -ETIME;
1472                 goto err_vma;
1473         }
1474
1475         results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1476         if (IS_ERR(results)) {
1477                 err = PTR_ERR(results);
1478                 goto err_vma;
1479         }
1480
1481         err = 0;
1482         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1483                 if (!wa_verify(wa, results[i], wal->name, from))
1484                         err = -ENXIO;
1485
1486         i915_gem_object_unpin_map(vma->obj);
1487
1488 err_vma:
1489         i915_vma_unpin(vma);
1490         i915_vma_put(vma);
1491         return err;
1492 }
1493
1494 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
1495                                     const char *from)
1496 {
1497         return engine_wa_list_verify(engine->kernel_context,
1498                                      &engine->wa_list,
1499                                      from);
1500 }
1501
1502 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1503 #include "selftest_workarounds.c"
1504 #endif