/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * into five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to their default values (on GPU reset, suspend/resume [2]_, etc.).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of an MMIO workaround (as we
 *   write the list of to-be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ''''''
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */
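
/*
 * Rough call flow, for orientation only (an illustrative sketch of the
 * public entry points defined below, not a complete API reference):
 *
 *      intel_ctx_workarounds_init(i915)       - build the per-context list
 *      intel_ctx_workarounds_emit(rq)         - emit that list as LRI commands
 *      intel_gt_init_workarounds(i915)        - build the GT list
 *      intel_gt_apply_workarounds(i915)       - apply it via MMIO writes
 *      intel_engine_init_workarounds(engine)  - build a per-engine list
 *      intel_engine_apply_workarounds(engine) - apply it via MMIO writes
 *      intel_whitelist_workarounds_apply(engine) - program whitelist slots
 */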

static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
        wal->name = name;
}

static void wa_init_finish(struct i915_wa_list *wal)
{
        if (!wal->count)
                return;

        DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
                         wal->count, wal->name);
}

static void wa_add(struct drm_i915_private *i915,
                   i915_reg_t reg, const u32 mask, const u32 val)
{
        struct i915_workarounds *wa = &i915->workarounds;
        unsigned int start = 0, end = wa->count;
        unsigned int addr = i915_mmio_reg_offset(reg);
        struct i915_wa_reg *r;

        while (start < end) {
                unsigned int mid = start + (end - start) / 2;

                if (wa->reg[mid].addr < addr) {
                        start = mid + 1;
                } else if (wa->reg[mid].addr > addr) {
                        end = mid;
                } else {
                        r = &wa->reg[mid];

                        if ((mask & ~r->mask) == 0) {
                                DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
                                          addr, r->mask, r->value);

                                r->value &= ~mask;
                        }

                        r->value |= val;
                        r->mask  |= mask;
                        return;
                }
        }

        if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
                DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
                          addr, mask, val);
                return;
        }

        r = &wa->reg[wa->count++];
        r->addr  = addr;
        r->value = val;
        r->mask  = mask;

        while (r-- > wa->reg) {
                GEM_BUG_ON(r[0].addr == r[1].addr);
                if (r[1].addr > r[0].addr)
                        break;

                swap(r[1], r[0]);
        }
}
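
/*
 * Note on wa_add() above: wa->reg[] is kept sorted by MMIO offset. The
 * binary search merges repeated writes to the same register, and the
 * trailing swap loop bubbles a new entry back into position. A worked
 * example (offsets illustrative only): adding 0x7004, then 0x2084, then
 * 0x7300 leaves the array as {0x2084, 0x7004, 0x7300}.
 */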

#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))

#define WA_SET_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
        WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
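
/*
 * Reminder on masked registers (a sketch; see _MASKED_FIELD() in
 * i915_reg.h for the authoritative definition): the upper 16 bits select
 * which of the lower 16 bits the write actually touches. So, for example,
 *
 *      WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE)
 *
 * records a value of (ASYNC_FLIP_PERF_DISABLE << 16) |
 * ASYNC_FLIP_PERF_DISABLE, enabling that bit without a read-modify-write.
 */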

static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

        /* WaDisableAsyncFlipPerfMode:bdw,chv */
        WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

        /* WaDisablePartialInstShootdown:bdw,chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Use Force Non-Coherent whenever executing a 3D context. This is a
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
        /* WaForceEnableNonCoherent:bdw,chv */
        /* WaHdcDisableFetchWhenMasked:bdw,chv */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_DONOT_FETCH_MEM_WHEN_MASKED |
                          HDC_FORCE_NON_COHERENT);

        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
         * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
         *  polygons in the same 8x4 pixel/sample area to be processed without
         *  stalling waiting for the earlier ones to write to Hierarchical Z
         *  buffer."
         *
         * This optimization is off by default for BDW and CHV; turn it on.
         */
        WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

        /* Wa4x4STCOptimizationDisable:bdw,chv */
        WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);

        return 0;
}

static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = gen8_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* WaDisableDopClockGating:bdw
         *
         * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
         * to disable EUTC clock gating.
         */
        WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
                          DOP_CLOCK_GATING_DISABLE);

        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN8_SAMPLER_POWER_BYPASS_DIS);

        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
                          (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

        return 0;
}

static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = gen8_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* Improve HiZ throughput on CHV. */
        WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

        return 0;
}

static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        if (HAS_LLC(dev_priv)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                 *
                 * Must match Display Engine. See
                 * WaCompressedResourceDisplayNewHashMode.
                 */
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN9_PBE_COMPRESSED_HASH_SELECTION);
                WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                                  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
        }

        /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
        if (!IS_COFFEELAKE(dev_priv))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
        /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                          GEN9_ENABLE_YV12_BUGFIX |
                          GEN9_ENABLE_GPGPU_PREEMPTION);

        /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(CACHE_MODE_1,
                          GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                          GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

        /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);

        /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

        /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
         * both tied to WaForceContextSaveRestoreNonCoherent
         * in some hsds for skl. We keep the tie for all gen9. The
         * documentation is a bit hazy and so we want to get common behaviour,
         * even though there is no clear evidence we would need both on kbl/bxt.
         * This area has been a source of system hangs, so we play it safe
         * and mimic skl regardless of what bspec says.
         *
         * Use Force Non-Coherent whenever executing a 3D context. This
         * is a workaround for a possible hang in the unlikely event
         * a TLB invalidation occurs during a PSD flush.
         */

        /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_FORCE_NON_COHERENT);

        /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
        if (IS_SKYLAKE(dev_priv) ||
            IS_KABYLAKE(dev_priv) ||
            IS_COFFEELAKE(dev_priv))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);

        /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

        /*
         * Supporting preemption with fine-granularity requires changes in the
         * batch buffer programming. Since we can't break old userspace, we
         * need to set our default preemption level to a safe value. Userspace
         * is still able to use more fine-grained preemption levels, since in
         * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
         * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
         * not real HW workarounds, but merely a way to start using preemption
         * while maintaining the old contract with userspace.
         */

        /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
        WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

        /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
        WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

        /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
        if (IS_GEN9_LP(dev_priv))
                WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);

        return 0;
}

static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
{
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
                u8 ss;

                /*
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
                if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
                        continue;

                /*
                 * subslice_7eu[i] != 0 (because of the check above) and
                 * ss_max == 4 (maximum number of subslices possible per slice)
                 *
                 * ->    0 <= ss <= 3;
                 */
                ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }

        if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
                return 0;

        /* Tune IZ hashing. See intel_device_info_runtime_init() */
        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
                            GEN9_IZ_HASHING_MASK(2) |
                            GEN9_IZ_HASHING_MASK(1) |
                            GEN9_IZ_HASHING_MASK(0),
                            GEN9_IZ_HASHING(2, vals[2]) |
                            GEN9_IZ_HASHING(1, vals[1]) |
                            GEN9_IZ_HASHING(0, vals[0]));

        return 0;
}
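
/*
 * Worked example for skl_tune_iz_hashing() (mask values illustrative
 * only): if subslice_7eu[0] == 0b0100, it is a power of two, so ffs()
 * returns 3, ss == 2 and vals[0] == 1; a mask such as 0b0110 fails
 * is_power_of_2() and that slice keeps its default hashing.
 */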

static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = gen9_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        return skl_tune_iz_hashing(dev_priv);
}

static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = gen9_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:bxt */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          STALL_DOP_GATING_DISABLE);

        /* WaToEnableHwFixForPushConstHWBug:bxt */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        return 0;
}

static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = gen9_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE);

        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableSbeCacheDispatchPortSharing:kbl */
        WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
                          GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

        return 0;
}

static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = gen9_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        /* WaToEnableHwFixForPushConstHWBug:glk */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        return 0;
}

static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = gen9_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        /* WaToEnableHwFixForPushConstHWBug:cfl */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableSbeCacheDispatchPortSharing:cfl */
        WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
                          GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

        return 0;
}

static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        /* WaForceContextSaveRestoreNonCoherent:cnl */
        WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

        /* WaThrottleEUPerfToAvoidTDBackPressure:cnl (pre-prod) */
        if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
                WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

        /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
        if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

        /* WaPushConstantDereferenceHoldDisable:cnl */
        WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

        /* FtrEnableFastAnisoL1BankingFix:cnl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

        /* WaDisable3DMidCmdPreemption:cnl */
        WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

        /* WaDisableGPGPUMidCmdPreemption:cnl */
        WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
                            GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

        /* WaDisableEarlyEOT:cnl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);

        return 0;
}

static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        /* Wa_1604370585:icl (pre-prod)
         * Formerly known as WaPushConstantDereferenceHoldDisable
         */
        if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
                WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
                                  PUSH_CONSTANT_DEREF_DISABLE);

        /* WaForceEnableNonCoherent:icl
         * This is not the same workaround as on early Gen9 platforms, where
         * lacking this could cause system hangs; here the coherency
         * performance overhead is high and only a few compute workloads
         * really need it (the register is whitelisted in hardware now, so
         * UMDs can opt in for coherency if they have a good reason).
         */
        WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

        /* Wa_2006611047:icl (pre-prod)
         * Formerly known as WaDisableImprovedTdlClkGating
         */
        if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
                WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
                                  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);

        /* WaEnableStateCacheRedirectToCS:icl */
        WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
                          GEN11_STATE_CACHE_REDIRECT_TO_CS);

        /* Wa_2006665173:icl (pre-prod) */
        if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
                WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
                                  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);

        return 0;
}

int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
        int err = 0;

        dev_priv->workarounds.count = 0;

        if (INTEL_GEN(dev_priv) < 8)
                err = 0;
        else if (IS_BROADWELL(dev_priv))
                err = bdw_ctx_workarounds_init(dev_priv);
        else if (IS_CHERRYVIEW(dev_priv))
                err = chv_ctx_workarounds_init(dev_priv);
        else if (IS_SKYLAKE(dev_priv))
                err = skl_ctx_workarounds_init(dev_priv);
        else if (IS_BROXTON(dev_priv))
                err = bxt_ctx_workarounds_init(dev_priv);
        else if (IS_KABYLAKE(dev_priv))
                err = kbl_ctx_workarounds_init(dev_priv);
        else if (IS_GEMINILAKE(dev_priv))
                err = glk_ctx_workarounds_init(dev_priv);
        else if (IS_COFFEELAKE(dev_priv))
                err = cfl_ctx_workarounds_init(dev_priv);
        else if (IS_CANNONLAKE(dev_priv))
                err = cnl_ctx_workarounds_init(dev_priv);
        else if (IS_ICELAKE(dev_priv))
                err = icl_ctx_workarounds_init(dev_priv);
        else
                MISSING_CASE(INTEL_GEN(dev_priv));
        if (err)
                return err;

        DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
                         dev_priv->workarounds.count);
        return 0;
}

int intel_ctx_workarounds_emit(struct i915_request *rq)
{
        struct i915_workarounds *w = &rq->i915->workarounds;
        u32 *cs;
        int ret, i;

        if (w->count == 0)
                return 0;

        ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
        if (ret)
                return ret;

        cs = intel_ring_begin(rq, w->count * 2 + 2);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_LOAD_REGISTER_IMM(w->count);
        for (i = 0; i < w->count; i++) {
                *cs++ = w->reg[i].addr;
                *cs++ = w->reg[i].value;
        }
        *cs++ = MI_NOOP;

        intel_ring_advance(rq, cs);

        ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
        if (ret)
                return ret;

        return 0;
}
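
/*
 * The ring-space math above: MI_LOAD_REGISTER_IMM takes one header dword
 * plus an (offset, value) dword pair per register, and the trailing
 * MI_NOOP pads the packet to the even dword count that intel_ring_begin()
 * was asked for. An illustrative stream for a two-entry list:
 *
 *      MI_LOAD_REGISTER_IMM(2)
 *      <reg[0].addr> <reg[0].value>
 *      <reg[1].addr> <reg[1].value>
 *      MI_NOOP
 */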

static void
wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
        const unsigned int grow = 1 << 4;

        GEM_BUG_ON(!is_power_of_2(grow));

        if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
                struct i915_wa *list;

                list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
                                     GFP_KERNEL);
                if (!list) {
                        DRM_ERROR("No space for workaround init!\n");
                        return;
                }

                if (wal->list) {
                        memcpy(list, wal->list, sizeof(*wa) * wal->count);
                        kfree(wal->list);
                }

                wal->list = list;
        }

        wal->list[wal->count++] = *wa;
}
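
/*
 * Allocation note for wal_add() above: the list grows in fixed chunks of
 * 16 entries, so IS_ALIGNED(count, 16) is true exactly when the list is
 * either empty (count == 0) or full. For example, the first add allocates
 * room for 16 entries, the 17th add reallocates to 32, and so on.
 */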

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
        struct i915_wa wa = {
                .reg = reg,
                .mask = val,
                .val = _MASKED_BIT_ENABLE(val)
        };

        wal_add(wal, &wa);
}

static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
                   u32 val)
{
        struct i915_wa wa = {
                .reg = reg,
                .mask = mask,
                .val = val
        };

        wal_add(wal, &wa);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
        wa_write_masked_or(wal, reg, ~0, val);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
        wa_write_masked_or(wal, reg, val, val);
}
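
/*
 * Summary of the helpers above, as applied later by wa_list_apply()
 * (new = (old & ~wa->mask) | wa->val):
 *
 *      wa_write(wal, reg, val)       - replace the whole register
 *      wa_write_or(wal, reg, val)    - set bits, preserving the rest
 *      wa_write_masked_or(...)       - read-modify-write a field
 *      wa_masked_en(wal, reg, val)   - set bits via the masked-write
 *                                      protocol (upper 16 bits = mask)
 */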

static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        /* WaDisableKillLogic:bxt,skl,kbl */
        if (!IS_COFFEELAKE(i915))
                wa_write_or(wal,
                            GAM_ECOCHK,
                            ECOCHK_DIS_TLB);

        if (HAS_LLC(i915)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                 *
                 * Must match Display Engine. See
                 * WaCompressedResourceDisplayNewHashMode.
                 */
                wa_write_or(wal,
                            MMCD_MISC_CTRL,
                            MMCD_PCLA | MMCD_HOTSPOT_EN);
        }

        /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
        wa_write_or(wal,
                    GAM_ECOCHK,
                    BDW_DISABLE_HDC_INVALIDATION);
}

static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        gen9_gt_workarounds_init(i915);

        /* WaDisableGafsUnitClkGating:skl */
        wa_write_or(wal,
                    GEN7_UCGCTL4,
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:skl */
        if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
                wa_write_or(wal,
                            GEN9_GAMT_ECO_REG_RW_IA,
                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        gen9_gt_workarounds_init(i915);

        /* WaInPlaceDecompressionHang:bxt */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        gen9_gt_workarounds_init(i915);

        /* WaDisableDynamicCreditSharing:kbl */
        if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
                wa_write_or(wal,
                            GAMT_CHKN_BIT_REG,
                            GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

        /* WaDisableGafsUnitClkGating:kbl */
        wa_write_or(wal,
                    GEN7_UCGCTL4,
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:kbl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
        gen9_gt_workarounds_init(i915);
}

static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        gen9_gt_workarounds_init(i915);

        /* WaDisableGafsUnitClkGating:cfl */
        wa_write_or(wal,
                    GEN7_UCGCTL4,
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:cfl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
        const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
        struct i915_wa_list *wal = &dev_priv->gt_wa_list;
        u32 mcr_slice_subslice_mask;

        /*
         * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
         * L3 banks could be fused off in a single-slice scenario. If that is
         * the case, we might need to program the MCR select to a valid L3
         * bank by default, to make sure we correctly read certain registers
         * later on (in the range 0xB100 - 0xB3FF).
         * This might be incompatible with
         * WaProgramMgsrForCorrectSliceSpecificMmioReads.
         * Fortunately, this should not happen in production hardware, so we
         * only assert that this is the case (instead of implementing
         * something more complex that requires checking the range of every
         * MMIO read).
         */
        if (INTEL_GEN(dev_priv) >= 10 &&
            is_power_of_2(sseu->slice_mask)) {
                /*
                 * read FUSE3 for enabled L3 Bank IDs; if an L3 bank matches
                 * an enabled subslice, there is no need to redirect the MCR
                 * packet
                 */
                u32 slice = fls(sseu->slice_mask);
                u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
                u8 ss_mask = sseu->subslice_mask[slice];

                u8 enabled_mask = (ss_mask | ss_mask >>
                                   GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
                u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;

                /*
                 * Production silicon should have matched L3 bank and
                 * subslice enables
                 */
                WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
        }

        if (INTEL_GEN(dev_priv) >= 11)
                mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
                                          GEN11_MCR_SUBSLICE_MASK;
        else
                mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
                                          GEN8_MCR_SUBSLICE_MASK;
        /*
         * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
         * Before any MMIO read into slice/subslice specific registers, the
         * MCR packet control register needs to be programmed to point to any
         * enabled s/ss pair. Otherwise, incorrect values will be returned.
         * This means each subsequent MMIO read will be forwarded to a
         * specific s/ss combination, but this is OK since these registers
         * are consistent across s/ss in almost all cases. On the rare
         * occasions, such as INSTDONE, where this value is dependent
         * on s/ss combo, the read should be done with read_subslice_reg.
         */
        wa_write_masked_or(wal,
                           GEN8_MCR_SELECTOR,
                           mcr_slice_subslice_mask,
                           intel_calculate_mcr_s_ss_select(dev_priv));
}
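
/*
 * Worked example for the fuse check in wa_init_mcr() (assuming the i915
 * definitions GEN10_L3BANK_PAIR_COUNT == 4 and GEN10_L3BANK_MASK == 0x0f;
 * the mask value is illustrative only): ss_mask == 0x30 folds to
 * enabled_mask == (0x30 | 0x30 >> 4) & 0x0f == 0x03, which the WARN_ON
 * then compares against the FUSE3-derived bank mask.
 */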

static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        wa_init_mcr(i915);

        /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
        if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
                wa_write_or(wal,
                            GAMT_CHKN_BIT_REG,
                            GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);

        /* WaInPlaceDecompressionHang:cnl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        wa_init_mcr(i915);

        /* WaInPlaceDecompressionHang:icl */
        wa_write_or(wal,
                    GEN9_GAMT_ECO_REG_RW_IA,
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        /* WaModifyGamTlbPartitioning:icl */
        wa_write_masked_or(wal,
                           GEN11_GACB_PERF_CTRL,
                           GEN11_HASH_CTRL_MASK,
                           GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

        /* Wa_1405766107:icl
         * Formerly known as WaCL2SFHalfMaxAlloc
         */
        wa_write_or(wal,
                    GEN11_LSN_UNSLCVC,
                    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
                    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

        /* Wa_220166154:icl
         * Formerly known as WaDisCtxReload
         */
        wa_write_or(wal,
                    GEN8_GAMW_ECO_DEV_RW_IA,
                    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

        /* Wa_1405779004:icl (pre-prod) */
        if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            MSCUNIT_CLKGATE_DIS);

        /* Wa_1406680159:icl */
        wa_write_or(wal,
                    SUBSLICE_UNIT_LEVEL_CLKGATE,
                    GWUNIT_CLKGATE_DIS);

        /* Wa_1406838659:icl (pre-prod) */
        if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
                wa_write_or(wal,
                            INF_UNIT_LEVEL_CLKGATE,
                            CGPSF_CLKGATE_DIS);

        /* Wa_1406463099:icl
         * Formerly known as WaGamTlbPendError
         */
        wa_write_or(wal,
                    GAMT_CHKN_BIT_REG,
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
        struct i915_wa_list *wal = &i915->gt_wa_list;

        wa_init_start(wal, "GT");

        if (INTEL_GEN(i915) < 8)
                return;
        else if (IS_BROADWELL(i915))
                return;
        else if (IS_CHERRYVIEW(i915))
                return;
        else if (IS_SKYLAKE(i915))
                skl_gt_workarounds_init(i915);
        else if (IS_BROXTON(i915))
                bxt_gt_workarounds_init(i915);
        else if (IS_KABYLAKE(i915))
                kbl_gt_workarounds_init(i915);
        else if (IS_GEMINILAKE(i915))
                glk_gt_workarounds_init(i915);
        else if (IS_COFFEELAKE(i915))
                cfl_gt_workarounds_init(i915);
        else if (IS_CANNONLAKE(i915))
                cnl_gt_workarounds_init(i915);
        else if (IS_ICELAKE(i915))
                icl_gt_workarounds_init(i915);
        else
                MISSING_CASE(INTEL_GEN(i915));

        wa_init_finish(wal);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
                   const struct i915_wa_list *wal)
{
        enum forcewake_domains fw = 0;
        struct i915_wa *wa;
        unsigned int i;

        for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
                fw |= intel_uncore_forcewake_for_reg(dev_priv,
                                                     wa->reg,
                                                     FW_REG_READ |
                                                     FW_REG_WRITE);

        return fw;
}

static void
wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
        enum forcewake_domains fw;
        unsigned long flags;
        struct i915_wa *wa;
        unsigned int i;

        if (!wal->count)
                return;

        fw = wal_get_fw_for_rmw(dev_priv, wal);

        spin_lock_irqsave(&dev_priv->uncore.lock, flags);
        intel_uncore_forcewake_get__locked(dev_priv, fw);

        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
                u32 val = I915_READ_FW(wa->reg);

                val &= ~wa->mask;
                val |= wa->val;

                I915_WRITE_FW(wa->reg, val);
        }

        intel_uncore_forcewake_put__locked(dev_priv, fw);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

        DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}

void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
{
        wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
}

struct whitelist {
        i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
        unsigned int count;
        u32 nopid;
};

static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
{
        if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
                return;

        w->reg[w->count++] = reg;
}

static void bdw_whitelist_build(struct whitelist *w)
{
}

static void chv_whitelist_build(struct whitelist *w)
{
}

static void gen9_whitelist_build(struct whitelist *w)
{
        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
        whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

        /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
        whitelist_reg(w, GEN8_CS_CHICKEN1);

        /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
        whitelist_reg(w, GEN8_HDC_CHICKEN1);
}

static void skl_whitelist_build(struct whitelist *w)
{
        gen9_whitelist_build(w);

        /* WaDisableLSQCROPERFforOCL:skl */
        whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct whitelist *w)
{
        gen9_whitelist_build(w);
}

static void kbl_whitelist_build(struct whitelist *w)
{
        gen9_whitelist_build(w);

        /* WaDisableLSQCROPERFforOCL:kbl */
        whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct whitelist *w)
{
        gen9_whitelist_build(w);

        /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
        whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct whitelist *w)
{
        gen9_whitelist_build(w);
}

static void cnl_whitelist_build(struct whitelist *w)
{
        /* WaEnablePreemptionGranularityControlByUMD:cnl */
        whitelist_reg(w, GEN8_CS_CHICKEN1);
}

static void icl_whitelist_build(struct whitelist *w)
{
}

static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
                                         struct whitelist *w)
{
        struct drm_i915_private *i915 = engine->i915;

        GEM_BUG_ON(engine->id != RCS);

        w->count = 0;
        w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));

        if (INTEL_GEN(i915) < 8)
                return NULL;
        else if (IS_BROADWELL(i915))
                bdw_whitelist_build(w);
        else if (IS_CHERRYVIEW(i915))
                chv_whitelist_build(w);
        else if (IS_SKYLAKE(i915))
                skl_whitelist_build(w);
        else if (IS_BROXTON(i915))
                bxt_whitelist_build(w);
        else if (IS_KABYLAKE(i915))
                kbl_whitelist_build(w);
        else if (IS_GEMINILAKE(i915))
                glk_whitelist_build(w);
        else if (IS_COFFEELAKE(i915))
                cfl_whitelist_build(w);
        else if (IS_CANNONLAKE(i915))
                cnl_whitelist_build(w);
        else if (IS_ICELAKE(i915))
                icl_whitelist_build(w);
        else
                MISSING_CASE(INTEL_GEN(i915));

        return w;
}

static void whitelist_apply(struct intel_engine_cs *engine,
                            const struct whitelist *w)
{
        struct drm_i915_private *dev_priv = engine->i915;
        const u32 base = engine->mmio_base;
        unsigned int i;

        if (!w)
                return;

        intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);

        for (i = 0; i < w->count; i++)
                I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
                              i915_mmio_reg_offset(w->reg[i]));

        /* And clear the rest just in case of garbage */
        for (; i < RING_MAX_NONPRIV_SLOTS; i++)
                I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);

        intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
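
/*
 * Note on whitelist_apply() above: each RING_FORCE_TO_NONPRIV slot holds
 * the MMIO offset of one register that unprivileged batches may access.
 * Unused slots are pointed at the engine's RING_NOPID offset, a register
 * that should be harmless to expose, rather than left with stale contents.
 */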

void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
{
        struct whitelist w;

        whitelist_apply(engine, whitelist_build(engine, &w));
}

static void rcs_engine_wa_init(struct intel_engine_cs *engine)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_wa_list *wal = &engine->wa_list;

        if (IS_ICELAKE(i915)) {
                /* This is not a Wa. Enable for better image quality */
                wa_masked_en(wal,
                             _3D_CHICKEN3,
                             _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

                /* WaPipelineFlushCoherentLines:icl */
                wa_write_or(wal,
                            GEN8_L3SQCREG4,
                            GEN8_LQSC_FLUSH_COHERENT_LINES);

                /*
                 * Wa_1405543622:icl
                 * Formerly known as WaGAPZPriorityScheme
                 */
                wa_write_or(wal,
                            GEN8_GARBCNTL,
                            GEN11_ARBITRATION_PRIO_ORDER_MASK);

                /*
                 * Wa_1604223664:icl
                 * Formerly known as WaL3BankAddressHashing
                 */
                wa_write_masked_or(wal,
                                   GEN8_GARBCNTL,
                                   GEN11_HASH_CTRL_EXCL_MASK,
                                   GEN11_HASH_CTRL_EXCL_BIT0);
                wa_write_masked_or(wal,
                                   GEN11_GLBLINVL,
                                   GEN11_BANK_HASH_ADDR_EXCL_MASK,
                                   GEN11_BANK_HASH_ADDR_EXCL_BIT0);

                /*
                 * Wa_1405733216:icl
                 * Formerly known as WaDisableCleanEvicts
                 */
                wa_write_or(wal,
                            GEN8_L3SQCREG4,
                            GEN11_LQSC_CLEAN_EVICT_DISABLE);

                /* Wa_1604302699:icl */
                wa_write_or(wal,
                            GEN10_L3_CHICKEN_MODE_REGISTER,
                            GEN11_I2M_WRITE_DISABLE);

                /* WaForwardProgressSoftReset:icl */
                wa_write_or(wal,
                            GEN10_SCRATCH_LNCF2,
                            PMFLUSHDONE_LNICRSDROP |
                            PMFLUSH_GAPL3UNBLOCK |
                            PMFLUSHDONE_LNEBLK);
        }

        if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
                /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
                wa_masked_en(wal,
                             GEN7_FF_SLICE_CS_CHICKEN1,
                             GEN9_FFSC_PERCTX_PREEMPT_CTRL);
        }

        if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
                /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
                wa_write_or(wal,
                            GEN8_GARBCNTL,
                            GEN9_GAPS_TSV_CREDIT_DISABLE);
        }

        if (IS_BROXTON(i915)) {
                /* WaDisablePooledEuLoadBalancingFix:bxt */
                wa_masked_en(wal,
                             FF_SLICE_CS_CHICKEN2,
                             GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
        }

        if (IS_GEN9(i915)) {
                /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
                wa_masked_en(wal,
                             GEN9_CSFE_CHICKEN1_RCS,
                             GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

                /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
                wa_write_or(wal,
                            BDW_SCRATCH1,
                            GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

                /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
                if (IS_GEN9_LP(i915))
                        wa_write_masked_or(wal,
                                           GEN8_L3SQCREG1,
                                           L3_PRIO_CREDITS_MASK,
                                           L3_GENERAL_PRIO_CREDITS(62) |
                                           L3_HIGH_PRIO_CREDITS(2));

                /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
                wa_write_or(wal,
                            GEN8_L3SQCREG4,
                            GEN8_LQSC_FLUSH_COHERENT_LINES);
        }
}

static void xcs_engine_wa_init(struct intel_engine_cs *engine)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_wa_list *wal = &engine->wa_list;

        /* WaKBLVECSSemaphoreWaitPoll:kbl */
        if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
                wa_write(wal,
                         RING_SEMA_WAIT_POLL(engine->mmio_base),
                         1);
        }
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
        struct i915_wa_list *wal = &engine->wa_list;

        if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
                return;

        wa_init_start(wal, engine->name);

        if (engine->id == RCS)
                rcs_engine_wa_init(engine);
        else
                xcs_engine_wa_init(engine);

        wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
        wa_list_apply(engine->i915, &engine->wa_list);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_workarounds.c"
#endif