1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2014-2018 Intel Corporation
5  */
6
7 #include "i915_drv.h"
8 #include "intel_context.h"
9 #include "intel_gt.h"
10 #include "intel_workarounds.h"
11
12 /**
13  * DOC: Hardware workarounds
14  *
15  * This file is intended as a central place to implement most [1]_ of the
16  * required workarounds for hardware to work as originally intended. They fall
17  * into five basic categories depending on how/when they are applied:
18  *
19  * - Workarounds that touch registers that are saved/restored to/from the HW
20  *   context image. The list is emitted (via Load Register Immediate commands)
21  *   every time a new context is created.
22  * - GT workarounds. The list of these WAs is applied whenever these registers
23  *   revert to default values (on GPU reset, suspend/resume [2]_, etc..).
24  * - Display workarounds. The list is applied during display clock-gating
25  *   initialization.
26  * - Workarounds that whitelist a privileged register, so that UMDs can manage
27  *   them directly. This is just a special case of an MMIO workaround (as we
28  *   write the list of these to-be-whitelisted registers to some special HW
29  *   registers).
30  * - Workaround batchbuffers, that get executed automatically by the hardware
31  *   on every HW context restore.
32  *
33  * .. [1] Please notice that there are other WAs that, due to their nature,
34  *    cannot be applied from a central place. Those are peppered around the rest
35  *    of the code, as needed.
36  *
37  * .. [2] Technically, some registers are powercontext saved & restored, so they
38  *    survive a suspend/resume. In practice, writing them again is not too
39  *    costly and simplifies things. We can revisit this in the future.
40  *
41  * Layout
42  * ~~~~~~
43  *
44  * Keep things in this file ordered by WA type, as per the above (context, GT,
45  * display, register whitelist, batchbuffer). Then, inside each type, keep the
46  * following order:
47  *
48  * - Infrastructure functions and macros
49  * - WAs per platform in standard gen/chrono order
50  * - Public functions to init or apply the given workaround type.
51  */
52
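/*
 * As a rough map from the categories above onto this file's entry points:
 * context workarounds are built by intel_engine_init_ctx_wa() and emitted by
 * intel_engine_emit_ctx_wa(), GT workarounds by intel_gt_init_workarounds()
 * and intel_gt_apply_workarounds(), and register whitelisting by
 * intel_engine_init_whitelist() and intel_engine_apply_whitelist(). The
 * helpers below implement the common i915_wa_list bookkeeping they all share.
 */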
53 static void wa_init_start(struct i915_wa_list *wal, const char *name)
54 {
55         wal->name = name;
56 }
57
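/*
 * Workaround lists are (re)allocated in chunks of this many entries (a power
 * of two); wa_init_finish() trims any unused tail once a list is complete.
 */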
58 #define WA_LIST_CHUNK (1 << 4)
59
60 static void wa_init_finish(struct i915_wa_list *wal)
61 {
62         /* Trim unused entries. */
63         if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
64                 struct i915_wa *list = kmemdup(wal->list,
65                                                wal->count * sizeof(*list),
66                                                GFP_KERNEL);
67
68                 if (list) {
69                         kfree(wal->list);
70                         wal->list = list;
71                 }
72         }
73
74         if (!wal->count)
75                 return;
76
77         DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
78                          wal->wa_count, wal->name);
79 }
80
81 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
82 {
83         unsigned int addr = i915_mmio_reg_offset(wa->reg);
84         unsigned int start = 0, end = wal->count;
85         const unsigned int grow = WA_LIST_CHUNK;
86         struct i915_wa *wa_;
87
88         GEM_BUG_ON(!is_power_of_2(grow));
89
90         if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
91                 struct i915_wa *list;
92
93                 list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
94                                      GFP_KERNEL);
95                 if (!list) {
96                         DRM_ERROR("No space for workaround init!\n");
97                         return;
98                 }
99
100                 if (wal->list)
101                         memcpy(list, wal->list, sizeof(*wa) * wal->count);
102
103                 wal->list = list;
104         }
105
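        /*
         * Binary search for an existing entry with the same register offset.
         * If one is found, merge the new value/mask into it (complaining if
         * the new mask completely overlaps bits that were already programmed)
         * and return; otherwise fall through and append a new entry.
         */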
106         while (start < end) {
107                 unsigned int mid = start + (end - start) / 2;
108
109                 if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
110                         start = mid + 1;
111                 } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
112                         end = mid;
113                 } else {
114                         wa_ = &wal->list[mid];
115
116                         if ((wa->mask & ~wa_->mask) == 0) {
117                                 DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
118                                           i915_mmio_reg_offset(wa_->reg),
119                                           wa_->mask, wa_->val);
120
121                                 wa_->val &= ~wa->mask;
122                         }
123
124                         wal->wa_count++;
125                         wa_->val |= wa->val;
126                         wa_->mask |= wa->mask;
127                         wa_->read |= wa->read;
128                         return;
129                 }
130         }
131
132         wal->wa_count++;
133         wa_ = &wal->list[wal->count++];
134         *wa_ = *wa;
135
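        /*
         * The new entry is appended at the end and then sifted towards the
         * start so the list stays sorted by register offset, which the binary
         * search above relies on.
         */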
136         while (wa_-- > wal->list) {
137                 GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
138                            i915_mmio_reg_offset(wa_[1].reg));
139                 if (i915_mmio_reg_offset(wa_[1].reg) >
140                     i915_mmio_reg_offset(wa_[0].reg))
141                         break;
142
143                 swap(wa_[1], wa_[0]);
144         }
145 }
146
147 static void
148 wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
149                    u32 val)
150 {
151         struct i915_wa wa = {
152                 .reg  = reg,
153                 .mask = mask,
154                 .val  = val,
155                 .read = mask,
156         };
157
158         _wa_add(wal, &wa);
159 }
160
161 static void
162 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
163 {
164         wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
165 }
166
167 static void
168 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
169 {
170         wa_write_masked_or(wal, reg, ~0, val);
171 }
172
173 static void
174 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
175 {
176         wa_write_masked_or(wal, reg, val, val);
177 }
178
179 static void
180 ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
181 {
182         struct i915_wa wa = {
183                 .reg  = reg,
184                 .mask = mask,
185                 .val  = val,
186                 /* Bonkers HW, skip verifying */
187         };
188
189         _wa_add(wal, &wa);
190 }
191
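/*
 * The WA_*_MASKED helpers below target "masked" registers: only bits whose
 * counterpart in the upper 16 bits of the write is set actually take effect.
 * _MASKED_BIT_ENABLE(x) thus encodes (x << 16) | x, _MASKED_BIT_DISABLE(x)
 * encodes x << 16, and _MASKED_FIELD(mask, value) encodes (mask << 16) | value,
 * so individual bits can be set or cleared with a single write and no
 * read-modify-write cycle.
 */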
192 #define WA_SET_BIT_MASKED(addr, mask) \
193         wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
194
195 #define WA_CLR_BIT_MASKED(addr, mask) \
196         wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
197
198 #define WA_SET_FIELD_MASKED(addr, mask, value) \
199         wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
200
201 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
202                                       struct i915_wa_list *wal)
203 {
204         WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
205
206         /* WaDisableAsyncFlipPerfMode:bdw,chv */
207         WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
208
209         /* WaDisablePartialInstShootdown:bdw,chv */
210         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
211                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
212
213         /* Use Force Non-Coherent whenever executing a 3D context. This is a
214          * workaround for a possible hang in the unlikely event a TLB
215          * invalidation occurs during a PSD flush.
216          */
217         /* WaForceEnableNonCoherent:bdw,chv */
218         /* WaHdcDisableFetchWhenMasked:bdw,chv */
219         WA_SET_BIT_MASKED(HDC_CHICKEN0,
220                           HDC_DONOT_FETCH_MEM_WHEN_MASKED |
221                           HDC_FORCE_NON_COHERENT);
222
223         /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
224          * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
225          *  polygons in the same 8x4 pixel/sample area to be processed without
226          *  stalling waiting for the earlier ones to write to Hierarchical Z
227          *  buffer."
228          *
229          * This optimization is off by default for BDW and CHV; turn it on.
230          */
231         WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
232
233         /* Wa4x4STCOptimizationDisable:bdw,chv */
234         WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
235
236         /*
237          * BSpec recommends 8x4 when MSAA is used,
238          * however in practice 16x4 seems fastest.
239          *
240          * Note that PS/WM thread counts depend on the WIZ hashing
241          * disable bit, which we don't touch here, but it's good
242          * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
243          */
244         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
245                             GEN6_WIZ_HASHING_MASK,
246                             GEN6_WIZ_HASHING_16x4);
247 }
248
249 static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
250                                      struct i915_wa_list *wal)
251 {
252         struct drm_i915_private *i915 = engine->i915;
253
254         gen8_ctx_workarounds_init(engine, wal);
255
256         /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
257         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
258
259         /* WaDisableDopClockGating:bdw
260          *
261          * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
262          * to disable EUTC clock gating.
263          */
264         WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
265                           DOP_CLOCK_GATING_DISABLE);
266
267         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
268                           GEN8_SAMPLER_POWER_BYPASS_DIS);
269
270         WA_SET_BIT_MASKED(HDC_CHICKEN0,
271                           /* WaForceContextSaveRestoreNonCoherent:bdw */
272                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
273                           /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
274                           (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
275 }
276
277 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
278                                      struct i915_wa_list *wal)
279 {
280         gen8_ctx_workarounds_init(engine, wal);
281
282         /* WaDisableThreadStallDopClockGating:chv */
283         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
284
285         /* Improve HiZ throughput on CHV. */
286         WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
287 }
288
289 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
290                                       struct i915_wa_list *wal)
291 {
292         struct drm_i915_private *i915 = engine->i915;
293
294         if (HAS_LLC(i915)) {
295                 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
296                  *
297                  * Must match Display Engine. See
298                  * WaCompressedResourceDisplayNewHashMode.
299                  */
300                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
301                                   GEN9_PBE_COMPRESSED_HASH_SELECTION);
302                 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
303                                   GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
304         }
305
306         /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
307         /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
308         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
309                           FLOW_CONTROL_ENABLE |
310                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
311
312         /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
313         if (!IS_COFFEELAKE(i915))
314                 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
315                                   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
316
317         /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
318         /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
319         WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
320                           GEN9_ENABLE_YV12_BUGFIX |
321                           GEN9_ENABLE_GPGPU_PREEMPTION);
322
323         /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
324         /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
325         WA_SET_BIT_MASKED(CACHE_MODE_1,
326                           GEN8_4x4_STC_OPTIMIZATION_DISABLE |
327                           GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
328
329         /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
330         WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
331                           GEN9_CCS_TLB_PREFETCH_ENABLE);
332
333         /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
334         WA_SET_BIT_MASKED(HDC_CHICKEN0,
335                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
336                           HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
337
338         /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
339          * both tied to WaForceContextSaveRestoreNonCoherent
340          * in some hsds for skl. We keep the tie for all gen9. The
341          * documentation is a bit hazy and so we want to get common behaviour,
342          * even though there is no clear evidence we would need both on kbl/bxt.
343          * This area has been a source of system hangs so we play it safe
344          * and mimic the skl regardless of what bspec says.
345          *
346          * Use Force Non-Coherent whenever executing a 3D context. This
347          * is a workaround for a possible hang in the unlikely event
348          * a TLB invalidation occurs during a PSD flush.
349          */
350
351         /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
352         WA_SET_BIT_MASKED(HDC_CHICKEN0,
353                           HDC_FORCE_NON_COHERENT);
354
355         /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
356         if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
357                 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
358                                   GEN8_SAMPLER_POWER_BYPASS_DIS);
359
360         /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
361         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
362
363         /*
364          * Supporting preemption with fine-granularity requires changes in the
365          * batch buffer programming. Since we can't break old userspace, we
366          * need to set our default preemption level to a safe value. Userspace is
367          * still able to use more fine-grained preemption levels, since in
368          * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
369          * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
370          * not real HW workarounds, but merely a way to start using preemption
371          * while maintaining the old contract with userspace.
372          */
373
374         /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
375         WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
376
377         /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
378         WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
379                             GEN9_PREEMPT_GPGPU_LEVEL_MASK,
380                             GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
381
382         /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
383         if (IS_GEN9_LP(i915))
384                 WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
385 }
386
387 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
388                                 struct i915_wa_list *wal)
389 {
390         struct drm_i915_private *i915 = engine->i915;
391         u8 vals[3] = { 0, 0, 0 };
392         unsigned int i;
393
394         for (i = 0; i < 3; i++) {
395                 u8 ss;
396
397                 /*
398                  * Only consider slices where one, and only one, subslice has 7
399                  * EUs
400                  */
401                 if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
402                         continue;
403
404                 /*
405                  * subslice_7eu[i] != 0 (because of the check above) and
406                  * ss_max == 4 (maximum number of subslices possible per slice)
407                  *
408                  * ->    0 <= ss <= 3;
409                  */
410                 ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
411                 vals[i] = 3 - ss;
412         }
413
414         if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
415                 return;
416
417         /* Tune IZ hashing. See intel_device_info_runtime_init() */
418         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
419                             GEN9_IZ_HASHING_MASK(2) |
420                             GEN9_IZ_HASHING_MASK(1) |
421                             GEN9_IZ_HASHING_MASK(0),
422                             GEN9_IZ_HASHING(2, vals[2]) |
423                             GEN9_IZ_HASHING(1, vals[1]) |
424                             GEN9_IZ_HASHING(0, vals[0]));
425 }
426
427 static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
428                                      struct i915_wa_list *wal)
429 {
430         gen9_ctx_workarounds_init(engine, wal);
431         skl_tune_iz_hashing(engine, wal);
432 }
433
434 static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
435                                      struct i915_wa_list *wal)
436 {
437         gen9_ctx_workarounds_init(engine, wal);
438
439         /* WaDisableThreadStallDopClockGating:bxt */
440         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
441                           STALL_DOP_GATING_DISABLE);
442
443         /* WaToEnableHwFixForPushConstHWBug:bxt */
444         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
445                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
446 }
447
448 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
449                                      struct i915_wa_list *wal)
450 {
451         struct drm_i915_private *i915 = engine->i915;
452
453         gen9_ctx_workarounds_init(engine, wal);
454
455         /* WaToEnableHwFixForPushConstHWBug:kbl */
456         if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
457                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
458                                   GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
459
460         /* WaDisableSbeCacheDispatchPortSharing:kbl */
461         WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
462                           GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
463 }
464
465 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
466                                      struct i915_wa_list *wal)
467 {
468         gen9_ctx_workarounds_init(engine, wal);
469
470         /* WaToEnableHwFixForPushConstHWBug:glk */
471         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
472                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
473 }
474
475 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
476                                      struct i915_wa_list *wal)
477 {
478         gen9_ctx_workarounds_init(engine, wal);
479
480         /* WaToEnableHwFixForPushConstHWBug:cfl */
481         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
482                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
483
484         /* WaDisableSbeCacheDispatchPortSharing:cfl */
485         WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
486                           GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
487 }
488
489 static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
490                                      struct i915_wa_list *wal)
491 {
492         struct drm_i915_private *i915 = engine->i915;
493
494         /* WaForceContextSaveRestoreNonCoherent:cnl */
495         WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
496                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
497
498         /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
499         if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
500                 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
501
502         /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
503         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
504                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
505
506         /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
507         if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
508                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
509                                   GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
510
511         /* WaPushConstantDereferenceHoldDisable:cnl */
512         WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
513
514         /* FtrEnableFastAnisoL1BankingFix:cnl */
515         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
516
517         /* WaDisable3DMidCmdPreemption:cnl */
518         WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
519
520         /* WaDisableGPGPUMidCmdPreemption:cnl */
521         WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
522                             GEN9_PREEMPT_GPGPU_LEVEL_MASK,
523                             GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
524
525         /* WaDisableEarlyEOT:cnl */
526         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
527 }
528
529 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
530                                      struct i915_wa_list *wal)
531 {
532         struct drm_i915_private *i915 = engine->i915;
533
534         /* WaDisableBankHangMode:icl */
535         wa_write(wal,
536                  GEN8_L3CNTLREG,
537                  intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
538                  GEN8_ERRDETBCTRL);
539
546         /* Wa_1604370585:icl (pre-prod)
547          * Formerly known as WaPushConstantDereferenceHoldDisable
548          */
549         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
550                 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
551                                   PUSH_CONSTANT_DEREF_DISABLE);
552
553         /* WaForceEnableNonCoherent:icl
554          * This is not the same workaround as in early Gen9 platforms, where
555          * lacking this could cause system hangs, but coherency performance
556          * overhead is high and only a few compute workloads really need it
557          * (the register is whitelisted in hardware now, so UMDs can opt in
558          * for coherency if they have a good reason).
559          */
560         WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
561
562         /* Wa_2006611047:icl (pre-prod)
563          * Formerly known as WaDisableImprovedTdlClkGating
564          */
565         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
566                 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
567                                   GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
568
569         /* Wa_2006665173:icl (pre-prod) */
570         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
571                 WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
572                                   GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
573
574         /* WaEnableFloatBlendOptimization:icl */
575         wa_write_masked_or(wal,
576                            GEN10_CACHE_MODE_SS,
577                            0, /* write-only, so skip validation */
578                            _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
579
580         /* WaDisableGPGPUMidThreadPreemption:icl */
581         WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
582                             GEN9_PREEMPT_GPGPU_LEVEL_MASK,
583                             GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
584
585         /* allow headerless messages for preemptible GPGPU context */
586         WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
587                           GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
588 }
589
590 static void
591 __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
592                            struct i915_wa_list *wal,
593                            const char *name)
594 {
595         struct drm_i915_private *i915 = engine->i915;
596
597         if (engine->class != RENDER_CLASS)
598                 return;
599
600         wa_init_start(wal, name);
601
602         if (IS_GEN(i915, 11))
603                 icl_ctx_workarounds_init(engine, wal);
604         else if (IS_CANNONLAKE(i915))
605                 cnl_ctx_workarounds_init(engine, wal);
606         else if (IS_COFFEELAKE(i915))
607                 cfl_ctx_workarounds_init(engine, wal);
608         else if (IS_GEMINILAKE(i915))
609                 glk_ctx_workarounds_init(engine, wal);
610         else if (IS_KABYLAKE(i915))
611                 kbl_ctx_workarounds_init(engine, wal);
612         else if (IS_BROXTON(i915))
613                 bxt_ctx_workarounds_init(engine, wal);
614         else if (IS_SKYLAKE(i915))
615                 skl_ctx_workarounds_init(engine, wal);
616         else if (IS_CHERRYVIEW(i915))
617                 chv_ctx_workarounds_init(engine, wal);
618         else if (IS_BROADWELL(i915))
619                 bdw_ctx_workarounds_init(engine, wal);
620         else if (INTEL_GEN(i915) < 8)
621                 return;
622         else
623                 MISSING_CASE(INTEL_GEN(i915));
624
625         wa_init_finish(wal);
626 }
627
628 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
629 {
630         __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
631 }
632
633 int intel_engine_emit_ctx_wa(struct i915_request *rq)
634 {
635         struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
636         struct i915_wa *wa;
637         unsigned int i;
638         u32 *cs;
639         int ret;
640
641         if (wal->count == 0)
642                 return 0;
643
644         ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
645         if (ret)
646                 return ret;
647
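        /*
         * The whole context workaround list is emitted as one
         * MI_LOAD_REGISTER_IMM packet: a header dword, a register/value pair
         * per workaround and a trailing MI_NOOP that keeps the emission an
         * even number of dwords, bracketed by the flushes on either side.
         */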
648         cs = intel_ring_begin(rq, (wal->count * 2 + 2));
649         if (IS_ERR(cs))
650                 return PTR_ERR(cs);
651
652         *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
653         for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
654                 *cs++ = i915_mmio_reg_offset(wa->reg);
655                 *cs++ = wa->val;
656         }
657         *cs++ = MI_NOOP;
658
659         intel_ring_advance(rq, cs);
660
661         ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
662         if (ret)
663                 return ret;
664
665         return 0;
666 }
667
668 static void
669 gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
670 {
671         /* WaDisableKillLogic:bxt,skl,kbl */
672         if (!IS_COFFEELAKE(i915))
673                 wa_write_or(wal,
674                             GAM_ECOCHK,
675                             ECOCHK_DIS_TLB);
676
677         if (HAS_LLC(i915)) {
678                 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
679                  *
680                  * Must match Display Engine. See
681                  * WaCompressedResourceDisplayNewHashMode.
682                  */
683                 wa_write_or(wal,
684                             MMCD_MISC_CTRL,
685                             MMCD_PCLA | MMCD_HOTSPOT_EN);
686         }
687
688         /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
689         wa_write_or(wal,
690                     GAM_ECOCHK,
691                     BDW_DISABLE_HDC_INVALIDATION);
692 }
693
694 static void
695 skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
696 {
697         gen9_gt_workarounds_init(i915, wal);
698
699         /* WaDisableGafsUnitClkGating:skl */
700         wa_write_or(wal,
701                     GEN7_UCGCTL4,
702                     GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
703
704         /* WaInPlaceDecompressionHang:skl */
705         if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
706                 wa_write_or(wal,
707                             GEN9_GAMT_ECO_REG_RW_IA,
708                             GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
709 }
710
711 static void
712 bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
713 {
714         gen9_gt_workarounds_init(i915, wal);
715
716         /* WaInPlaceDecompressionHang:bxt */
717         wa_write_or(wal,
718                     GEN9_GAMT_ECO_REG_RW_IA,
719                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
720 }
721
722 static void
723 kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
724 {
725         gen9_gt_workarounds_init(i915, wal);
726
727         /* WaDisableDynamicCreditSharing:kbl */
728         if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
729                 wa_write_or(wal,
730                             GAMT_CHKN_BIT_REG,
731                             GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
732
733         /* WaDisableGafsUnitClkGating:kbl */
734         wa_write_or(wal,
735                     GEN7_UCGCTL4,
736                     GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
737
738         /* WaInPlaceDecompressionHang:kbl */
739         wa_write_or(wal,
740                     GEN9_GAMT_ECO_REG_RW_IA,
741                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
742 }
743
744 static void
745 glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
746 {
747         gen9_gt_workarounds_init(i915, wal);
748 }
749
750 static void
751 cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
752 {
753         gen9_gt_workarounds_init(i915, wal);
754
755         /* WaDisableGafsUnitClkGating:cfl */
756         wa_write_or(wal,
757                     GEN7_UCGCTL4,
758                     GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
759
760         /* WaInPlaceDecompressionHang:cfl */
761         wa_write_or(wal,
762                     GEN9_GAMT_ECO_REG_RW_IA,
763                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
764 }
765
766 static void
767 wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
768 {
769         const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
770         u32 mcr_slice_subslice_mask;
771
772         /*
773          * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
774          * L3Banks could be fused off in single slice scenario. If that is
775          * the case, we might need to program MCR select to a valid L3Bank
776          * by default, to make sure we correctly read certain registers
777          * later on (in the range 0xB100 - 0xB3FF).
778          * This might be incompatible with
779          * WaProgramMgsrForCorrectSliceSpecificMmioReads.
780          * Fortunately, this should not happen in production hardware, so
781          * we only assert that this is the case (instead of implementing
782          * something more complex that requires checking the range of every
783          * MMIO read).
784          */
785         if (INTEL_GEN(i915) >= 10 &&
786             is_power_of_2(sseu->slice_mask)) {
787                 /*
788                  * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
789                  * enabled subslice, no need to redirect MCR packet
790                  */
791                 u32 slice = fls(sseu->slice_mask);
792                 u32 fuse3 =
793                         intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
794                 u8 ss_mask = sseu->subslice_mask[slice];
795
796                 u8 enabled_mask = (ss_mask | ss_mask >>
797                                    GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
798                 u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
799
800                 /*
801                  * Production silicon should have matched L3Bank and
802                  * subslice enabled
803                  */
804                 WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
805         }
806
807         if (INTEL_GEN(i915) >= 11)
808                 mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
809                                           GEN11_MCR_SUBSLICE_MASK;
810         else
811                 mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
812                                           GEN8_MCR_SUBSLICE_MASK;
813         /*
814          * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
815          * Before any MMIO read into slice/subslice specific registers, MCR
816          * packet control register needs to be programmed to point to any
817          * enabled s/ss pair. Otherwise, incorrect values will be returned.
818          * This means each subsequent MMIO read will be forwarded to an
819          * specific s/ss combination, but this is OK since these registers
820          * are consistent across s/ss in almost all cases. In the rare
821          * occasions, such as INSTDONE, where this value is dependent
822          * on s/ss combo, the read should be done with read_subslice_reg.
823          */
824         wa_write_masked_or(wal,
825                            GEN8_MCR_SELECTOR,
826                            mcr_slice_subslice_mask,
827                            intel_calculate_mcr_s_ss_select(i915));
828 }
829
830 static void
831 cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
832 {
833         wa_init_mcr(i915, wal);
834
835         /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
836         if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
837                 wa_write_or(wal,
838                             GAMT_CHKN_BIT_REG,
839                             GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
840
841         /* WaInPlaceDecompressionHang:cnl */
842         wa_write_or(wal,
843                     GEN9_GAMT_ECO_REG_RW_IA,
844                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
845 }
846
847 static void
848 icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
849 {
850         wa_init_mcr(i915, wal);
851
852         /* WaInPlaceDecompressionHang:icl */
853         wa_write_or(wal,
854                     GEN9_GAMT_ECO_REG_RW_IA,
855                     GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
856
857         /* WaModifyGamTlbPartitioning:icl */
858         wa_write_masked_or(wal,
859                            GEN11_GACB_PERF_CTRL,
860                            GEN11_HASH_CTRL_MASK,
861                            GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
862
863         /* Wa_1405766107:icl
864          * Formerly known as WaCL2SFHalfMaxAlloc
865          */
866         wa_write_or(wal,
867                     GEN11_LSN_UNSLCVC,
868                     GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
869                     GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
870
871         /* Wa_220166154:icl
872          * Formerly known as WaDisCtxReload
873          */
874         wa_write_or(wal,
875                     GEN8_GAMW_ECO_DEV_RW_IA,
876                     GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
877
878         /* Wa_1405779004:icl (pre-prod) */
879         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
880                 wa_write_or(wal,
881                             SLICE_UNIT_LEVEL_CLKGATE,
882                             MSCUNIT_CLKGATE_DIS);
883
884         /* Wa_1406680159:icl */
885         wa_write_or(wal,
886                     SUBSLICE_UNIT_LEVEL_CLKGATE,
887                     GWUNIT_CLKGATE_DIS);
888
889         /* Wa_1406838659:icl (pre-prod) */
890         if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
891                 wa_write_or(wal,
892                             INF_UNIT_LEVEL_CLKGATE,
893                             CGPSF_CLKGATE_DIS);
894
895         /* Wa_1406463099:icl
896          * Formerly known as WaGamTlbPendError
897          */
898         wa_write_or(wal,
899                     GAMT_CHKN_BIT_REG,
900                     GAMT_CHKN_DISABLE_L3_COH_PIPE);
901 }
902
903 static void
904 gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
905 {
906         if (IS_GEN(i915, 11))
907                 icl_gt_workarounds_init(i915, wal);
908         else if (IS_CANNONLAKE(i915))
909                 cnl_gt_workarounds_init(i915, wal);
910         else if (IS_COFFEELAKE(i915))
911                 cfl_gt_workarounds_init(i915, wal);
912         else if (IS_GEMINILAKE(i915))
913                 glk_gt_workarounds_init(i915, wal);
914         else if (IS_KABYLAKE(i915))
915                 kbl_gt_workarounds_init(i915, wal);
916         else if (IS_BROXTON(i915))
917                 bxt_gt_workarounds_init(i915, wal);
918         else if (IS_SKYLAKE(i915))
919                 skl_gt_workarounds_init(i915, wal);
920         else if (INTEL_GEN(i915) <= 8)
921                 return;
922         else
923                 MISSING_CASE(INTEL_GEN(i915));
924 }
925
926 void intel_gt_init_workarounds(struct drm_i915_private *i915)
927 {
928         struct i915_wa_list *wal = &i915->gt_wa_list;
929
930         wa_init_start(wal, "GT");
931         gt_init_workarounds(i915, wal);
932         wa_init_finish(wal);
933 }
934
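/*
 * Collect the union of forcewake domains needed to read and write every
 * register in the list, so that wa_list_apply() can take forcewake once and
 * then use the cheaper _fw accessors for each read-modify-write.
 */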
935 static enum forcewake_domains
936 wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
937 {
938         enum forcewake_domains fw = 0;
939         struct i915_wa *wa;
940         unsigned int i;
941
942         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
943                 fw |= intel_uncore_forcewake_for_reg(uncore,
944                                                      wa->reg,
945                                                      FW_REG_READ |
946                                                      FW_REG_WRITE);
947
948         return fw;
949 }
950
951 static bool
952 wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
953 {
954         if ((cur ^ wa->val) & wa->read) {
955                 DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
956                           name, from, i915_mmio_reg_offset(wa->reg),
957                           cur, cur & wa->read,
958                           wa->val, wa->mask);
959
960                 return false;
961         }
962
963         return true;
964 }
965
966 static void
967 wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
968 {
969         enum forcewake_domains fw;
970         unsigned long flags;
971         struct i915_wa *wa;
972         unsigned int i;
973
974         if (!wal->count)
975                 return;
976
977         fw = wal_get_fw_for_rmw(uncore, wal);
978
979         spin_lock_irqsave(&uncore->lock, flags);
980         intel_uncore_forcewake_get__locked(uncore, fw);
981
982         for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
983                 intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
984                 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
985                         wa_verify(wa,
986                                   intel_uncore_read_fw(uncore, wa->reg),
987                                   wal->name, "application");
988         }
989
990         intel_uncore_forcewake_put__locked(uncore, fw);
991         spin_unlock_irqrestore(&uncore->lock, flags);
992 }
993
994 void intel_gt_apply_workarounds(struct intel_gt *gt)
995 {
996         wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
997 }
998
999 static bool wa_list_verify(struct intel_uncore *uncore,
1000                            const struct i915_wa_list *wal,
1001                            const char *from)
1002 {
1003         struct i915_wa *wa;
1004         unsigned int i;
1005         bool ok = true;
1006
1007         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1008                 ok &= wa_verify(wa,
1009                                 intel_uncore_read(uncore, wa->reg),
1010                                 wal->name, from);
1011
1012         return ok;
1013 }
1014
1015 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1016 {
1017         return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
1018 }
1019
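/*
 * Whitelisting is done by programming a register's offset (with access-control
 * flags ORed into its low bits) into one of the engine's RING_FORCE_TO_NONPRIV
 * slots; only RING_MAX_NONPRIV_SLOTS such slots exist, hence the limit check
 * below.
 */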
1020 static void
1021 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
1022 {
1023         struct i915_wa wa = {
1024                 .reg = reg
1025         };
1026
1027         if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1028                 return;
1029
1030         wa.reg.reg |= flags;
1031         _wa_add(wal, &wa);
1032 }
1033
1034 static void
1035 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
1036 {
1037         whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
1038 }
1039
1040 static void gen9_whitelist_build(struct i915_wa_list *w)
1041 {
1042         /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1043         whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
1044
1045         /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
1046         whitelist_reg(w, GEN8_CS_CHICKEN1);
1047
1048         /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
1049         whitelist_reg(w, GEN8_HDC_CHICKEN1);
1050 }
1051
1052 static void skl_whitelist_build(struct intel_engine_cs *engine)
1053 {
1054         struct i915_wa_list *w = &engine->whitelist;
1055
1056         if (engine->class != RENDER_CLASS)
1057                 return;
1058
1059         gen9_whitelist_build(w);
1060
1061         /* WaDisableLSQCROPERFforOCL:skl */
1062         whitelist_reg(w, GEN8_L3SQCREG4);
1063 }
1064
1065 static void bxt_whitelist_build(struct intel_engine_cs *engine)
1066 {
1067         if (engine->class != RENDER_CLASS)
1068                 return;
1069
1070         gen9_whitelist_build(&engine->whitelist);
1071 }
1072
1073 static void kbl_whitelist_build(struct intel_engine_cs *engine)
1074 {
1075         struct i915_wa_list *w = &engine->whitelist;
1076
1077         if (engine->class != RENDER_CLASS)
1078                 return;
1079
1080         gen9_whitelist_build(w);
1081
1082         /* WaDisableLSQCROPERFforOCL:kbl */
1083         whitelist_reg(w, GEN8_L3SQCREG4);
1084 }
1085
1086 static void glk_whitelist_build(struct intel_engine_cs *engine)
1087 {
1088         struct i915_wa_list *w = &engine->whitelist;
1089
1090         if (engine->class != RENDER_CLASS)
1091                 return;
1092
1093         gen9_whitelist_build(w);
1094
1095         /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1096         whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1097 }
1098
1099 static void cfl_whitelist_build(struct intel_engine_cs *engine)
1100 {
1101         struct i915_wa_list *w = &engine->whitelist;
1102
1103         if (engine->class != RENDER_CLASS)
1104                 return;
1105
1106         gen9_whitelist_build(w);
1107
1108         /*
1109          * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
1110          *
1111          * This covers 4 registers which are next to one another:
1112          *   - PS_INVOCATION_COUNT
1113          *   - PS_INVOCATION_COUNT_UDW
1114          *   - PS_DEPTH_COUNT
1115          *   - PS_DEPTH_COUNT_UDW
1116          */
1117         whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1118                           RING_FORCE_TO_NONPRIV_RD |
1119                           RING_FORCE_TO_NONPRIV_RANGE_4);
1120 }
1121
1122 static void cnl_whitelist_build(struct intel_engine_cs *engine)
1123 {
1124         struct i915_wa_list *w = &engine->whitelist;
1125
1126         if (engine->class != RENDER_CLASS)
1127                 return;
1128
1129         /* WaEnablePreemptionGranularityControlByUMD:cnl */
1130         whitelist_reg(w, GEN8_CS_CHICKEN1);
1131 }
1132
1133 static void icl_whitelist_build(struct intel_engine_cs *engine)
1134 {
1135         struct i915_wa_list *w = &engine->whitelist;
1136
1137         switch (engine->class) {
1138         case RENDER_CLASS:
1139                 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
1140                 whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
1141
1142                 /* WaAllowUMDToModifySamplerMode:icl */
1143                 whitelist_reg(w, GEN10_SAMPLER_MODE);
1144
1145                 /* WaEnableStateCacheRedirectToCS:icl */
1146                 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1147
1148                 /*
1149                  * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
1150                  *
1151                  * This covers 4 registers which are next to one another:
1152                  *   - PS_INVOCATION_COUNT
1153                  *   - PS_INVOCATION_COUNT_UDW
1154                  *   - PS_DEPTH_COUNT
1155                  *   - PS_DEPTH_COUNT_UDW
1156                  */
1157                 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1158                                   RING_FORCE_TO_NONPRIV_RD |
1159                                   RING_FORCE_TO_NONPRIV_RANGE_4);
1160                 break;
1161
1162         case VIDEO_DECODE_CLASS:
1163                 /* hucStatusRegOffset */
1164                 whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
1165                                   RING_FORCE_TO_NONPRIV_RD);
1166                 /* hucUKernelHdrInfoRegOffset */
1167                 whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
1168                                   RING_FORCE_TO_NONPRIV_RD);
1169                 /* hucStatus2RegOffset */
1170                 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
1171                                   RING_FORCE_TO_NONPRIV_RD);
1172                 break;
1173
1174         default:
1175                 break;
1176         }
1177 }
1178
1179 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
1180 {
1181         struct drm_i915_private *i915 = engine->i915;
1182         struct i915_wa_list *w = &engine->whitelist;
1183
1184         wa_init_start(w, "whitelist");
1185
1186         if (IS_GEN(i915, 11))
1187                 icl_whitelist_build(engine);
1188         else if (IS_CANNONLAKE(i915))
1189                 cnl_whitelist_build(engine);
1190         else if (IS_COFFEELAKE(i915))
1191                 cfl_whitelist_build(engine);
1192         else if (IS_GEMINILAKE(i915))
1193                 glk_whitelist_build(engine);
1194         else if (IS_KABYLAKE(i915))
1195                 kbl_whitelist_build(engine);
1196         else if (IS_BROXTON(i915))
1197                 bxt_whitelist_build(engine);
1198         else if (IS_SKYLAKE(i915))
1199                 skl_whitelist_build(engine);
1200         else if (INTEL_GEN(i915) <= 8)
1201                 return;
1202         else
1203                 MISSING_CASE(INTEL_GEN(i915));
1204
1205         wa_init_finish(w);
1206 }
1207
1208 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
1209 {
1210         const struct i915_wa_list *wal = &engine->whitelist;
1211         struct intel_uncore *uncore = engine->uncore;
1212         const u32 base = engine->mmio_base;
1213         struct i915_wa *wa;
1214         unsigned int i;
1215
1216         if (!wal->count)
1217                 return;
1218
1219         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1220                 intel_uncore_write(uncore,
1221                                    RING_FORCE_TO_NONPRIV(base, i),
1222                                    i915_mmio_reg_offset(wa->reg));
1223
1224         /* And clear the rest just in case of garbage */
1225         for (; i < RING_MAX_NONPRIV_SLOTS; i++)
1226                 intel_uncore_write(uncore,
1227                                    RING_FORCE_TO_NONPRIV(base, i),
1228                                    i915_mmio_reg_offset(RING_NOPID(base)));
1229 }
1230
1231 static void
1232 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1233 {
1234         struct drm_i915_private *i915 = engine->i915;
1235
1236         if (IS_GEN(i915, 11)) {
1237                 /* This is not a Wa. Enable for better image quality */
1238                 wa_masked_en(wal,
1239                              _3D_CHICKEN3,
1240                              _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
1241
1242                 /* WaPipelineFlushCoherentLines:icl */
1243                 ignore_wa_write_or(wal,
1244                                    GEN8_L3SQCREG4,
1245                                    GEN8_LQSC_FLUSH_COHERENT_LINES,
1246                                    GEN8_LQSC_FLUSH_COHERENT_LINES);
1247
1248                 /*
1249                  * Wa_1405543622:icl
1250                  * Formerly known as WaGAPZPriorityScheme
1251                  */
1252                 wa_write_or(wal,
1253                             GEN8_GARBCNTL,
1254                             GEN11_ARBITRATION_PRIO_ORDER_MASK);
1255
1256                 /*
1257                  * Wa_1604223664:icl
1258                  * Formerly known as WaL3BankAddressHashing
1259                  */
1260                 wa_write_masked_or(wal,
1261                                    GEN8_GARBCNTL,
1262                                    GEN11_HASH_CTRL_EXCL_MASK,
1263                                    GEN11_HASH_CTRL_EXCL_BIT0);
1264                 wa_write_masked_or(wal,
1265                                    GEN11_GLBLINVL,
1266                                    GEN11_BANK_HASH_ADDR_EXCL_MASK,
1267                                    GEN11_BANK_HASH_ADDR_EXCL_BIT0);
1268
1269                 /*
1270                  * Wa_1405733216:icl
1271                  * Formerly known as WaDisableCleanEvicts
1272                  */
1273                 ignore_wa_write_or(wal,
1274                                    GEN8_L3SQCREG4,
1275                                    GEN11_LQSC_CLEAN_EVICT_DISABLE,
1276                                    GEN11_LQSC_CLEAN_EVICT_DISABLE);
1277
1278                 /* WaForwardProgressSoftReset:icl */
1279                 wa_write_or(wal,
1280                             GEN10_SCRATCH_LNCF2,
1281                             PMFLUSHDONE_LNICRSDROP |
1282                             PMFLUSH_GAPL3UNBLOCK |
1283                             PMFLUSHDONE_LNEBLK);
1284
1285                 /* Wa_1406609255:icl (pre-prod) */
1286                 if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
1287                         wa_write_or(wal,
1288                                     GEN7_SARCHKMD,
1289                                     GEN7_DISABLE_DEMAND_PREFETCH);
1290
1291                 /* Wa_1606682166:icl */
1292                 wa_write_or(wal,
1293                             GEN7_SARCHKMD,
1294                             GEN7_DISABLE_SAMPLER_PREFETCH);
1295         }
1296
1297         if (IS_GEN_RANGE(i915, 9, 11)) {
1298                 /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
1299                 wa_masked_en(wal,
1300                              GEN7_FF_SLICE_CS_CHICKEN1,
1301                              GEN9_FFSC_PERCTX_PREEMPT_CTRL);
1302         }
1303
1304         if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
1305                 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
1306                 wa_write_or(wal,
1307                             GEN8_GARBCNTL,
1308                             GEN9_GAPS_TSV_CREDIT_DISABLE);
1309         }
1310
1311         if (IS_BROXTON(i915)) {
1312                 /* WaDisablePooledEuLoadBalancingFix:bxt */
1313                 wa_masked_en(wal,
1314                              FF_SLICE_CS_CHICKEN2,
1315                              GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1316         }
1317
1318         if (IS_GEN(i915, 9)) {
1319                 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
1320                 wa_masked_en(wal,
1321                              GEN9_CSFE_CHICKEN1_RCS,
1322                              GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
1323
1324                 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
1325                 wa_write_or(wal,
1326                             BDW_SCRATCH1,
1327                             GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
1328
1329                 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
1330                 if (IS_GEN9_LP(i915))
1331                         wa_write_masked_or(wal,
1332                                            GEN8_L3SQCREG1,
1333                                            L3_PRIO_CREDITS_MASK,
1334                                            L3_GENERAL_PRIO_CREDITS(62) |
1335                                            L3_HIGH_PRIO_CREDITS(2));
1336
1337                 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
1338                 wa_write_or(wal,
1339                             GEN8_L3SQCREG4,
1340                             GEN8_LQSC_FLUSH_COHERENT_LINES);
1341         }
1342 }
1343
1344 static void
1345 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1346 {
1347         struct drm_i915_private *i915 = engine->i915;
1348
1349         /* WaKBLVECSSemaphoreWaitPoll:kbl */
1350         if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
1351                 wa_write(wal,
1352                          RING_SEMA_WAIT_POLL(engine->mmio_base),
1353                          1);
1354         }
1355 }
1356
1357 static void
1358 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
1359 {
1360         if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
1361                 return;
1362
1363         if (engine->class == RENDER_CLASS)
1364                 rcs_engine_wa_init(engine, wal);
1365         else
1366                 xcs_engine_wa_init(engine, wal);
1367 }
1368
1369 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
1370 {
1371         struct i915_wa_list *wal = &engine->wa_list;
1372
1373         if (INTEL_GEN(engine->i915) < 8)
1374                 return;
1375
1376         wa_init_start(wal, engine->name);
1377         engine_init_workarounds(engine, wal);
1378         wa_init_finish(wal);
1379 }
1380
1381 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
1382 {
1383         wa_list_apply(engine->uncore, &engine->wa_list);
1384 }
1385
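/*
 * Allocate and pin a scratch buffer with room for one u32 readback per
 * workaround; wa_list_srm() points its register stores at this buffer so
 * engine_wa_list_verify() can compare the captured values afterwards.
 */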
1386 static struct i915_vma *
1387 create_scratch(struct i915_address_space *vm, int count)
1388 {
1389         struct drm_i915_gem_object *obj;
1390         struct i915_vma *vma;
1391         unsigned int size;
1392         int err;
1393
1394         size = round_up(count * sizeof(u32), PAGE_SIZE);
1395         obj = i915_gem_object_create_internal(vm->i915, size);
1396         if (IS_ERR(obj))
1397                 return ERR_CAST(obj);
1398
1399         i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
1400
1401         vma = i915_vma_instance(obj, vm, NULL);
1402         if (IS_ERR(vma)) {
1403                 err = PTR_ERR(vma);
1404                 goto err_obj;
1405         }
1406
1407         err = i915_vma_pin(vma, 0, 0,
1408                            i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
1409         if (err)
1410                 goto err_obj;
1411
1412         return vma;
1413
1414 err_obj:
1415         i915_gem_object_put(obj);
1416         return ERR_PTR(err);
1417 }
1418
1419 static int
1420 wa_list_srm(struct i915_request *rq,
1421             const struct i915_wa_list *wal,
1422             struct i915_vma *vma)
1423 {
1424         const struct i915_wa *wa;
1425         unsigned int i;
1426         u32 srm, *cs;
1427
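        /*
         * On gen8+ MI_STORE_REGISTER_MEM takes a 64-bit destination address,
         * making the command one dword longer; incrementing the opcode bumps
         * its length field accordingly. The trailing zero dword emitted below
         * is the upper address half on gen8+ and harmless padding otherwise.
         */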
1428         srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1429         if (INTEL_GEN(rq->i915) >= 8)
1430                 srm++;
1431
1432         cs = intel_ring_begin(rq, 4 * wal->count);
1433         if (IS_ERR(cs))
1434                 return PTR_ERR(cs);
1435
1436         for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1437                 *cs++ = srm;
1438                 *cs++ = i915_mmio_reg_offset(wa->reg);
1439                 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
1440                 *cs++ = 0;
1441         }
1442         intel_ring_advance(rq, cs);
1443
1444         return 0;
1445 }
1446
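/*
 * Readback-based verification: capture every workaround register from the
 * GPU's point of view via SRM commands in a request, wait for the request to
 * complete and then run wa_verify() on the stored values.
 */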
1447 static int engine_wa_list_verify(struct intel_context *ce,
1448                                  const struct i915_wa_list * const wal,
1449                                  const char *from)
1450 {
1451         const struct i915_wa *wa;
1452         struct i915_request *rq;
1453         struct i915_vma *vma;
1454         unsigned int i;
1455         u32 *results;
1456         int err;
1457
1458         if (!wal->count)
1459                 return 0;
1460
1461         vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
1462         if (IS_ERR(vma))
1463                 return PTR_ERR(vma);
1464
1465         rq = intel_context_create_request(ce);
1466         if (IS_ERR(rq)) {
1467                 err = PTR_ERR(rq);
1468                 goto err_vma;
1469         }
1470
1471         err = wa_list_srm(rq, wal, vma);
1472         if (err)
1473                 goto err_vma;
1474
1475         i915_request_add(rq);
1476         if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1477                 err = -ETIME;
1478                 goto err_vma;
1479         }
1480
1481         results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
1482         if (IS_ERR(results)) {
1483                 err = PTR_ERR(results);
1484                 goto err_vma;
1485         }
1486
1487         err = 0;
1488         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1489                 if (!wa_verify(wa, results[i], wal->name, from))
1490                         err = -ENXIO;
1491
1492         i915_gem_object_unpin_map(vma->obj);
1493
1494 err_vma:
1495         i915_vma_unpin(vma);
1496         i915_vma_put(vma);
1497         return err;
1498 }
1499
1500 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
1501                                     const char *from)
1502 {
1503         return engine_wa_list_verify(engine->kernel_context,
1504                                      &engine->wa_list,
1505                                      from);
1506 }
1507
1508 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1509 #include "selftest_workarounds.c"
1510 #endif