Merge remote-tracking branches 'regmap/topic/const' and 'regmap/topic/hwspinlock...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / intel_engine_cs.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include "i915_drv.h"
26 #include "intel_ringbuffer.h"
27 #include "intel_lrc.h"
28
/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE              (17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE             (18 * PAGE_SIZE)

/* Logical-ring (execlists) render context image sizes. */
#define GEN8_LR_CONTEXT_RENDER_SIZE     (20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE     (22 * PAGE_SIZE)

/* Non-render engines carry far less context state. */
#define GEN8_LR_CONTEXT_OTHER_SIZE      ( 2 * PAGE_SIZE)
44
/* Static per-class properties shared by all engine instances of a class. */
struct engine_class_info {
        const char *name;       /* class name prefix, e.g. "rcs", "vcs" */
        int (*init_legacy)(struct intel_engine_cs *engine);     /* ringbuffer mode */
        int (*init_execlists)(struct intel_engine_cs *engine);  /* execlists mode */
};
50
/* Class table, indexed by engine class; selects the name prefix and the
 * submission-mode specific init function (see intel_engines_init()).
 */
static const struct engine_class_info intel_engine_classes[] = {
        [RENDER_CLASS] = {
                .name = "rcs",
                .init_execlists = logical_render_ring_init,
                .init_legacy = intel_init_render_ring_buffer,
        },
        [COPY_ENGINE_CLASS] = {
                .name = "bcs",
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_blt_ring_buffer,
        },
        [VIDEO_DECODE_CLASS] = {
                .name = "vcs",
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_bsd_ring_buffer,
        },
        [VIDEO_ENHANCEMENT_CLASS] = {
                .name = "vecs",
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_vebox_ring_buffer,
        },
};
73
/* Static per-instance properties, copied into intel_engine_cs at setup. */
struct engine_info {
        unsigned int hw_id;     /* hardware engine id (also used as guc_id) */
        unsigned int uabi_id;   /* userspace execbuf identifier (I915_EXEC_*) */
        u8 class;               /* index into intel_engine_classes[] */
        u8 instance;            /* instance number within the class */
        u32 mmio_base;          /* base of this engine's register range */
        unsigned irq_shift;     /* bit shift of this engine's interrupts */
};
82
/* Per-engine-instance static info, indexed by enum intel_engine_id. */
static const struct engine_info intel_engines[] = {
        [RCS] = {
                .hw_id = RCS_HW,
                .uabi_id = I915_EXEC_RENDER,
                .class = RENDER_CLASS,
                .instance = 0,
                .mmio_base = RENDER_RING_BASE,
                .irq_shift = GEN8_RCS_IRQ_SHIFT,
        },
        [BCS] = {
                .hw_id = BCS_HW,
                .uabi_id = I915_EXEC_BLT,
                .class = COPY_ENGINE_CLASS,
                .instance = 0,
                .mmio_base = BLT_RING_BASE,
                .irq_shift = GEN8_BCS_IRQ_SHIFT,
        },
        [VCS] = {
                .hw_id = VCS_HW,
                .uabi_id = I915_EXEC_BSD,
                .class = VIDEO_DECODE_CLASS,
                .instance = 0,
                .mmio_base = GEN6_BSD_RING_BASE,
                .irq_shift = GEN8_VCS1_IRQ_SHIFT,
        },
        [VCS2] = {
                .hw_id = VCS2_HW,
                .uabi_id = I915_EXEC_BSD,       /* both BSD rings share the uabi id */
                .class = VIDEO_DECODE_CLASS,
                .instance = 1,
                .mmio_base = GEN8_BSD2_RING_BASE,
                .irq_shift = GEN8_VCS2_IRQ_SHIFT,
        },
        [VECS] = {
                .hw_id = VECS_HW,
                .uabi_id = I915_EXEC_VEBOX,
                .class = VIDEO_ENHANCEMENT_CLASS,
                .instance = 0,
                .mmio_base = VEBOX_RING_BASE,
                .irq_shift = GEN8_VECS_IRQ_SHIFT,
        },
};
125
/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
        u32 cxt_size;

        BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

        switch (class) {
        case RENDER_CLASS:
                switch (INTEL_GEN(dev_priv)) {
                default:
                        MISSING_CASE(INTEL_GEN(dev_priv));
                        /* fall through - assume the newest known layout */
                case 10:
                case 9:
                        return GEN9_LR_CONTEXT_RENDER_SIZE;
                case 8:
                        /* Gen8 size depends on the submission mode in use */
                        return i915.enable_execlists ?
                               GEN8_LR_CONTEXT_RENDER_SIZE :
                               GEN8_CXT_TOTAL_SIZE;
                case 7:
                        if (IS_HASWELL(dev_priv))
                                return HSW_CXT_TOTAL_SIZE;

                        /* Read the context size from hw and round to pages */
                        cxt_size = I915_READ(GEN7_CXT_SIZE);
                        return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
                                        PAGE_SIZE);
                case 6:
                        cxt_size = I915_READ(CXT_SIZE);
                        return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
                                        PAGE_SIZE);
                case 5:
                case 4:
                case 3:
                case 2:
                /* For the special day when i810 gets merged. */
                case 1:
                        return 0;
                }
                break;
        default:
                MISSING_CASE(class);
                /* fall through - treat unknown classes like the other LRCs */
        case VIDEO_DECODE_CLASS:
        case VIDEO_ENHANCEMENT_CLASS:
        case COPY_ENGINE_CLASS:
                if (INTEL_GEN(dev_priv) < 8)
                        return 0;
                return GEN8_LR_CONTEXT_OTHER_SIZE;
        }
}
189
190 static int
191 intel_engine_setup(struct drm_i915_private *dev_priv,
192                    enum intel_engine_id id)
193 {
194         const struct engine_info *info = &intel_engines[id];
195         const struct engine_class_info *class_info;
196         struct intel_engine_cs *engine;
197
198         GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
199         class_info = &intel_engine_classes[info->class];
200
201         GEM_BUG_ON(dev_priv->engine[id]);
202         engine = kzalloc(sizeof(*engine), GFP_KERNEL);
203         if (!engine)
204                 return -ENOMEM;
205
206         engine->id = id;
207         engine->i915 = dev_priv;
208         WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
209                          class_info->name, info->instance) >=
210                 sizeof(engine->name));
211         engine->uabi_id = info->uabi_id;
212         engine->hw_id = engine->guc_id = info->hw_id;
213         engine->mmio_base = info->mmio_base;
214         engine->irq_shift = info->irq_shift;
215         engine->class = info->class;
216         engine->instance = info->instance;
217
218         engine->context_size = __intel_engine_context_size(dev_priv,
219                                                            engine->class);
220         if (WARN_ON(engine->context_size > BIT(20)))
221                 engine->context_size = 0;
222
223         /* Nothing to do here, execute in order of dependencies */
224         engine->schedule = NULL;
225
226         ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
227
228         dev_priv->engine[id] = engine;
229         return 0;
230 }
231
232 /**
233  * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
234  * @dev_priv: i915 device private
235  *
236  * Return: non-zero if the initialization failed.
237  */
238 int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
239 {
240         struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
241         const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
242         struct intel_engine_cs *engine;
243         enum intel_engine_id id;
244         unsigned int mask = 0;
245         unsigned int i;
246         int err;
247
248         WARN_ON(ring_mask == 0);
249         WARN_ON(ring_mask &
250                 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
251
252         for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
253                 if (!HAS_ENGINE(dev_priv, i))
254                         continue;
255
256                 err = intel_engine_setup(dev_priv, i);
257                 if (err)
258                         goto cleanup;
259
260                 mask |= ENGINE_MASK(i);
261         }
262
263         /*
264          * Catch failures to update intel_engines table when the new engines
265          * are added to the driver by a warning and disabling the forgotten
266          * engines.
267          */
268         if (WARN_ON(mask != ring_mask))
269                 device_info->ring_mask = mask;
270
271         /* We always presume we have at least RCS available for later probing */
272         if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
273                 err = -ENODEV;
274                 goto cleanup;
275         }
276
277         device_info->num_rings = hweight32(mask);
278
279         return 0;
280
281 cleanup:
282         for_each_engine(engine, dev_priv, id)
283                 kfree(engine);
284         return err;
285 }
286
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id, err_id;
        int err;

        for_each_engine(engine, dev_priv, id) {
                const struct engine_class_info *class_info =
                        &intel_engine_classes[engine->class];
                int (*init)(struct intel_engine_cs *engine);

                /* Pick the submission backend configured for this class. */
                if (i915.enable_execlists)
                        init = class_info->init_execlists;
                else
                        init = class_info->init_legacy;

                /* Remember which engine failed: on error, engines with
                 * id < err_id were fully initialised, the rest were not.
                 */
                err = -EINVAL;
                err_id = id;

                if (GEM_WARN_ON(!init))
                        goto cleanup;

                err = init(engine);
                if (err)
                        goto cleanup;

                GEM_BUG_ON(!engine->submit_request);
        }

        return 0;

cleanup:
        for_each_engine(engine, dev_priv, id) {
                if (id >= err_id) {
                        /* Never (fully) initialised; just free the alloc. */
                        kfree(engine);
                        dev_priv->engine[id] = NULL;
                } else {
                        /* Fully initialised; run the proper teardown. */
                        dev_priv->gt.cleanup_engine(engine);
                }
        }
        return err;
}
335
/* Reset the engine's breadcrumb seqno (and any semaphore tracking state)
 * to @seqno, then wake any waiters so they re-sample the new value.
 */
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
        struct drm_i915_private *dev_priv = engine->i915;

        /* Our semaphore implementation is strictly monotonic (i.e. we proceed
         * so long as the semaphore value in the register/page is greater
         * than the sync value), so whenever we reset the seqno,
         * so long as we reset the tracking semaphore value to 0, it will
         * always be before the next request's seqno. If we don't reset
         * the semaphore value, then when the seqno moves backwards all
         * future waits will complete instantly (causing rendering corruption).
         */
        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
                if (HAS_VEBOX(dev_priv))
                        I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
        }
        if (dev_priv->semaphore) {
                struct page *page = i915_vma_first_page(dev_priv->semaphore);
                void *semaphores;

                /* Semaphores are in noncoherent memory, flush to be safe */
                semaphores = kmap_atomic(page);
                memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
                       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
                drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
                                       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
                kunmap_atomic(semaphores);
        }

        /* Publish the new seqno in the HWSP and clear the stale irq flag. */
        intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
        clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        /* After manually advancing the seqno, fake the interrupt in case
         * there are any waiters for that seqno.
         */
        intel_engine_wakeup(engine);

        GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}
377
378 static void intel_engine_init_timeline(struct intel_engine_cs *engine)
379 {
380         engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
381 }
382
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
        /* Start with an empty execlist queue. */
        engine->execlist_queue = RB_ROOT;
        engine->execlist_first = NULL;

        intel_engine_init_timeline(engine);
        intel_engine_init_hangcheck(engine);
        i915_gem_batch_pool_init(engine, &engine->batch_pool);

        intel_engine_init_cmd_parser(engine);
}
403
404 int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
405 {
406         struct drm_i915_gem_object *obj;
407         struct i915_vma *vma;
408         int ret;
409
410         WARN_ON(engine->scratch);
411
412         obj = i915_gem_object_create_stolen(engine->i915, size);
413         if (!obj)
414                 obj = i915_gem_object_create_internal(engine->i915, size);
415         if (IS_ERR(obj)) {
416                 DRM_ERROR("Failed to allocate scratch page\n");
417                 return PTR_ERR(obj);
418         }
419
420         vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
421         if (IS_ERR(vma)) {
422                 ret = PTR_ERR(vma);
423                 goto err_unref;
424         }
425
426         ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
427         if (ret)
428                 goto err_unref;
429
430         engine->scratch = vma;
431         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
432                          engine->name, i915_ggtt_offset(vma));
433         return 0;
434
435 err_unref:
436         i915_gem_object_put(obj);
437         return ret;
438 }
439
/* Release the pinned scratch page created by intel_engine_create_scratch(). */
static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
        i915_vma_unpin_and_release(&engine->scratch);
}
444
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
        struct intel_ring *ring;
        int ret;

        engine->set_default_submission(engine);

        /* We may need to do things with the shrinker which
         * require us to immediately switch back to the default
         * context. This can cause a problem as pinning the
         * default context also requires GTT space which may not
         * be available. To avoid this we always pin the default
         * context.
         */
        ring = engine->context_pin(engine, engine->i915->kernel_context);
        if (IS_ERR(ring))
                return PTR_ERR(ring);

        ret = intel_engine_init_breadcrumbs(engine);
        if (ret)
                goto err_unpin;

        ret = i915_gem_render_state_init(engine);
        if (ret)
                goto err_unpin;

        return 0;

err_unpin:
        /* Drop the kernel context pin taken above on any failure. */
        engine->context_unpin(engine, engine->i915->kernel_context);
        return ret;
}
488
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
        intel_engine_cleanup_scratch(engine);

        i915_gem_render_state_fini(engine);
        intel_engine_fini_breadcrumbs(engine);
        intel_engine_cleanup_cmd_parser(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);

        /* Balance the kernel context pin from intel_engine_init_common(). */
        engine->context_unpin(engine, engine->i915->kernel_context);
}
507
508 u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
509 {
510         struct drm_i915_private *dev_priv = engine->i915;
511         u64 acthd;
512
513         if (INTEL_GEN(dev_priv) >= 8)
514                 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
515                                          RING_ACTHD_UDW(engine->mmio_base));
516         else if (INTEL_GEN(dev_priv) >= 4)
517                 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
518         else
519                 acthd = I915_READ(ACTHD);
520
521         return acthd;
522 }
523
524 u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
525 {
526         struct drm_i915_private *dev_priv = engine->i915;
527         u64 bbaddr;
528
529         if (INTEL_GEN(dev_priv) >= 8)
530                 bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
531                                           RING_BBADDR_UDW(engine->mmio_base));
532         else
533                 bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
534
535         return bbaddr;
536 }
537
538 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
539 {
540         switch (type) {
541         case I915_CACHE_NONE: return " uncached";
542         case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
543         case I915_CACHE_L3_LLC: return " L3+LLC";
544         case I915_CACHE_WT: return " WT";
545         default: return "";
546         }
547 }
548
/* Read a per-slice/subslice register by steering the MCR selector at the
 * requested slice/subslice, performing the read, and then restoring the
 * selector to 0, all under the uncore lock with forcewake held.
 */
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                  int subslice, i915_reg_t reg)
{
        uint32_t mcr;
        uint32_t ret;
        enum forcewake_domains fw_domains;

        /* We need forcewake for both the target register and the selector. */
        fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                                    FW_REG_READ);
        fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
                                                     GEN8_MCR_SELECTOR,
                                                     FW_REG_READ | FW_REG_WRITE);

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

        mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
        /*
         * The HW expects the slice and subslice selectors to be reset to 0
         * after reading out the registers.
         */
        WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
        mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
        mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        ret = I915_READ_FW(reg);

        /* Restore the selector to its expected zero state. */
        mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
        spin_unlock_irq(&dev_priv->uncore.lock);

        return ret;
}
586
/* NB: please notice the memset */
/* Fill @instdone with the INSTDONE state for @engine; the set of registers
 * read depends on the hardware generation, and the sampler/row arrays are
 * only populated for the render engine on gen8+.
 */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u32 mmio_base = engine->mmio_base;
        int slice;
        int subslice;

        memset(instdone, 0, sizeof(*instdone));

        switch (INTEL_GEN(dev_priv)) {
        default:
                /* gen8+: per-slice/subslice sampler and row registers */
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id != RCS)
                        break;

                instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
                for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
                        instdone->sampler[slice][subslice] =
                                read_subslice_reg(dev_priv, slice, subslice,
                                                  GEN7_SAMPLER_INSTDONE);
                        instdone->row[slice][subslice] =
                                read_subslice_reg(dev_priv, slice, subslice,
                                                  GEN7_ROW_INSTDONE);
                }
                break;
        case 7:
                /* gen7: single sampler/row register pair */
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id != RCS)
                        break;

                instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
                instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
                instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

                break;
        case 6:
        case 5:
        case 4:
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id == RCS)
                        /* HACK: Using the wrong struct member */
                        instdone->slice_common = I915_READ(GEN4_INSTDONE1);
                break;
        case 3:
        case 2:
                instdone->instdone = I915_READ(GEN2_INSTDONE);
                break;
        }
}
641
642 static int wa_add(struct drm_i915_private *dev_priv,
643                   i915_reg_t addr,
644                   const u32 mask, const u32 val)
645 {
646         const u32 idx = dev_priv->workarounds.count;
647
648         if (WARN_ON(idx >= I915_MAX_WA_REGS))
649                 return -ENOSPC;
650
651         dev_priv->workarounds.reg[idx].addr = addr;
652         dev_priv->workarounds.reg[idx].value = val;
653         dev_priv->workarounds.reg[idx].mask = mask;
654
655         dev_priv->workarounds.count++;
656
657         return 0;
658 }
659
/* Record a workaround register write via wa_add(). On table overflow this
 * returns from the *enclosing* function, so the WA_* macros may only be
 * used inside the *_init_workarounds() helpers.
 */
#define WA_REG(addr, mask, val) do { \
                const int r = wa_add(dev_priv, (addr), (mask), (val)); \
                if (r) \
                        return r; \
        } while (0)

/* Helpers for registers written through the _MASKED_* encodings. */
#define WA_SET_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
        WA_REG(addr, mask, _MASKED_FIELD(mask, value))

/* Read-modify-write helpers for ordinary (non-masked) registers. */
#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
679
/* Add @reg to the engine's hardware whitelist by programming the next free
 * RING_FORCE_TO_NONPRIV slot. Returns 0 on success, -EINVAL if all slots
 * are used; note WA_WRITE may also return -ENOSPC from wa_add().
 */
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
                                 i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct i915_workarounds *wa = &dev_priv->workarounds;
        const uint32_t index = wa->hw_whitelist_count[engine->id];

        if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
                return -EINVAL;

        WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
                 i915_mmio_reg_offset(reg));
        wa->hw_whitelist_count[engine->id]++;

        return 0;
}
696
/* Workarounds common to all gen8 parts (Broadwell and Cherryview). */
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

        /* WaDisableAsyncFlipPerfMode:bdw,chv */
        WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

        /* WaDisablePartialInstShootdown:bdw,chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Use Force Non-Coherent whenever executing a 3D context. This is a
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
        /* WaForceEnableNonCoherent:bdw,chv */
        /* WaHdcDisableFetchWhenMasked:bdw,chv */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_DONOT_FETCH_MEM_WHEN_MASKED |
                          HDC_FORCE_NON_COHERENT);

        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
         * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
         *  polygons in the same 8x4 pixel/sample area to be processed without
         *  stalling waiting for the earlier ones to write to Hierarchical Z
         *  buffer."
         *
         * This optimization is off by default for BDW and CHV; turn it on.
         */
        WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

        /* Wa4x4STCOptimizationDisable:bdw,chv */
        WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);

        return 0;
}
747
/* Broadwell-specific workarounds, applied on top of the common gen8 set. */
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* WaDisableDopClockGating:bdw
         *
         * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
         * to disable EUTC clock gating.
         */
        WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
                          DOP_CLOCK_GATING_DISABLE);

        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN8_SAMPLER_POWER_BYPASS_DIS);

        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
                          (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

        return 0;
}
779
/* Cherryview-specific workarounds, applied on top of the common gen8 set. */
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* Improve HiZ throughput on CHV. */
        WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

        return 0;
}
797
798 static int gen9_init_workarounds(struct intel_engine_cs *engine)
799 {
800         struct drm_i915_private *dev_priv = engine->i915;
801         int ret;
802
803         /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
804         I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
805
806         /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
807         I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
808                    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
809
810         /* WaDisableKillLogic:bxt,skl,kbl */
811         if (!IS_COFFEELAKE(dev_priv))
812                 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
813                            ECOCHK_DIS_TLB);
814
815         /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
816         /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
817         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
818                           FLOW_CONTROL_ENABLE |
819                           PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
820
821         /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
822         if (!IS_COFFEELAKE(dev_priv))
823                 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
824                                   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
825
826         /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
827         if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
828                 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
829                                   GEN9_DG_MIRROR_FIX_ENABLE);
830
831         /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
832         if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
833                 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
834                                   GEN9_RHWO_OPTIMIZATION_DISABLE);
835                 /*
836                  * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
837                  * but we do that in per ctx batchbuffer as there is an issue
838                  * with this register not getting restored on ctx restore
839                  */
840         }
841
842         /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
843         /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
844         WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
845                           GEN9_ENABLE_YV12_BUGFIX |
846                           GEN9_ENABLE_GPGPU_PREEMPTION);
847
848         /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
849         /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
850         WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
851                                          GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
852
853         /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
854         WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
855                           GEN9_CCS_TLB_PREFETCH_ENABLE);
856
857         /* WaDisableMaskBasedCammingInRCC:bxt */
858         if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
859                 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
860                                   PIXEL_MASK_CAMMING_DISABLE);
861
862         /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
863         WA_SET_BIT_MASKED(HDC_CHICKEN0,
864                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
865                           HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
866
867         /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
868          * both tied to WaForceContextSaveRestoreNonCoherent
869          * in some hsds for skl. We keep the tie for all gen9. The
870          * documentation is a bit hazy and so we want to get common behaviour,
871          * even though there is no clear evidence we would need both on kbl/bxt.
872          * This area has been source of system hangs so we play it safe
873          * and mimic the skl regardless of what bspec says.
874          *
875          * Use Force Non-Coherent whenever executing a 3D context. This
876          * is a workaround for a possible hang in the unlikely event
877          * a TLB invalidation occurs during a PSD flush.
878          */
879
880         /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
881         WA_SET_BIT_MASKED(HDC_CHICKEN0,
882                           HDC_FORCE_NON_COHERENT);
883
884         /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
885         I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
886                    BDW_DISABLE_HDC_INVALIDATION);
887
888         /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
889         if (IS_SKYLAKE(dev_priv) ||
890             IS_KABYLAKE(dev_priv) ||
891             IS_COFFEELAKE(dev_priv) ||
892             IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
893                 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
894                                   GEN8_SAMPLER_POWER_BYPASS_DIS);
895
896         /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
897         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
898
899         /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
900         I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
901                                     GEN8_LQSC_FLUSH_COHERENT_LINES));
902
903         /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
904         ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
905         if (ret)
906                 return ret;
907
908         /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
909         ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
910         if (ret)
911                 return ret;
912
913         /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
914         ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
915         if (ret)
916                 return ret;
917
918         return 0;
919 }
920
921 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
922 {
923         struct drm_i915_private *dev_priv = engine->i915;
924         u8 vals[3] = { 0, 0, 0 };
925         unsigned int i;
926
927         for (i = 0; i < 3; i++) {
928                 u8 ss;
929
930                 /*
931                  * Only consider slices where one, and only one, subslice has 7
932                  * EUs
933                  */
934                 if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
935                         continue;
936
937                 /*
938                  * subslice_7eu[i] != 0 (because of the check above) and
939                  * ss_max == 4 (maximum number of subslices possible per slice)
940                  *
941                  * ->    0 <= ss <= 3;
942                  */
943                 ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
944                 vals[i] = 3 - ss;
945         }
946
947         if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
948                 return 0;
949
950         /* Tune IZ hashing. See intel_device_info_runtime_init() */
951         WA_SET_FIELD_MASKED(GEN7_GT_MODE,
952                             GEN9_IZ_HASHING_MASK(2) |
953                             GEN9_IZ_HASHING_MASK(1) |
954                             GEN9_IZ_HASHING_MASK(0),
955                             GEN9_IZ_HASHING(2, vals[2]) |
956                             GEN9_IZ_HASHING(1, vals[1]) |
957                             GEN9_IZ_HASHING(0, vals[0]));
958
959         return 0;
960 }
961
962 static int skl_init_workarounds(struct intel_engine_cs *engine)
963 {
964         struct drm_i915_private *dev_priv = engine->i915;
965         int ret;
966
967         ret = gen9_init_workarounds(engine);
968         if (ret)
969                 return ret;
970
971         /*
972          * Actual WA is to disable percontext preemption granularity control
973          * until D0 which is the default case so this is equivalent to
974          * !WaDisablePerCtxtPreemptionGranularityControl:skl
975          */
976         I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
977                    _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
978
979         /* WaEnableGapsTsvCreditFix:skl */
980         I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
981                                    GEN9_GAPS_TSV_CREDIT_DISABLE));
982
983         /* WaDisableGafsUnitClkGating:skl */
984         WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
985
986         /* WaInPlaceDecompressionHang:skl */
987         if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
988                 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
989                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
990
991         /* WaDisableLSQCROPERFforOCL:skl */
992         ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
993         if (ret)
994                 return ret;
995
996         return skl_tune_iz_hashing(engine);
997 }
998
/*
 * Broxton render workarounds, applied on top of the common gen9 set.
 * Most entries are gated on the hardware stepping via IS_BXT_REVID();
 * pre-production steppings (A0/A1, B0) need extra care. Returns 0 on
 * success or a negative error code from whitelist setup.
 */
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
				  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		u32 val = I915_READ(GEN8_L3SQCREG1);
		/* Replace the L3 priority credits with the tuned defaults. */
		val &= ~L3_PRIO_CREDITS_MASK;
		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
		I915_WRITE(GEN8_L3SQCREG1, val);
	}

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	return 0;
}
1070
1071 static int kbl_init_workarounds(struct intel_engine_cs *engine)
1072 {
1073         struct drm_i915_private *dev_priv = engine->i915;
1074         int ret;
1075
1076         ret = gen9_init_workarounds(engine);
1077         if (ret)
1078                 return ret;
1079
1080         /* WaEnableGapsTsvCreditFix:kbl */
1081         I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1082                                    GEN9_GAPS_TSV_CREDIT_DISABLE));
1083
1084         /* WaDisableDynamicCreditSharing:kbl */
1085         if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1086                 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1087                            GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1088
1089         /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1090         if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1091                 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1092                                   HDC_FENCE_DEST_SLM_DISABLE);
1093
1094         /* WaToEnableHwFixForPushConstHWBug:kbl */
1095         if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1096                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1097                                   GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1098
1099         /* WaDisableGafsUnitClkGating:kbl */
1100         WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1101
1102         /* WaDisableSbeCacheDispatchPortSharing:kbl */
1103         WA_SET_BIT_MASKED(
1104                 GEN7_HALF_SLICE_CHICKEN1,
1105                 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1106
1107         /* WaInPlaceDecompressionHang:kbl */
1108         WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1109                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1110
1111         /* WaDisableLSQCROPERFforOCL:kbl */
1112         ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1113         if (ret)
1114                 return ret;
1115
1116         return 0;
1117 }
1118
1119 static int glk_init_workarounds(struct intel_engine_cs *engine)
1120 {
1121         struct drm_i915_private *dev_priv = engine->i915;
1122         int ret;
1123
1124         ret = gen9_init_workarounds(engine);
1125         if (ret)
1126                 return ret;
1127
1128         /* WaToEnableHwFixForPushConstHWBug:glk */
1129         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1130                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1131
1132         return 0;
1133 }
1134
1135 static int cfl_init_workarounds(struct intel_engine_cs *engine)
1136 {
1137         struct drm_i915_private *dev_priv = engine->i915;
1138         int ret;
1139
1140         ret = gen9_init_workarounds(engine);
1141         if (ret)
1142                 return ret;
1143
1144         /* WaEnableGapsTsvCreditFix:cfl */
1145         I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1146                                    GEN9_GAPS_TSV_CREDIT_DISABLE));
1147
1148         /* WaToEnableHwFixForPushConstHWBug:cfl */
1149         WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1150                           GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1151
1152         /* WaDisableGafsUnitClkGating:cfl */
1153         WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1154
1155         /* WaDisableSbeCacheDispatchPortSharing:cfl */
1156         WA_SET_BIT_MASKED(
1157                 GEN7_HALF_SLICE_CHICKEN1,
1158                 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1159
1160         /* WaInPlaceDecompressionHang:cfl */
1161         WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1162                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1163
1164         return 0;
1165 }
1166
1167 int init_workarounds_ring(struct intel_engine_cs *engine)
1168 {
1169         struct drm_i915_private *dev_priv = engine->i915;
1170         int err;
1171
1172         WARN_ON(engine->id != RCS);
1173
1174         dev_priv->workarounds.count = 0;
1175         dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
1176
1177         if (IS_BROADWELL(dev_priv))
1178                 err = bdw_init_workarounds(engine);
1179         else if (IS_CHERRYVIEW(dev_priv))
1180                 err = chv_init_workarounds(engine);
1181         else if (IS_SKYLAKE(dev_priv))
1182                 err =  skl_init_workarounds(engine);
1183         else if (IS_BROXTON(dev_priv))
1184                 err = bxt_init_workarounds(engine);
1185         else if (IS_KABYLAKE(dev_priv))
1186                 err = kbl_init_workarounds(engine);
1187         else if (IS_GEMINILAKE(dev_priv))
1188                 err =  glk_init_workarounds(engine);
1189         else if (IS_COFFEELAKE(dev_priv))
1190                 err = cfl_init_workarounds(engine);
1191         else
1192                 err = 0;
1193         if (err)
1194                 return err;
1195
1196         DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
1197                          engine->name, dev_priv->workarounds.count);
1198         return 0;
1199 }
1200
/*
 * Emit the accumulated context workaround registers into @req's ring as a
 * single MI_LOAD_REGISTER_IMM block, bracketed by flushes so the register
 * writes are ordered against other commands in flight. No-op when no
 * workarounds have been registered. Returns 0 on success or a negative
 * error code.
 */
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct i915_workarounds *w = &req->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	/* Two dwords per workaround (offset + value), plus LRI header and a
	 * trailing MI_NOOP to keep the total dword count even.
	 */
	cs = intel_ring_begin(req, (w->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	/* Second barrier makes the LRI effects visible before what follows */
	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
1233
1234 static bool ring_is_idle(struct intel_engine_cs *engine)
1235 {
1236         struct drm_i915_private *dev_priv = engine->i915;
1237         bool idle = true;
1238
1239         intel_runtime_pm_get(dev_priv);
1240
1241         /* First check that no commands are left in the ring */
1242         if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
1243             (I915_READ_TAIL(engine) & TAIL_ADDR))
1244                 idle = false;
1245
1246         /* No bit for gen2, so assume the CS parser is idle */
1247         if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
1248                 idle = false;
1249
1250         intel_runtime_pm_put(dev_priv);
1251
1252         return idle;
1253 }
1254
1255 /**
1256  * intel_engine_is_idle() - Report if the engine has finished process all work
1257  * @engine: the intel_engine_cs
1258  *
1259  * Return true if there are no requests pending, nothing left to be submitted
1260  * to hardware, and that the engine is idle.
1261  */
1262 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1263 {
1264         struct drm_i915_private *dev_priv = engine->i915;
1265
1266         /* More white lies, if wedged, hw state is inconsistent */
1267         if (i915_terminally_wedged(&dev_priv->gpu_error))
1268                 return true;
1269
1270         /* Any inflight/incomplete requests? */
1271         if (!i915_seqno_passed(intel_engine_get_seqno(engine),
1272                                intel_engine_last_submit(engine)))
1273                 return false;
1274
1275         if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
1276                 return true;
1277
1278         /* Interrupt/tasklet pending? */
1279         if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
1280                 return false;
1281
1282         /* Both ports drained, no more ELSP submission? */
1283         if (port_request(&engine->execlist_port[0]))
1284                 return false;
1285
1286         /* ELSP is empty, but there are ready requests? */
1287         if (READ_ONCE(engine->execlist_first))
1288                 return false;
1289
1290         /* Ring stopped? */
1291         if (!ring_is_idle(engine))
1292                 return false;
1293
1294         return true;
1295 }
1296
1297 bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
1298 {
1299         struct intel_engine_cs *engine;
1300         enum intel_engine_id id;
1301
1302         if (READ_ONCE(dev_priv->gt.active_requests))
1303                 return false;
1304
1305         /* If the driver is wedged, HW state may be very inconsistent and
1306          * report that it is still busy, even though we have stopped using it.
1307          */
1308         if (i915_terminally_wedged(&dev_priv->gpu_error))
1309                 return true;
1310
1311         for_each_engine(engine, dev_priv, id) {
1312                 if (!intel_engine_is_idle(engine))
1313                         return false;
1314         }
1315
1316         return true;
1317 }
1318
1319 void intel_engines_reset_default_submission(struct drm_i915_private *i915)
1320 {
1321         struct intel_engine_cs *engine;
1322         enum intel_engine_id id;
1323
1324         for_each_engine(engine, i915, id)
1325                 engine->set_default_submission(engine);
1326 }
1327
/*
 * Release per-engine resources that are only needed while requests are in
 * flight. NOTE(review): presumably called once the GT has gone idle —
 * confirm against callers; the per-step ordering below may be deliberate.
 */
void intel_engines_mark_idle(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Quiesce request-completion signaling bookkeeping */
		intel_engine_disarm_breadcrumbs(engine);
		/* Drop cached batch buffers no longer needed by the GPU */
		i915_gem_batch_pool_fini(&engine->batch_pool);
		/* Ensure the irq tasklet is not (and will not be) running */
		tasklet_kill(&engine->irq_tasklet);
		/* Re-enable the priolist fast path for the next busy period */
		engine->no_priolist = false;
	}
}
1340
1341 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1342 #include "selftests/mock_engine.c"
1343 #endif