/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)

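/*
 * Illustrative arithmetic for the two sizes above: DIV_ROUND_UP(66944, 4096)
 * == 17 pages for Haswell and DIV_ROUND_UP(72064, 4096) == 18 pages for gen8,
 * matching the page counts encoded in the defines.
 */
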
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};

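/*
 * Note: intel_engines_init() below picks either ->init_execlists or
 * ->init_legacy from this table for each engine, depending on the
 * i915_modparams.enable_execlists setting; all of the non-render classes
 * share the common logical_xcs_ring_init() in execlists mode.
 */
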
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	u32 mmio_base;
	unsigned int irq_shift;
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return i915_modparams.enable_execlists ?
			       GEN8_LR_CONTEXT_RENDER_SIZE :
			       GEN8_CXT_TOTAL_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

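/*
 * For example (illustrative): on a gen9 part this returns
 * GEN9_LR_CONTEXT_RENDER_SIZE (22 pages) for RENDER_CLASS and
 * GEN8_LR_CONTEXT_OTHER_SIZE (2 pages) for the video, video-enhance and
 * copy classes, while anything below gen8 has no logical context for the
 * non-render classes and therefore reports 0.
 */
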
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	const struct engine_class_info *class_info;
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
	class_info = &intel_engine_classes[info->class];

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
			 class_info->name, info->instance) >=
		sizeof(engine->name));
	engine->uabi_id = info->uabi_id;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;
	engine->class = info->class;
	engine->instance = info->instance;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine[id] = engine;
	return 0;
}

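/*
 * For example (illustrative): intel_engines[RCS] yields an engine named
 * "rcs0" - class name "rcs" from intel_engine_classes plus instance 0 -
 * with mmio_base RENDER_RING_BASE and a context size computed above.
 */
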
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver, by warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

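/*
 * Probing is thus split in two: intel_engines_init_mmio() above only
 * allocates the intel_engine_cs structures from the static tables, while
 * intel_engines_init() below performs the submission-mode specific
 * (legacy ringbuffer vs execlists) hardware-facing setup.
 */
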
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (i915_modparams.enable_execlists)
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno, we must also
	 * reset the tracking semaphore value to 0 so that it is always before
	 * the next request's seqno. If we don't reset the semaphore value,
	 * then when the seqno moves backwards all future waits will complete
	 * instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap_atomic(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap_atomic(semaphores);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

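/*
 * A typical caller (illustrative) is the seqno-wrap handling: when the
 * driver runs out of seqno space it rewinds every engine to a known value
 * via this function, and the GEM_BUG_ON above checks that the HWSP write
 * actually landed.
 */
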
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
	engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/*
	 * IOMMU adds unpredictable latency causing the CSB write (from the
	 * GPU into the HWSP) to only be visible some time after the interrupt
	 * (missed breadcrumb syndrome).
	 */
	if (intel_vtd_active())
		return true;

	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}

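/*
 * Note: port_mask is one less than the number of ELSP ports, so the
 * default of 1 above gives execlists_num_ports() == 2, the classic
 * two-deep execlists submission queue.
 */
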
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	intel_engine_init_execlist(engine);

	intel_engine_init_timeline(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);

	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 engine->name, i915_ggtt_offset(vma));
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = engine->context_pin(engine, engine->i915->kernel_context);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
		ring = engine->context_pin(engine,
					   engine->i915->preempt_context);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	ret = i915_gem_render_state_init(engine);
	if (ret)
		goto err_breadcrumbs;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_rs_fini;

	return 0;

err_rs_fini:
	i915_gem_render_state_fini(engine);
err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
		engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
	engine->context_unpin(engine, engine->i915->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	i915_gem_render_state_fini(engine);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
		engine->context_unpin(engine, engine->i915->preempt_context);
	engine->context_unpin(engine, engine->i915->kernel_context);
}

u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
	 */
	WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

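/*
 * For example (illustrative), intel_engine_get_instdone() below calls this
 * as read_subslice_reg(dev_priv, slice, subslice, GEN7_SAMPLER_INSTDONE)
 * to steer a per-subslice INSTDONE read through GEN8_MCR_SELECTOR.
 */
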
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

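/*
 * Note: these macros only *record* a masked register write in
 * dev_priv->workarounds; nothing is written to the hardware here. The table
 * is later replayed into the ring by intel_ring_workarounds_emit() as a
 * single MI_LOAD_REGISTER_IMM. For example, WA_SET_BIT_MASKED(MI_MODE,
 * ASYNC_FLIP_PERF_DISABLE) queues an LRI of
 * _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE) to MI_MODE.
 */
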
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct i915_workarounds *wa = &dev_priv->workarounds;
	const uint32_t index = wa->hw_whitelist_count[engine->id];

	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
		return -EINVAL;

	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
		   i915_mmio_reg_offset(reg));
	wa->hw_whitelist_count[engine->id]++;

	return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
		   _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   ECOCHK_DIS_TLB);

	if (HAS_LLC(dev_priv)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);

		I915_WRITE(MMCD_MISC_CTRL,
			   I915_READ(MMCD_MISC_CTRL) |
			   MMCD_PCLA |
			   MMCD_HOTSPOT_EN);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
	if (!IS_COFFEELAKE(dev_priv))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   BDW_DISABLE_HDC_INVALIDATION);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_COFFEELAKE(dev_priv) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
				    GEN8_LQSC_FLUSH_COHERENT_LINES));

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
	if (ret)
		return ret;

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

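/*
 * Worked example (illustrative): if slice 0 has exactly one 7-EU subslice,
 * say subslice 2, then subslice_7eu[0] == BIT(2), so ss == 2 and
 * vals[0] == 3 - 2 == 1, which is then programmed via GEN9_IZ_HASHING(0, 1).
 */
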
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:skl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableGafsUnitClkGating:skl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:skl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisablePooledEuLoadBalancingFix:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
	}

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
	/* WaDisableLSQCROPERFforOCL:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
		if (ret)
			return ret;

		ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
		if (ret)
			return ret;
	}

	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
		u32 val = I915_READ(GEN8_L3SQCREG1);
		val &= ~L3_PRIO_CREDITS_MASK;
		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
		I915_WRITE(GEN8_L3SQCREG1, val);
	}

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaInPlaceDecompressionHang:bxt */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

static int cnl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaInPlaceDecompressionHang:cnl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
	if (ret)
		return ret;

	return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:kbl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GAMT_CHKN_BIT_REG,
			   (I915_READ(GAMT_CHKN_BIT_REG) |
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));

	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:kbl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	/* WaDisableLSQCROPERFforOCL:kbl */
	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
	if (ret)
		return ret;

	return 0;
}

static int glk_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	return 0;
}

static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen9_init_workarounds(engine);
	if (ret)
		return ret;

	/* WaEnableGapsTsvCreditFix:cfl */
	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
				   GEN9_GAPS_TSV_CREDIT_DISABLE));

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableGafsUnitClkGating:cfl */
	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(
		GEN7_HALF_SLICE_CHICKEN1,
		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

	WARN_ON(engine->id != RCS);

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

	if (IS_BROADWELL(dev_priv))
		err = bdw_init_workarounds(engine);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_init_workarounds(engine);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_init_workarounds(engine);
	else if (IS_BROXTON(dev_priv))
		err = bxt_init_workarounds(engine);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_init_workarounds(engine);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_init_workarounds(engine);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_init_workarounds(engine);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_init_workarounds(engine);
	else
		err = 0;
	if (err)
		return err;

	DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
			 engine->name, dev_priv->workarounds.count);
	return 0;
}

int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	struct i915_workarounds *w = &req->i915->workarounds;
	u32 *cs;
	int ret, i;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, w->count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
	for (i = 0; i < w->count; i++) {
		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
		*cs++ = w->reg[i].value;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

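/*
 * The command stream built above is, schematically:
 *
 *   MI_LOAD_REGISTER_IMM(count)
 *     { reg[0].addr, reg[0].value }
 *     ...
 *     { reg[count-1].addr, reg[count-1].value }
 *   MI_NOOP
 *
 * i.e. w->count * 2 + 2 dwords, which is exactly what intel_ring_begin()
 * reserved.
 */
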
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	intel_runtime_pm_get(dev_priv);

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Interrupt/tasklet pending? */
	if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
		return false;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))
		return false;

	/* ELSP is empty, but there are ready requests? */
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (READ_ONCE(dev_priv->gt.active_requests))
		return false;

	/* If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	return (!engine->last_retired_context ||
		i915_gem_context_is_kernel(engine->last_retired_context));
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->park)
			engine->park(engine);

		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.irq_tasklet);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->unpark)
			engine->unpark(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

static void print_request(struct drm_printer *m,
			  struct drm_i915_gem_request *rq,
			  const char *prefix)
{
	drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
		   rq->global_seqno,
		   i915_gem_request_completed(rq) ? "!" : "",
		   rq->ctx->hw_id, rq->fence.seqno,
		   rq->priotree.priority,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   rq->timeline->common->name);
}

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
{
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *rq;
	struct rb_node *rb;
	u64 addr;

	drm_printf(m, "%s\n", engine->name);
	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
		   engine->timeline->inflight_seqnos);
	drm_printf(m, "\tReset count: %d\n",
		   i915_reset_engine_count(error, engine));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline->requests,
			      struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tfirst  ");

	rq = list_last_entry(&engine->timeline->requests,
			     struct drm_i915_gem_request, link);
	if (&rq->link != &engine->timeline->requests)
		print_request(m, rq, "\t\tlast   ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");
		drm_printf(m,
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
	}

	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
		   I915_READ(RING_START(engine->mmio_base)),
		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
	drm_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
		   rq ? rq->ring->head : 0);
	drm_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
		   rq ? rq->ring->tail : 0);
	drm_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");

	rcu_read_unlock();

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));

	if (i915_modparams.enable_execlists) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				drm_printf(m, "\t\tELSP[%d] count=%d, ",
					   idx, count);
				print_request(m, rq, "rq: ");
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n",
					   idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry(rq, &engine->timeline->requests, link)
		print_request(m, rq, "\t\tE ");
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, priotree.link)
			print_request(m, rq, "\t\tQ ");
	}
	spin_unlock_irq(&engine->timeline->lock);

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);

	drm_printf(m, "\n");
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif