2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
30 #include <linux/log2.h>
33 #include <drm/i915_drm.h>
34 #include "i915_trace.h"
35 #include "intel_drv.h"
37 /* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
40 #define LEGACY_REQUEST_SIZE 200
42 static unsigned int __intel_ring_space(unsigned int head,
47 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
48 * same cacheline, the Head Pointer must not be greater than the Tail
51 GEM_BUG_ON(!is_power_of_2(size));
52 return (head - tail - CACHELINE_BYTES) & (size - 1);
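/*
 * Illustrative worked example (not in the original source), assuming a
 * hypothetically small 4096-byte ring and CACHELINE_BYTES == 64:
 *
 *   head == 0,  tail == 0   ->  (0 - 0 - 64) & 4095  == 4032 bytes free
 *   head == 64, tail == 0   ->  (64 - 0 - 64) & 4095 == 0 bytes free
 *
 * i.e. the reported space is always one cacheline short of the raw
 * head-to-tail distance, so the tail can never advance onto the same
 * cacheline as the head, per the workaround quoted above.
 */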
55 unsigned int intel_ring_update_space(struct intel_ring *ring)
59 space = __intel_ring_space(ring->head, ring->emit, ring->size);
66 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
72 if (mode & EMIT_INVALIDATE)
75 cs = intel_ring_begin(req, 2);
81 intel_ring_advance(req, cs);
87 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
94 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
95 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
96 * also flushed at 2d versus 3d pipeline switches.
100 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
101 * MI_READ_FLUSH is set, and is always flushed on 965.
103 * I915_GEM_DOMAIN_COMMAND may not exist?
105 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
106 * invalidated when MI_EXE_FLUSH is set.
108 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
109 * invalidated with every MI_FLUSH.
113 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
114 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
115 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
116 * are flushed at any MI_FLUSH.
120 if (mode & EMIT_INVALIDATE) {
122 if (IS_G4X(req->i915) || IS_GEN5(req->i915))
123 cmd |= MI_INVALIDATE_ISP;
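/*
 * Rough sketch (not part of the original source) of what an invalidating
 * flush ends up emitting here on G4x/GEN5, assuming the elided lines above
 * start cmd as MI_FLUSH and also OR in MI_EXE_FLUSH for EMIT_INVALIDATE:
 *
 *   *cs++ = MI_FLUSH | MI_EXE_FLUSH | MI_INVALIDATE_ISP;
 *   *cs++ = MI_NOOP;
 */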
126 cs = intel_ring_begin(req, 2);
132 intel_ring_advance(req, cs);
138 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
139 * implementing two workarounds on gen6. From section 1.4.7.1
140 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
142 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
143 * produced by non-pipelined state commands), software needs to first
144 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
147 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
148 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
150 * And the workaround for these two requires this workaround first:
152 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
153 * BEFORE the pipe-control with a post-sync op and no write-cache
156 * And this last workaround is tricky because of the requirements on
157 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
160 * "1 of the following must also be set:
161 * - Render Target Cache Flush Enable ([12] of DW1)
162 * - Depth Cache Flush Enable ([0] of DW1)
163 * - Stall at Pixel Scoreboard ([1] of DW1)
164 * - Depth Stall ([13] of DW1)
165 * - Post-Sync Operation ([13] of DW1)
166 * - Notify Enable ([8] of DW1)"
168 * The cache flushes require the workaround flush that triggered this
169 * one, so we can't use it. Depth stall would trigger the same.
170 * Post-sync nonzero is what triggered this second workaround, so we
171 * can't use that one either. Notify enable is IRQs, which aren't
172 * really our business. That leaves only stall at scoreboard.
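 *
 * As an illustrative summary (an editor's sketch, not part of the PRM quote;
 * the authoritative version is the function below): the workaround boils
 * down to two back-to-back PIPE_CONTROLs, the first with only CS-stall +
 * stall-at-scoreboard set, the second with a qword post-sync write to a
 * scratch page, each laid out as:
 *
 *   DW0: GFX_OP_PIPE_CONTROL(5)
 *   DW1: flags (stall-at-scoreboard, then QW_WRITE)
 *   DW2: scratch address | PIPE_CONTROL_GLOBAL_GTT
 *   DW3: 0  (low dword of the post-sync write)
 *   DW4: 0  (high dword of the post-sync write)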
175 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
178 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
181 cs = intel_ring_begin(req, 6);
185 *cs++ = GFX_OP_PIPE_CONTROL(5);
186 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
187 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
188 *cs++ = 0; /* low dword */
189 *cs++ = 0; /* high dword */
191 intel_ring_advance(req, cs);
193 cs = intel_ring_begin(req, 6);
197 *cs++ = GFX_OP_PIPE_CONTROL(5);
198 *cs++ = PIPE_CONTROL_QW_WRITE;
199 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
203 intel_ring_advance(req, cs);
209 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
212 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
216 /* Force SNB workarounds for PIPE_CONTROL flushes */
217 ret = intel_emit_post_sync_nonzero_flush(req);
221 /* Just flush everything. Experiments have shown that reducing the
222 * number of bits based on the write domains has little performance
225 if (mode & EMIT_FLUSH) {
226 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
227 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
229 * Ensure that any following seqno writes only happen
230 * when the render cache is indeed flushed.
232 flags |= PIPE_CONTROL_CS_STALL;
234 if (mode & EMIT_INVALIDATE) {
235 flags |= PIPE_CONTROL_TLB_INVALIDATE;
236 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
237 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
238 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
239 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
240 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
242 * TLB invalidate requires a post-sync write.
244 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
247 cs = intel_ring_begin(req, 4);
251 *cs++ = GFX_OP_PIPE_CONTROL(4);
253 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
255 intel_ring_advance(req, cs);
261 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
265 cs = intel_ring_begin(req, 4);
269 *cs++ = GFX_OP_PIPE_CONTROL(4);
270 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
273 intel_ring_advance(req, cs);
279 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
282 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
286 * Ensure that any following seqno writes only happen when the render
287 * cache is indeed flushed.
289 * Workaround: 4th PIPE_CONTROL command (except the ones with only
290 * read-cache invalidate bits set) must have the CS_STALL bit set. We
291 * don't try to be clever and just set it unconditionally.
293 flags |= PIPE_CONTROL_CS_STALL;
295 /* Just flush everything. Experiments have shown that reducing the
296 * number of bits based on the write domains has little performance
299 if (mode & EMIT_FLUSH) {
300 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
301 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
302 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
303 flags |= PIPE_CONTROL_FLUSH_ENABLE;
305 if (mode & EMIT_INVALIDATE) {
306 flags |= PIPE_CONTROL_TLB_INVALIDATE;
307 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
308 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
309 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
310 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
311 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
312 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
314 * TLB invalidate requires a post-sync write.
316 flags |= PIPE_CONTROL_QW_WRITE;
317 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
319 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
321 /* Workaround: we must issue a pipe_control with CS-stall bit
322 * set before a pipe_control command that has the state cache
323 * invalidate bit set. */
324 gen7_render_ring_cs_stall_wa(req);
327 cs = intel_ring_begin(req, 4);
331 *cs++ = GFX_OP_PIPE_CONTROL(4);
333 *cs++ = scratch_addr;
335 intel_ring_advance(req, cs);
341 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
346 cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
350 flags = PIPE_CONTROL_CS_STALL;
352 if (mode & EMIT_FLUSH) {
353 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
354 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
355 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
356 flags |= PIPE_CONTROL_FLUSH_ENABLE;
358 if (mode & EMIT_INVALIDATE) {
359 flags |= PIPE_CONTROL_TLB_INVALIDATE;
360 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
361 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
362 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
363 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
364 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
365 flags |= PIPE_CONTROL_QW_WRITE;
366 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
368 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
369 cs = gen8_emit_pipe_control(cs,
370 PIPE_CONTROL_CS_STALL |
371 PIPE_CONTROL_STALL_AT_SCOREBOARD,
375 cs = gen8_emit_pipe_control(cs, flags,
376 i915_ggtt_offset(req->engine->scratch) +
377 2 * CACHELINE_BYTES);
379 intel_ring_advance(req, cs);
384 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
386 struct drm_i915_private *dev_priv = engine->i915;
389 addr = dev_priv->status_page_dmah->busaddr;
390 if (INTEL_GEN(dev_priv) >= 4)
391 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
392 I915_WRITE(HWS_PGA, addr);
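/*
 * Worked example (illustrative, not from the original source): with a
 * hypothetical 36-bit bus address busaddr == 0x2_4000_1000, the low 32 bits
 * give addr == 0x40001000, and (busaddr >> 28) & 0xf0 packs address bits
 * [35:32] into bits [7:4], so HWS_PGA is written as 0x40001020.
 */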
395 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
397 struct drm_i915_private *dev_priv = engine->i915;
400 /* The ring status page addresses are no longer next to the rest of
401 * the ring registers as of gen7.
403 if (IS_GEN7(dev_priv)) {
404 switch (engine->id) {
406 * No more rings exist on Gen7. Default case is only to shut up
407 * gcc switch check warning.
410 GEM_BUG_ON(engine->id);
412 mmio = RENDER_HWS_PGA_GEN7;
415 mmio = BLT_HWS_PGA_GEN7;
418 mmio = BSD_HWS_PGA_GEN7;
421 mmio = VEBOX_HWS_PGA_GEN7;
424 } else if (IS_GEN6(dev_priv)) {
425 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
427 /* XXX: gen8 returns to sanity */
428 mmio = RING_HWS_PGA(engine->mmio_base);
431 if (INTEL_GEN(dev_priv) >= 6)
432 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
434 I915_WRITE(mmio, engine->status_page.ggtt_offset);
438 * Flush the TLB for this page
440 * FIXME: These two bits have disappeared on gen8, so a question
441 * arises: do we still need this and if so how should we go about
442 * invalidating the TLB?
444 if (IS_GEN(dev_priv, 6, 7)) {
445 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
447 /* ring should be idle before issuing a sync flush */
448 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
451 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
453 if (intel_wait_for_register(dev_priv,
454 reg, INSTPM_SYNC_FLUSH, 0,
456 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
461 static bool stop_ring(struct intel_engine_cs *engine)
463 struct drm_i915_private *dev_priv = engine->i915;
465 if (INTEL_GEN(dev_priv) > 2) {
466 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
467 if (intel_wait_for_register(dev_priv,
468 RING_MI_MODE(engine->mmio_base),
472 DRM_ERROR("%s : timed out trying to stop ring\n",
474 /* Sometimes we observe that the idle flag is not
475 * set even though the ring is empty. So double
476 * check before giving up.
478 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
483 I915_WRITE_CTL(engine, 0);
484 I915_WRITE_HEAD(engine, 0);
485 I915_WRITE_TAIL(engine, 0);
487 if (INTEL_GEN(dev_priv) > 2) {
488 (void)I915_READ_CTL(engine);
489 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
492 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
495 static int init_ring_common(struct intel_engine_cs *engine)
497 struct drm_i915_private *dev_priv = engine->i915;
498 struct intel_ring *ring = engine->buffer;
501 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
503 if (!stop_ring(engine)) {
504 /* G45 ring initialization often fails to reset head to zero */
505 DRM_DEBUG_KMS("%s head not reset to zero "
506 "ctl %08x head %08x tail %08x start %08x\n",
508 I915_READ_CTL(engine),
509 I915_READ_HEAD(engine),
510 I915_READ_TAIL(engine),
511 I915_READ_START(engine));
513 if (!stop_ring(engine)) {
514 DRM_ERROR("failed to set %s head to zero "
515 "ctl %08x head %08x tail %08x start %08x\n",
517 I915_READ_CTL(engine),
518 I915_READ_HEAD(engine),
519 I915_READ_TAIL(engine),
520 I915_READ_START(engine));
526 if (HWS_NEEDS_PHYSICAL(dev_priv))
527 ring_setup_phys_status_page(engine);
529 intel_ring_setup_status_page(engine);
531 intel_engine_reset_breadcrumbs(engine);
533 /* Enforce ordering by reading HEAD register back */
534 I915_READ_HEAD(engine);
536 /* Initialize the ring. This must happen _after_ we've cleared the ring
537 * registers with the above sequence (the readback of the HEAD registers
538 * also enforces ordering), otherwise the hw might lose the new ring
539 * register values. */
540 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
542 /* WaClearRingBufHeadRegAtInit:ctg,elk */
543 if (I915_READ_HEAD(engine))
544 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
545 engine->name, I915_READ_HEAD(engine));
547 intel_ring_update_space(ring);
548 I915_WRITE_HEAD(engine, ring->head);
549 I915_WRITE_TAIL(engine, ring->tail);
550 (void)I915_READ_TAIL(engine);
552 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
554 /* If the head is still not zero, the ring is dead */
555 if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
556 RING_VALID, RING_VALID,
558 DRM_ERROR("%s initialization failed "
559 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
561 I915_READ_CTL(engine),
562 I915_READ_CTL(engine) & RING_VALID,
563 I915_READ_HEAD(engine), ring->head,
564 I915_READ_TAIL(engine), ring->tail,
565 I915_READ_START(engine),
566 i915_ggtt_offset(ring->vma));
571 intel_engine_init_hangcheck(engine);
574 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
579 static void reset_ring_common(struct intel_engine_cs *engine,
580 struct drm_i915_gem_request *request)
583 * RC6 must be prevented until the reset is complete and the engine
584 * reinitialised. If it occurs in the middle of this sequence, the
585 * state written to/loaded from the power context is ill-defined (e.g.
586 * the PP_DIR_BASE may be lost).
588 assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
591 * Try to restore the logical GPU state to match the continuation
592 * of the request queue. If we skip the context/PD restore, then
593 * the next request may try to execute assuming that its context
594 * is valid and loaded on the GPU and so may try to access invalid
595 * memory, prompting repeated GPU hangs.
597 * If the request was guilty, we still restore the logical state
598 * in case the next request requires it (e.g. the aliasing ppgtt),
599 * but skip over the hung batch.
601 * If the request was innocent, we try to replay the request with
602 * the restored context.
605 struct drm_i915_private *dev_priv = request->i915;
606 struct intel_context *ce = &request->ctx->engine[engine->id];
607 struct i915_hw_ppgtt *ppgtt;
609 /* FIXME consider gen8 reset */
613 i915_ggtt_offset(ce->state) |
614 BIT(8) /* must be set! */ |
615 CCID_EXTENDED_STATE_SAVE |
616 CCID_EXTENDED_STATE_RESTORE |
620 ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
622 u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
624 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
625 I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
627 /* Wait for the PD reload to complete */
628 if (intel_wait_for_register(dev_priv,
629 RING_PP_DIR_BASE(engine),
632 DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
634 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
637 /* If the rq hung, jump to its breadcrumb and skip the batch */
638 if (request->fence.error == -EIO)
639 request->ring->head = request->postfix;
641 engine->legacy_active_context = NULL;
645 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
649 ret = intel_ring_workarounds_emit(req);
653 ret = i915_gem_render_state_emit(req);
660 static int init_render_ring(struct intel_engine_cs *engine)
662 struct drm_i915_private *dev_priv = engine->i915;
663 int ret = init_ring_common(engine);
667 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
668 if (IS_GEN(dev_priv, 4, 6))
669 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
671 /* We need to disable the AsyncFlip performance optimisations in order
672 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
673 * programmed to '1' on all products.
675 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
677 if (IS_GEN(dev_priv, 6, 7))
678 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
680 /* Required for the hardware to program scanline values for waiting */
681 /* WaEnableFlushTlbInvalidationMode:snb */
682 if (IS_GEN6(dev_priv))
684 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
686 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
687 if (IS_GEN7(dev_priv))
688 I915_WRITE(GFX_MODE_GEN7,
689 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
690 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
692 if (IS_GEN6(dev_priv)) {
693 /* From the Sandybridge PRM, volume 1 part 3, page 24:
694 * "If this bit is set, STCunit will have LRA as replacement
695 * policy. [...] This bit must be reset. LRA replacement
696 * policy is not supported."
698 I915_WRITE(CACHE_MODE_0,
699 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
702 if (IS_GEN(dev_priv, 6, 7))
703 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
705 if (INTEL_INFO(dev_priv)->gen >= 6)
706 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
708 return init_workarounds_ring(engine);
711 static void render_ring_cleanup(struct intel_engine_cs *engine)
713 struct drm_i915_private *dev_priv = engine->i915;
715 i915_vma_unpin_and_release(&dev_priv->semaphore);
718 static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
720 struct drm_i915_private *dev_priv = req->i915;
721 struct intel_engine_cs *waiter;
722 enum intel_engine_id id;
724 for_each_engine(waiter, dev_priv, id) {
725 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
726 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
729 *cs++ = GFX_OP_PIPE_CONTROL(6);
730 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
731 PIPE_CONTROL_CS_STALL;
732 *cs++ = lower_32_bits(gtt_offset);
733 *cs++ = upper_32_bits(gtt_offset);
734 *cs++ = req->global_seqno;
736 *cs++ = MI_SEMAPHORE_SIGNAL |
737 MI_SEMAPHORE_TARGET(waiter->hw_id);
744 static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
746 struct drm_i915_private *dev_priv = req->i915;
747 struct intel_engine_cs *waiter;
748 enum intel_engine_id id;
750 for_each_engine(waiter, dev_priv, id) {
751 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
752 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
755 *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
756 *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
757 *cs++ = upper_32_bits(gtt_offset);
758 *cs++ = req->global_seqno;
759 *cs++ = MI_SEMAPHORE_SIGNAL |
760 MI_SEMAPHORE_TARGET(waiter->hw_id);
767 static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
769 struct drm_i915_private *dev_priv = req->i915;
770 struct intel_engine_cs *engine;
771 enum intel_engine_id id;
774 for_each_engine(engine, dev_priv, id) {
777 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
780 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
781 if (i915_mmio_reg_valid(mbox_reg)) {
782 *cs++ = MI_LOAD_REGISTER_IMM(1);
783 *cs++ = i915_mmio_reg_offset(mbox_reg);
784 *cs++ = req->global_seqno;
794 static void cancel_requests(struct intel_engine_cs *engine)
796 struct drm_i915_gem_request *request;
799 spin_lock_irqsave(&engine->timeline->lock, flags);
801 /* Mark all submitted requests as skipped. */
802 list_for_each_entry(request, &engine->timeline->requests, link) {
803 GEM_BUG_ON(!request->global_seqno);
804 if (!i915_gem_request_completed(request))
805 dma_fence_set_error(&request->fence, -EIO);
807 /* Remaining _unready_ requests will be nop'ed when submitted */
809 spin_unlock_irqrestore(&engine->timeline->lock, flags);
812 static void i9xx_submit_request(struct drm_i915_gem_request *request)
814 struct drm_i915_private *dev_priv = request->i915;
816 i915_gem_request_submit(request);
818 I915_WRITE_TAIL(request->engine,
819 intel_ring_set_tail(request->ring, request->tail));
822 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
824 *cs++ = MI_STORE_DWORD_INDEX;
825 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
826 *cs++ = req->global_seqno;
827 *cs++ = MI_USER_INTERRUPT;
829 req->tail = intel_ring_offset(req, cs);
830 assert_ring_tail_valid(req->ring, req->tail);
833 static const int i9xx_emit_breadcrumb_sz = 4;
836 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
838 * @req: request to write to the ring
840 * Update the mailbox registers in the *other* rings with the current seqno.
841 * This acts like a signal in the canonical semaphore.
843 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
845 return i9xx_emit_breadcrumb(req,
846 req->engine->semaphore.signal(req, cs));
849 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
852 struct intel_engine_cs *engine = req->engine;
854 if (engine->semaphore.signal)
855 cs = engine->semaphore.signal(req, cs);
857 *cs++ = GFX_OP_PIPE_CONTROL(6);
858 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
859 PIPE_CONTROL_QW_WRITE;
860 *cs++ = intel_hws_seqno_address(engine);
862 *cs++ = req->global_seqno;
863 /* We're thrashing one dword of HWS. */
865 *cs++ = MI_USER_INTERRUPT;
868 req->tail = intel_ring_offset(req, cs);
869 assert_ring_tail_valid(req->ring, req->tail);
872 static const int gen8_render_emit_breadcrumb_sz = 8;
875 * gen8_ring_sync_to - sync the waiter to the signaller on seqno
877 * @req: request on the ring that is waiting
878 * @signal: request on the ring which has, or will, signal
879 * the seqno which the waiter will block on
883 gen8_ring_sync_to(struct drm_i915_gem_request *req,
884 struct drm_i915_gem_request *signal)
886 struct drm_i915_private *dev_priv = req->i915;
887 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
888 struct i915_hw_ppgtt *ppgtt;
891 cs = intel_ring_begin(req, 4);
895 *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
896 MI_SEMAPHORE_SAD_GTE_SDD;
897 *cs++ = signal->global_seqno;
898 *cs++ = lower_32_bits(offset);
899 *cs++ = upper_32_bits(offset);
900 intel_ring_advance(req, cs);
902 /* When the !RCS engines idle waiting upon a semaphore, they lose their
903 * pagetables and we must reload them before executing the batch.
904 * We do this on the i915_switch_context() following the wait and
905 * before the dispatch.
907 ppgtt = req->ctx->ppgtt;
908 if (ppgtt && req->engine->id != RCS)
909 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
914 gen6_ring_sync_to(struct drm_i915_gem_request *req,
915 struct drm_i915_gem_request *signal)
917 u32 dw1 = MI_SEMAPHORE_MBOX |
918 MI_SEMAPHORE_COMPARE |
919 MI_SEMAPHORE_REGISTER;
920 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
923 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
925 cs = intel_ring_begin(req, 4);
929 *cs++ = dw1 | wait_mbox;
930 /* Throughout all of the GEM code, seqno passed implies our current
931 * seqno is >= the last seqno executed. However for hardware the
932 * comparison is strictly greater than.
934 *cs++ = signal->global_seqno - 1;
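	/*
	 * Worked example (illustrative): to wait for seqno 100 we program 99,
	 * so the strictly-greater-than check in hardware releases the waiter
	 * once the mailbox holds 100 or more, matching the software >= rule.
	 */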
937 intel_ring_advance(req, cs);
943 gen5_seqno_barrier(struct intel_engine_cs *engine)
945 /* MI_STORE are internally buffered by the GPU and not flushed
946 * either by MI_FLUSH or SyncFlush or any other combination of
949 * "Only the submission of the store operation is guaranteed.
950 * The write result will be complete (coherent) some time later
951 * (this is practically a finite period but there is no guaranteed
954 * Empirically, we observe that we need a delay of at least 75us to
955 * be sure that the seqno write is visible by the CPU.
957 usleep_range(125, 250);
961 gen6_seqno_barrier(struct intel_engine_cs *engine)
963 struct drm_i915_private *dev_priv = engine->i915;
965 /* Workaround to force correct ordering between irq and seqno writes on
966 * ivb (and maybe also on snb) by reading from a CS register (like
967 * ACTHD) before reading the status page.
969 * Note that this effectively stalls the read by the time it takes to
970 * do a memory transaction, which more or less ensures that the write
971 * from the GPU has sufficient time to invalidate the CPU cacheline.
972 * Alternatively we could delay the interrupt from the CS ring to give
973 * the write time to land, but that would incur a delay after every
974 * batch i.e. much more frequent than a delay when waiting for the
975 * interrupt (with the same net latency).
977 * Also note that to prevent whole machine hangs on gen7, we have to
978 * take the spinlock to guard against concurrent cacheline access.
980 spin_lock_irq(&dev_priv->uncore.lock);
981 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
982 spin_unlock_irq(&dev_priv->uncore.lock);
986 gen5_irq_enable(struct intel_engine_cs *engine)
988 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
992 gen5_irq_disable(struct intel_engine_cs *engine)
994 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
998 i9xx_irq_enable(struct intel_engine_cs *engine)
1000 struct drm_i915_private *dev_priv = engine->i915;
1002 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1003 I915_WRITE(IMR, dev_priv->irq_mask);
1004 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1008 i9xx_irq_disable(struct intel_engine_cs *engine)
1010 struct drm_i915_private *dev_priv = engine->i915;
1012 dev_priv->irq_mask |= engine->irq_enable_mask;
1013 I915_WRITE(IMR, dev_priv->irq_mask);
1017 i8xx_irq_enable(struct intel_engine_cs *engine)
1019 struct drm_i915_private *dev_priv = engine->i915;
1021 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1022 I915_WRITE16(IMR, dev_priv->irq_mask);
1023 POSTING_READ16(RING_IMR(engine->mmio_base));
1027 i8xx_irq_disable(struct intel_engine_cs *engine)
1029 struct drm_i915_private *dev_priv = engine->i915;
1031 dev_priv->irq_mask |= engine->irq_enable_mask;
1032 I915_WRITE16(IMR, dev_priv->irq_mask);
1036 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1040 cs = intel_ring_begin(req, 2);
1046 intel_ring_advance(req, cs);
1051 gen6_irq_enable(struct intel_engine_cs *engine)
1053 struct drm_i915_private *dev_priv = engine->i915;
1055 I915_WRITE_IMR(engine,
1056 ~(engine->irq_enable_mask |
1057 engine->irq_keep_mask));
1058 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1062 gen6_irq_disable(struct intel_engine_cs *engine)
1064 struct drm_i915_private *dev_priv = engine->i915;
1066 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1067 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1071 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1073 struct drm_i915_private *dev_priv = engine->i915;
1075 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1076 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
1080 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1082 struct drm_i915_private *dev_priv = engine->i915;
1084 I915_WRITE_IMR(engine, ~0);
1085 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
1089 gen8_irq_enable(struct intel_engine_cs *engine)
1091 struct drm_i915_private *dev_priv = engine->i915;
1093 I915_WRITE_IMR(engine,
1094 ~(engine->irq_enable_mask |
1095 engine->irq_keep_mask));
1096 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1100 gen8_irq_disable(struct intel_engine_cs *engine)
1102 struct drm_i915_private *dev_priv = engine->i915;
1104 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1108 i965_emit_bb_start(struct drm_i915_gem_request *req,
1109 u64 offset, u32 length,
1110 unsigned int dispatch_flags)
1114 cs = intel_ring_begin(req, 2);
1118 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1119 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1121 intel_ring_advance(req, cs);
1126 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1127 #define I830_BATCH_LIMIT (256*1024)
1128 #define I830_TLB_ENTRIES (2)
1129 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
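/*
 * For illustration (not in the original source): with 4KiB pages the TLB
 * eviction blit covers I830_TLB_ENTRIES * 4096 == 8KiB, so I830_WA_SIZE
 * evaluates to max(8KiB, 256KiB) == 256KiB, i.e. the batch-copy limit
 * dominates the size of the scratch area.
 */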
1131 i830_emit_bb_start(struct drm_i915_gem_request *req,
1132 u64 offset, u32 len,
1133 unsigned int dispatch_flags)
1135 u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
1137 cs = intel_ring_begin(req, 6);
1141 /* Evict the invalid PTE TLBs */
1142 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1143 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1144 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
1148 intel_ring_advance(req, cs);
1150 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1151 if (len > I830_BATCH_LIMIT)
1154 cs = intel_ring_begin(req, 6 + 2);
1158 /* Blit the batch (which has now all relocs applied) to the
1159 * stable batch scratch bo area (so that the CS never
1160 * stumbles over its tlb invalidation bug) ...
1162 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
1163 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1164 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1171 intel_ring_advance(req, cs);
1173 /* ... and execute it. */
1177 cs = intel_ring_begin(req, 2);
1181 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1182 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1183 MI_BATCH_NON_SECURE);
1184 intel_ring_advance(req, cs);
1190 i915_emit_bb_start(struct drm_i915_gem_request *req,
1191 u64 offset, u32 len,
1192 unsigned int dispatch_flags)
1196 cs = intel_ring_begin(req, 2);
1200 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1201 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1202 MI_BATCH_NON_SECURE);
1203 intel_ring_advance(req, cs);
1210 int intel_ring_pin(struct intel_ring *ring,
1211 struct drm_i915_private *i915,
1212 unsigned int offset_bias)
1214 enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1215 struct i915_vma *vma = ring->vma;
1220 GEM_BUG_ON(ring->vaddr);
1225 flags |= PIN_OFFSET_BIAS | offset_bias;
1226 if (vma->obj->stolen)
1227 flags |= PIN_MAPPABLE;
1229 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1230 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1231 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1233 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1238 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1242 if (i915_vma_is_map_and_fenceable(vma))
1243 addr = (void __force *)i915_vma_pin_iomap(vma);
1245 addr = i915_gem_object_pin_map(vma->obj, map);
1253 i915_vma_unpin(vma);
1254 return PTR_ERR(addr);
1257 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1259 GEM_BUG_ON(!list_empty(&ring->request_list));
1263 intel_ring_update_space(ring);
1266 void intel_ring_unpin(struct intel_ring *ring)
1268 GEM_BUG_ON(!ring->vma);
1269 GEM_BUG_ON(!ring->vaddr);
1271 /* Discard any unused bytes beyond that submitted to hw. */
1272 intel_ring_reset(ring, ring->tail);
1274 if (i915_vma_is_map_and_fenceable(ring->vma))
1275 i915_vma_unpin_iomap(ring->vma);
1277 i915_gem_object_unpin_map(ring->vma->obj);
1280 i915_vma_unpin(ring->vma);
1283 static struct i915_vma *
1284 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1286 struct drm_i915_gem_object *obj;
1287 struct i915_vma *vma;
1289 obj = i915_gem_object_create_stolen(dev_priv, size);
1291 obj = i915_gem_object_create_internal(dev_priv, size);
1293 return ERR_CAST(obj);
1295 /* mark ring buffers as read-only from GPU side by default */
1298 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1305 i915_gem_object_put(obj);
1310 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1312 struct intel_ring *ring;
1313 struct i915_vma *vma;
1315 GEM_BUG_ON(!is_power_of_2(size));
1316 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1318 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1320 return ERR_PTR(-ENOMEM);
1322 INIT_LIST_HEAD(&ring->request_list);
1325 /* Workaround an erratum on the i830 which causes a hang if
1326 * the TAIL pointer points to within the last 2 cachelines
1329 ring->effective_size = size;
1330 if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1331 ring->effective_size -= 2 * CACHELINE_BYTES;
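	/*
	 * Illustrative arithmetic (not from the original source): with
	 * CACHELINE_BYTES == 64 this reserves 128 bytes, so e.g. a 16KiB
	 * ring advertises an effective_size of 16384 - 128 == 16256 bytes
	 * and the TAIL is never placed inside that final region.
	 */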
1333 intel_ring_update_space(ring);
1335 vma = intel_ring_create_vma(engine->i915, size);
1338 return ERR_CAST(vma);
1346 intel_ring_free(struct intel_ring *ring)
1348 struct drm_i915_gem_object *obj = ring->vma->obj;
1350 i915_vma_close(ring->vma);
1351 __i915_gem_object_release_unless_active(obj);
1356 static int context_pin(struct i915_gem_context *ctx)
1358 struct i915_vma *vma = ctx->engine[RCS].state;
1361 /* Clear this page out of any CPU caches for coherent swap-in/out.
1362 * We only want to do this on the first bind so that we do not stall
1363 * on an active context (which by nature is already on the GPU).
1365 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1366 ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
1371 return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
1372 PIN_GLOBAL | PIN_HIGH);
1375 static struct i915_vma *
1376 alloc_context_vma(struct intel_engine_cs *engine)
1378 struct drm_i915_private *i915 = engine->i915;
1379 struct drm_i915_gem_object *obj;
1380 struct i915_vma *vma;
1382 obj = i915_gem_object_create(i915, engine->context_size);
1384 return ERR_CAST(obj);
1387 * Try to make the context utilize L3 as well as LLC.
1389 * On VLV we don't have L3 controls in the PTEs so we
1390 * shouldn't touch the cache level, especially as that
1391 * would make the object snooped which might have a
1392 * negative performance impact.
1394 * Snooping is required on non-llc platforms in execlist
1395 * mode, but since all GGTT accesses use PAT entry 0 we
1396 * get snooping anyway regardless of cache_level.
1398 * This is only applicable for Ivy Bridge devices since
1399 * later platforms don't have L3 control bits in the PTE.
1401 if (IS_IVYBRIDGE(i915)) {
1402 /* Ignore any error, regard it as a simple optimisation */
1403 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
1406 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1408 i915_gem_object_put(obj);
1413 static struct intel_ring *
1414 intel_ring_context_pin(struct intel_engine_cs *engine,
1415 struct i915_gem_context *ctx)
1417 struct intel_context *ce = &ctx->engine[engine->id];
1420 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1422 if (likely(ce->pin_count++))
1424 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1426 if (!ce->state && engine->context_size) {
1427 struct i915_vma *vma;
1429 vma = alloc_context_vma(engine);
1439 ret = context_pin(ctx);
1443 ce->state->obj->mm.dirty = true;
1446 /* The kernel context is only used as a placeholder for flushing the
1447 * active context. It is never used for submitting user rendering and
1448 * as such never requires the golden render context, and so we can skip
1449 * emitting it when we switch to the kernel context. This is required
1450 * as during eviction we cannot allocate and pin the renderstate in
1451 * order to initialise the context.
1453 if (i915_gem_context_is_kernel(ctx))
1454 ce->initialised = true;
1456 i915_gem_context_get(ctx);
1459 /* One ringbuffer to rule them all */
1460 return engine->buffer;
1464 return ERR_PTR(ret);
1467 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
1468 struct i915_gem_context *ctx)
1470 struct intel_context *ce = &ctx->engine[engine->id];
1472 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1473 GEM_BUG_ON(ce->pin_count == 0);
1475 if (--ce->pin_count)
1479 i915_vma_unpin(ce->state);
1481 i915_gem_context_put(ctx);
1484 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1486 struct intel_ring *ring;
1489 intel_engine_setup_common(engine);
1491 err = intel_engine_init_common(engine);
1495 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
1497 err = PTR_ERR(ring);
1501 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1502 err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
1506 GEM_BUG_ON(engine->buffer);
1507 engine->buffer = ring;
1512 intel_ring_free(ring);
1514 intel_engine_cleanup_common(engine);
1518 void intel_engine_cleanup(struct intel_engine_cs *engine)
1520 struct drm_i915_private *dev_priv = engine->i915;
1522 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1523 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
1525 intel_ring_unpin(engine->buffer);
1526 intel_ring_free(engine->buffer);
1528 if (engine->cleanup)
1529 engine->cleanup(engine);
1531 intel_engine_cleanup_common(engine);
1533 dev_priv->engine[engine->id] = NULL;
1537 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1539 struct intel_engine_cs *engine;
1540 enum intel_engine_id id;
1542 /* Restart from the beginning of the rings for convenience */
1543 for_each_engine(engine, dev_priv, id)
1544 intel_ring_reset(engine->buffer, 0);
1547 static int ring_request_alloc(struct drm_i915_gem_request *request)
1551 GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
1553 /* Flush enough space to reduce the likelihood of waiting after
1554 * we start building the request - in which case we will just
1555 * have to repeat work.
1557 request->reserved_space += LEGACY_REQUEST_SIZE;
1559 cs = intel_ring_begin(request, 0);
1563 request->reserved_space -= LEGACY_REQUEST_SIZE;
1567 static noinline int wait_for_space(struct drm_i915_gem_request *req,
1570 struct intel_ring *ring = req->ring;
1571 struct drm_i915_gem_request *target;
1574 lockdep_assert_held(&req->i915->drm.struct_mutex);
1576 if (intel_ring_update_space(ring) >= bytes)
1580 * Space is reserved in the ringbuffer for finalising the request,
1581 * as that cannot be allowed to fail. During request finalisation,
1582 * reserved_space is set to 0 to stop the overallocation and the
1583 * assumption is that then we never need to wait (which has the
1584 * risk of failing with EINTR).
1586 * See also i915_gem_request_alloc() and i915_add_request().
1588 GEM_BUG_ON(!req->reserved_space);
1590 list_for_each_entry(target, &ring->request_list, ring_link) {
1591 /* Would completion of this request free enough space? */
1592 if (bytes <= __intel_ring_space(target->postfix,
1593 ring->emit, ring->size))
1597 if (WARN_ON(&target->ring_link == &ring->request_list))
1600 timeout = i915_wait_request(target,
1601 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
1602 MAX_SCHEDULE_TIMEOUT);
1606 i915_gem_request_retire_upto(target);
1608 intel_ring_update_space(ring);
1609 GEM_BUG_ON(ring->space < bytes);
1613 u32 *intel_ring_begin(struct drm_i915_gem_request *req,
1614 unsigned int num_dwords)
1616 struct intel_ring *ring = req->ring;
1617 const unsigned int remain_usable = ring->effective_size - ring->emit;
1618 const unsigned int bytes = num_dwords * sizeof(u32);
1619 unsigned int need_wrap = 0;
1620 unsigned int total_bytes;
1623 /* Packets must be qword aligned. */
1624 GEM_BUG_ON(num_dwords & 1);
1626 total_bytes = bytes + req->reserved_space;
1627 GEM_BUG_ON(total_bytes > ring->effective_size);
1629 if (unlikely(total_bytes > remain_usable)) {
1630 const int remain_actual = ring->size - ring->emit;
1632 if (bytes > remain_usable) {
1634 * Not enough space for the basic request. So need to
1635 * flush out the remainder and then wait for
1638 total_bytes += remain_actual;
1639 need_wrap = remain_actual | 1;
1642 * The base request will fit but the reserved space
1643 * falls off the end. So we don't need an immediate
1644 * wrap and only need to effectively wait for the
1645 * reserved size from the start of the ringbuffer.
1647 total_bytes = req->reserved_space + remain_actual;
1651 if (unlikely(total_bytes > ring->space)) {
1652 int ret = wait_for_space(req, total_bytes);
1654 return ERR_PTR(ret);
1657 if (unlikely(need_wrap)) {
1659 GEM_BUG_ON(need_wrap > ring->space);
1660 GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1662 /* Fill the tail with MI_NOOP */
1663 memset(ring->vaddr + ring->emit, 0, need_wrap);
1665 ring->space -= need_wrap;
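		/*
		 * Worked example (illustrative, assuming a hypothetically
		 * small 4096-byte ring): with ring->emit == 4080 and a
		 * 64-byte request, remain_actual == 16, so those 16 bytes
		 * are zeroed (MI_NOOP), deducted from the space accounting,
		 * and emission continues from the start of the ring.
		 */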
1668 GEM_BUG_ON(ring->emit > ring->size - bytes);
1669 GEM_BUG_ON(ring->space < bytes);
1670 cs = ring->vaddr + ring->emit;
1671 GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
1672 ring->emit += bytes;
1673 ring->space -= bytes;
1678 /* Align the ring tail to a cacheline boundary */
1679 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
1682 (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1685 if (num_dwords == 0)
1688 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
1689 cs = intel_ring_begin(req, num_dwords);
1693 while (num_dwords--)
1696 intel_ring_advance(req, cs);
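/*
 * Worked example (illustrative): with CACHELINE_BYTES == 64 a cacheline
 * holds 16 dwords, so if ring->emit sits 24 bytes (6 dwords) past a
 * cacheline boundary, this pads with 16 - 6 == 10 MI_NOOPs to bring the
 * tail back onto a 64-byte boundary.
 */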
1701 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
1703 struct drm_i915_private *dev_priv = request->i915;
1705 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1707 /* Every tail move must follow the sequence below */
1709 /* Disable notification that the ring is IDLE. The GT
1710 * will then assume that it is busy and bring it out of rc6.
1712 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1713 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1715 /* Clear the context id. Here be magic! */
1716 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
1718 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1719 if (__intel_wait_for_register_fw(dev_priv,
1720 GEN6_BSD_SLEEP_PSMI_CONTROL,
1721 GEN6_BSD_SLEEP_INDICATOR,
1724 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1726 /* Now that the ring is fully powered up, update the tail */
1727 i9xx_submit_request(request);
1729 /* Let the ring send IDLE messages to the GT again,
1730 * and so let it sleep to conserve power when idle.
1732 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1733 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1735 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1738 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1742 cs = intel_ring_begin(req, 4);
1747 if (INTEL_GEN(req->i915) >= 8)
1750 /* We always require a command barrier so that subsequent
1751 * commands, such as breadcrumb interrupts, are strictly ordered
1752 * wrt the contents of the write cache being flushed to memory
1753 * (and thus being coherent from the CPU).
1755 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1758 * Bspec vol 1c.5 - video engine command streamer:
1759 * "If ENABLED, all TLBs will be invalidated once the flush
1760 * operation is complete. This bit is only valid when the
1761 * Post-Sync Operation field is a value of 1h or 3h."
1763 if (mode & EMIT_INVALIDATE)
1764 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1767 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1768 if (INTEL_GEN(req->i915) >= 8) {
1769 *cs++ = 0; /* upper addr */
1770 *cs++ = 0; /* value */
1775 intel_ring_advance(req, cs);
1780 gen8_emit_bb_start(struct drm_i915_gem_request *req,
1781 u64 offset, u32 len,
1782 unsigned int dispatch_flags)
1784 bool ppgtt = USES_PPGTT(req->i915) &&
1785 !(dispatch_flags & I915_DISPATCH_SECURE);
1788 cs = intel_ring_begin(req, 4);
1792 /* FIXME(BDW): Address space and security selectors. */
1793 *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
1794 I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
1795 *cs++ = lower_32_bits(offset);
1796 *cs++ = upper_32_bits(offset);
1798 intel_ring_advance(req, cs);
1804 hsw_emit_bb_start(struct drm_i915_gem_request *req,
1805 u64 offset, u32 len,
1806 unsigned int dispatch_flags)
1810 cs = intel_ring_begin(req, 2);
1814 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1815 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
1816 (dispatch_flags & I915_DISPATCH_RS ?
1817 MI_BATCH_RESOURCE_STREAMER : 0);
1818 /* bits 0-7 are the length on GEN6+ */
1820 intel_ring_advance(req, cs);
1826 gen6_emit_bb_start(struct drm_i915_gem_request *req,
1827 u64 offset, u32 len,
1828 unsigned int dispatch_flags)
1832 cs = intel_ring_begin(req, 2);
1836 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1837 0 : MI_BATCH_NON_SECURE_I965);
1838 /* bits 0-7 are the length on GEN6+ */
1840 intel_ring_advance(req, cs);
1845 /* Blitter support (SandyBridge+) */
1847 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1851 cs = intel_ring_begin(req, 4);
1856 if (INTEL_GEN(req->i915) >= 8)
1859 /* We always require a command barrier so that subsequent
1860 * commands, such as breadcrumb interrupts, are strictly ordered
1861 * wrt the contents of the write cache being flushed to memory
1862 * (and thus being coherent from the CPU).
1864 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1867 * Bspec vol 1c.3 - blitter engine command streamer:
1868 * "If ENABLED, all TLBs will be invalidated once the flush
1869 * operation is complete. This bit is only valid when the
1870 * Post-Sync Operation field is a value of 1h or 3h."
1872 if (mode & EMIT_INVALIDATE)
1873 cmd |= MI_INVALIDATE_TLB;
1875 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1876 if (INTEL_GEN(req->i915) >= 8) {
1877 *cs++ = 0; /* upper addr */
1878 *cs++ = 0; /* value */
1883 intel_ring_advance(req, cs);
1888 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
1889 struct intel_engine_cs *engine)
1891 struct drm_i915_gem_object *obj;
1894 if (!i915_modparams.semaphores)
1897 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
1898 struct i915_vma *vma;
1900 obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
1904 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1908 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1912 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1916 dev_priv->semaphore = vma;
1919 if (INTEL_GEN(dev_priv) >= 8) {
1920 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
1922 engine->semaphore.sync_to = gen8_ring_sync_to;
1923 engine->semaphore.signal = gen8_xcs_signal;
1925 for (i = 0; i < I915_NUM_ENGINES; i++) {
1928 if (i != engine->id)
1929 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
1931 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
1933 engine->semaphore.signal_ggtt[i] = ring_offset;
1935 } else if (INTEL_GEN(dev_priv) >= 6) {
1936 engine->semaphore.sync_to = gen6_ring_sync_to;
1937 engine->semaphore.signal = gen6_signal;
1940 * The current semaphore is only applied on pre-gen8
1941 * platforms, and there is no VCS2 ring on pre-gen8
1942 * platforms, so the semaphore between RCS and VCS2 is
1943 * initialized as INVALID. Gen8 will initialize the
1944 * semaphore between VCS2 and RCS later.
1946 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
1947 static const struct {
1949 i915_reg_t mbox_reg;
1950 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
1952 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
1953 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
1954 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
1957 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
1958 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
1959 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
1962 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
1963 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
1964 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
1967 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
1968 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
1969 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
1973 i915_reg_t mbox_reg;
1975 if (i == engine->hw_id) {
1976 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
1977 mbox_reg = GEN6_NOSYNC;
1979 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
1980 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
1983 engine->semaphore.mbox.wait[i] = wait_mbox;
1984 engine->semaphore.mbox.signal[i] = mbox_reg;
1991 i915_gem_object_put(obj);
1993 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
1994 i915_modparams.semaphores = 0;
1997 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
1998 struct intel_engine_cs *engine)
2000 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2002 if (INTEL_GEN(dev_priv) >= 8) {
2003 engine->irq_enable = gen8_irq_enable;
2004 engine->irq_disable = gen8_irq_disable;
2005 engine->irq_seqno_barrier = gen6_seqno_barrier;
2006 } else if (INTEL_GEN(dev_priv) >= 6) {
2007 engine->irq_enable = gen6_irq_enable;
2008 engine->irq_disable = gen6_irq_disable;
2009 engine->irq_seqno_barrier = gen6_seqno_barrier;
2010 } else if (INTEL_GEN(dev_priv) >= 5) {
2011 engine->irq_enable = gen5_irq_enable;
2012 engine->irq_disable = gen5_irq_disable;
2013 engine->irq_seqno_barrier = gen5_seqno_barrier;
2014 } else if (INTEL_GEN(dev_priv) >= 3) {
2015 engine->irq_enable = i9xx_irq_enable;
2016 engine->irq_disable = i9xx_irq_disable;
2018 engine->irq_enable = i8xx_irq_enable;
2019 engine->irq_disable = i8xx_irq_disable;
2023 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2025 engine->submit_request = i9xx_submit_request;
2026 engine->cancel_requests = cancel_requests;
2029 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2031 engine->submit_request = gen6_bsd_submit_request;
2032 engine->cancel_requests = cancel_requests;
2035 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2036 struct intel_engine_cs *engine)
2038 intel_ring_init_irq(dev_priv, engine);
2039 intel_ring_init_semaphores(dev_priv, engine);
2041 engine->init_hw = init_ring_common;
2042 engine->reset_hw = reset_ring_common;
2044 engine->context_pin = intel_ring_context_pin;
2045 engine->context_unpin = intel_ring_context_unpin;
2047 engine->request_alloc = ring_request_alloc;
2049 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2050 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2051 if (i915_modparams.semaphores) {
2054 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
2056 num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
2057 if (INTEL_GEN(dev_priv) >= 8) {
2058 engine->emit_breadcrumb_sz += num_rings * 6;
2060 engine->emit_breadcrumb_sz += num_rings * 3;
2062 engine->emit_breadcrumb_sz++;
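		/*
		 * For illustration (not in the original source), assuming four
		 * engines so num_rings == 3: a gen8 xcs breadcrumb grows from
		 * 4 to 4 + 3 * 6 == 22 dwords, while a gen6/7 breadcrumb grows
		 * to 4 + 3 * 3 == 13 dwords plus the extra dword added above,
		 * padding the total to 14 so it stays qword-aligned.
		 */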
2066 engine->set_default_submission = i9xx_set_default_submission;
2068 if (INTEL_GEN(dev_priv) >= 8)
2069 engine->emit_bb_start = gen8_emit_bb_start;
2070 else if (INTEL_GEN(dev_priv) >= 6)
2071 engine->emit_bb_start = gen6_emit_bb_start;
2072 else if (INTEL_GEN(dev_priv) >= 4)
2073 engine->emit_bb_start = i965_emit_bb_start;
2074 else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2075 engine->emit_bb_start = i830_emit_bb_start;
2077 engine->emit_bb_start = i915_emit_bb_start;
2080 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2082 struct drm_i915_private *dev_priv = engine->i915;
2085 intel_ring_default_vfuncs(dev_priv, engine);
2087 if (HAS_L3_DPF(dev_priv))
2088 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2090 if (INTEL_GEN(dev_priv) >= 8) {
2091 engine->init_context = intel_rcs_ctx_init;
2092 engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
2093 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
2094 engine->emit_flush = gen8_render_ring_flush;
2095 if (i915_modparams.semaphores) {
2098 engine->semaphore.signal = gen8_rcs_signal;
2100 num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
2101 engine->emit_breadcrumb_sz += num_rings * 8;
2103 } else if (INTEL_GEN(dev_priv) >= 6) {
2104 engine->init_context = intel_rcs_ctx_init;
2105 engine->emit_flush = gen7_render_ring_flush;
2106 if (IS_GEN6(dev_priv))
2107 engine->emit_flush = gen6_render_ring_flush;
2108 } else if (IS_GEN5(dev_priv)) {
2109 engine->emit_flush = gen4_render_ring_flush;
2111 if (INTEL_GEN(dev_priv) < 4)
2112 engine->emit_flush = gen2_render_ring_flush;
2114 engine->emit_flush = gen4_render_ring_flush;
2115 engine->irq_enable_mask = I915_USER_INTERRUPT;
2118 if (IS_HASWELL(dev_priv))
2119 engine->emit_bb_start = hsw_emit_bb_start;
2121 engine->init_hw = init_render_ring;
2122 engine->cleanup = render_ring_cleanup;
2124 ret = intel_init_ring_buffer(engine);
2128 if (INTEL_GEN(dev_priv) >= 6) {
2129 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2132 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2133 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
2141 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2143 struct drm_i915_private *dev_priv = engine->i915;
2145 intel_ring_default_vfuncs(dev_priv, engine);
2147 if (INTEL_GEN(dev_priv) >= 6) {
2148 /* gen6 bsd needs a special wa for tail updates */
2149 if (IS_GEN6(dev_priv))
2150 engine->set_default_submission = gen6_bsd_set_default_submission;
2151 engine->emit_flush = gen6_bsd_ring_flush;
2152 if (INTEL_GEN(dev_priv) < 8)
2153 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2155 engine->mmio_base = BSD_RING_BASE;
2156 engine->emit_flush = bsd_ring_flush;
2157 if (IS_GEN5(dev_priv))
2158 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2160 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2163 return intel_init_ring_buffer(engine);
2166 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2168 struct drm_i915_private *dev_priv = engine->i915;
2170 intel_ring_default_vfuncs(dev_priv, engine);
2172 engine->emit_flush = gen6_ring_flush;
2173 if (INTEL_GEN(dev_priv) < 8)
2174 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2176 return intel_init_ring_buffer(engine);
2179 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2181 struct drm_i915_private *dev_priv = engine->i915;
2183 intel_ring_default_vfuncs(dev_priv, engine);
2185 engine->emit_flush = gen6_ring_flush;
2187 if (INTEL_GEN(dev_priv) < 8) {
2188 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2189 engine->irq_enable = hsw_vebox_irq_enable;
2190 engine->irq_disable = hsw_vebox_irq_disable;
2193 return intel_init_ring_buffer(engine);