2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
30 #include <linux/log2.h>
33 #include <drm/i915_drm.h>
36 #include "i915_gem_render_state.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39 #include "intel_workarounds.h"
41 /* Rough estimate of the typical request size, performing a flush,
42 * set-context and then emitting the batch.
44 #define LEGACY_REQUEST_SIZE 200
46 static unsigned int __intel_ring_space(unsigned int head,
51 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
52 * same cacheline, the Head Pointer must not be greater than the Tail
55 GEM_BUG_ON(!is_power_of_2(size));
56 return (head - tail - CACHELINE_BYTES) & (size - 1);
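/*
 * Worked example of the wrap arithmetic above (values chosen purely for
 * illustration, assuming CACHELINE_BYTES is 64): with size = 4096,
 * head = 128 and tail = 256, the usable space is
 * (128 - 256 - 64) & 4095 = 3904 bytes, i.e. the distance from the tail
 * back around to the head, minus one cacheline of guard space.
 */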
59 unsigned int intel_ring_update_space(struct intel_ring *ring)
63 space = __intel_ring_space(ring->head, ring->emit, ring->size);
70 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
76 if (mode & EMIT_INVALIDATE)
79 cs = intel_ring_begin(rq, 2);
85 intel_ring_advance(rq, cs);
91 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
98 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
99 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
100 * also flushed at 2d versus 3d pipeline switches.
104 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
105 * MI_READ_FLUSH is set, and is always flushed on 965.
107 * I915_GEM_DOMAIN_COMMAND may not exist?
109 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
110 * invalidated when MI_EXE_FLUSH is set.
112 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
113 * invalidated with every MI_FLUSH.
117 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
118 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
119 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
120 * are flushed at any MI_FLUSH.
124 if (mode & EMIT_INVALIDATE) {
126 if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
127 cmd |= MI_INVALIDATE_ISP;
130 cs = intel_ring_begin(rq, 2);
136 intel_ring_advance(rq, cs);
142 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
143 * implementing two workarounds on gen6. From section 1.4.7.1
144 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
146 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
147 * produced by non-pipelined state commands), software needs to first
148 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
151 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
152 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
154 * And the workaround for these two requires this workaround first:
156 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
157 * BEFORE the pipe-control with a post-sync op and no write-cache
160 * And this last workaround is tricky because of the requirements on
161 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
164 * "1 of the following must also be set:
165 * - Render Target Cache Flush Enable ([12] of DW1)
166 * - Depth Cache Flush Enable ([0] of DW1)
167 * - Stall at Pixel Scoreboard ([1] of DW1)
168 * - Depth Stall ([13] of DW1)
169 * - Post-Sync Operation ([13] of DW1)
170 * - Notify Enable ([8] of DW1)"
172 * The cache flushes require the workaround flush that triggered this
173 * one, so we can't use it. Depth stall would trigger the same.
174 * Post-sync nonzero is what triggered this second workaround, so we
175 * can't use that one either. Notify enable is IRQs, which aren't
176 * really our business. That leaves only stall at scoreboard.
179 intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
182 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
185 cs = intel_ring_begin(rq, 6);
189 *cs++ = GFX_OP_PIPE_CONTROL(5);
190 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
191 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
192 *cs++ = 0; /* low dword */
193 *cs++ = 0; /* high dword */
195 intel_ring_advance(rq, cs);
197 cs = intel_ring_begin(rq, 6);
201 *cs++ = GFX_OP_PIPE_CONTROL(5);
202 *cs++ = PIPE_CONTROL_QW_WRITE;
203 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
207 intel_ring_advance(rq, cs);
213 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
216 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
220 /* Force SNB workarounds for PIPE_CONTROL flushes */
221 ret = intel_emit_post_sync_nonzero_flush(rq);
225 /* Just flush everything. Experiments have shown that reducing the
226 * number of bits based on the write domains has little performance
229 if (mode & EMIT_FLUSH) {
230 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
231 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
233 * Ensure that any following seqno writes only happen
234 * when the render cache is indeed flushed.
236 flags |= PIPE_CONTROL_CS_STALL;
238 if (mode & EMIT_INVALIDATE) {
239 flags |= PIPE_CONTROL_TLB_INVALIDATE;
240 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
241 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
242 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
243 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
244 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
246 * TLB invalidate requires a post-sync write.
248 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
251 cs = intel_ring_begin(rq, 4);
255 *cs++ = GFX_OP_PIPE_CONTROL(4);
257 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
259 intel_ring_advance(rq, cs);
265 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
269 cs = intel_ring_begin(rq, 4);
273 *cs++ = GFX_OP_PIPE_CONTROL(4);
274 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
277 intel_ring_advance(rq, cs);
283 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
286 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
290 * Ensure that any following seqno writes only happen when the render
291 * cache is indeed flushed.
293 * Workaround: 4th PIPE_CONTROL command (except the ones with only
294 * read-cache invalidate bits set) must have the CS_STALL bit set. We
295 * don't try to be clever and just set it unconditionally.
297 flags |= PIPE_CONTROL_CS_STALL;
299 /* Just flush everything. Experiments have shown that reducing the
300 * number of bits based on the write domains has little performance
303 if (mode & EMIT_FLUSH) {
304 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
305 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
306 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
307 flags |= PIPE_CONTROL_FLUSH_ENABLE;
309 if (mode & EMIT_INVALIDATE) {
310 flags |= PIPE_CONTROL_TLB_INVALIDATE;
311 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
312 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
313 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
314 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
315 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
316 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
318 * TLB invalidate requires a post-sync write.
320 flags |= PIPE_CONTROL_QW_WRITE;
321 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
323 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
325 /* Workaround: we must issue a pipe_control with CS-stall bit
326 * set before a pipe_control command that has the state cache
327 * invalidate bit set. */
328 gen7_render_ring_cs_stall_wa(rq);
331 cs = intel_ring_begin(rq, 4);
335 *cs++ = GFX_OP_PIPE_CONTROL(4);
337 *cs++ = scratch_addr;
339 intel_ring_advance(rq, cs);
344 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
346 struct drm_i915_private *dev_priv = engine->i915;
349 addr = dev_priv->status_page_dmah->busaddr;
350 if (INTEL_GEN(dev_priv) >= 4)
351 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
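/*
 * Illustrative example (address invented): for a 36-bit bus address such
 * as 0x123456000, the low 32 bits (0x23456000) are used directly and
 * bits [35:32] (here 0x1) are folded into bits [7:4], giving 0x23456010.
 */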
352 I915_WRITE(HWS_PGA, addr);
355 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
357 struct drm_i915_private *dev_priv = engine->i915;
360 /* The ring status page addresses are no longer next to the rest of
361 * the ring registers as of gen7.
363 if (IS_GEN7(dev_priv)) {
364 switch (engine->id) {
366 * No more rings exist on Gen7. The default case exists only to
367 * silence the gcc switch-check warning.
370 GEM_BUG_ON(engine->id);
372 mmio = RENDER_HWS_PGA_GEN7;
375 mmio = BLT_HWS_PGA_GEN7;
378 mmio = BSD_HWS_PGA_GEN7;
381 mmio = VEBOX_HWS_PGA_GEN7;
384 } else if (IS_GEN6(dev_priv)) {
385 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
387 mmio = RING_HWS_PGA(engine->mmio_base);
390 if (INTEL_GEN(dev_priv) >= 6)
391 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
393 I915_WRITE(mmio, engine->status_page.ggtt_offset);
396 /* Flush the TLB for this page */
397 if (IS_GEN(dev_priv, 6, 7)) {
398 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
400 /* Ring should be idle before issuing a sync flush */
401 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
404 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
406 if (intel_wait_for_register(dev_priv,
407 reg, INSTPM_SYNC_FLUSH, 0,
409 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
414 static bool stop_ring(struct intel_engine_cs *engine)
416 struct drm_i915_private *dev_priv = engine->i915;
418 if (INTEL_GEN(dev_priv) > 2) {
419 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
420 if (intel_wait_for_register(dev_priv,
421 RING_MI_MODE(engine->mmio_base),
425 DRM_ERROR("%s : timed out trying to stop ring\n",
427 /* Sometimes we observe that the idle flag is not
428 * set even though the ring is empty. So double
429 * check before giving up.
431 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
436 I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
438 I915_WRITE_HEAD(engine, 0);
439 I915_WRITE_TAIL(engine, 0);
441 /* The ring must be empty before it is disabled */
442 I915_WRITE_CTL(engine, 0);
444 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
447 static int init_ring_common(struct intel_engine_cs *engine)
449 struct drm_i915_private *dev_priv = engine->i915;
450 struct intel_ring *ring = engine->buffer;
453 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
455 if (!stop_ring(engine)) {
456 /* G45 ring initialization often fails to reset head to zero */
457 DRM_DEBUG_DRIVER("%s head not reset to zero "
458 "ctl %08x head %08x tail %08x start %08x\n",
460 I915_READ_CTL(engine),
461 I915_READ_HEAD(engine),
462 I915_READ_TAIL(engine),
463 I915_READ_START(engine));
465 if (!stop_ring(engine)) {
466 DRM_ERROR("failed to set %s head to zero "
467 "ctl %08x head %08x tail %08x start %08x\n",
469 I915_READ_CTL(engine),
470 I915_READ_HEAD(engine),
471 I915_READ_TAIL(engine),
472 I915_READ_START(engine));
478 if (HWS_NEEDS_PHYSICAL(dev_priv))
479 ring_setup_phys_status_page(engine);
481 intel_ring_setup_status_page(engine);
483 intel_engine_reset_breadcrumbs(engine);
485 /* Enforce ordering by reading HEAD register back */
486 I915_READ_HEAD(engine);
488 /* Initialize the ring. This must happen _after_ we've cleared the ring
489 * registers with the above sequence (the readback of the HEAD registers
490 * also enforces ordering), otherwise the hw might lose the new ring
491 * register values. */
492 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
494 /* WaClearRingBufHeadRegAtInit:ctg,elk */
495 if (I915_READ_HEAD(engine))
496 DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
497 engine->name, I915_READ_HEAD(engine));
499 intel_ring_update_space(ring);
500 I915_WRITE_HEAD(engine, ring->head);
501 I915_WRITE_TAIL(engine, ring->tail);
502 (void)I915_READ_TAIL(engine);
504 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
506 /* If the ring does not become VALID, it is dead */
507 if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
508 RING_VALID, RING_VALID,
510 DRM_ERROR("%s initialization failed "
511 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
513 I915_READ_CTL(engine),
514 I915_READ_CTL(engine) & RING_VALID,
515 I915_READ_HEAD(engine), ring->head,
516 I915_READ_TAIL(engine), ring->tail,
517 I915_READ_START(engine),
518 i915_ggtt_offset(ring->vma));
523 intel_engine_init_hangcheck(engine);
525 if (INTEL_GEN(dev_priv) > 2)
526 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
529 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
534 static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
536 intel_engine_stop_cs(engine);
538 if (engine->irq_seqno_barrier)
539 engine->irq_seqno_barrier(engine);
541 return i915_gem_find_active_request(engine);
544 static void reset_ring(struct intel_engine_cs *engine,
545 struct i915_request *request)
547 GEM_TRACE("%s seqno=%x\n",
548 engine->name, request ? request->global_seqno : 0);
551 * RC6 must be prevented until the reset is complete and the engine
552 * reinitialised. If it occurs in the middle of this sequence, the
553 * state written to/loaded from the power context is ill-defined (e.g.
554 * the PP_BASE_DIR may be lost).
556 assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
559 * Try to restore the logical GPU state to match the continuation
560 * of the request queue. If we skip the context/PD restore, then
561 * the next request may try to execute assuming that its context
562 * is valid and loaded on the GPU and so may try to access invalid
563 * memory, prompting repeated GPU hangs.
565 * If the request was guilty, we still restore the logical state
566 * in case the next request requires it (e.g. the aliasing ppgtt),
567 * but skip over the hung batch.
569 * If the request was innocent, we try to replay the request with
570 * the restored context.
573 struct drm_i915_private *dev_priv = request->i915;
574 struct intel_context *ce = request->hw_context;
575 struct i915_hw_ppgtt *ppgtt;
579 i915_ggtt_offset(ce->state) |
580 BIT(8) /* must be set! */ |
581 CCID_EXTENDED_STATE_SAVE |
582 CCID_EXTENDED_STATE_RESTORE |
586 ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
588 u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
590 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
591 I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
593 /* Wait for the PD reload to complete */
594 if (intel_wait_for_register(dev_priv,
595 RING_PP_DIR_BASE(engine),
598 DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
600 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
603 /* If the rq hung, jump to its breadcrumb and skip the batch */
604 if (request->fence.error == -EIO)
605 request->ring->head = request->postfix;
607 engine->legacy_active_context = NULL;
608 engine->legacy_active_ppgtt = NULL;
612 static void reset_finish(struct intel_engine_cs *engine)
616 static int intel_rcs_ctx_init(struct i915_request *rq)
620 ret = intel_ctx_workarounds_emit(rq);
624 ret = i915_gem_render_state_emit(rq);
631 static int init_render_ring(struct intel_engine_cs *engine)
633 struct drm_i915_private *dev_priv = engine->i915;
634 int ret = init_ring_common(engine);
638 intel_whitelist_workarounds_apply(engine);
640 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
641 if (IS_GEN(dev_priv, 4, 6))
642 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
644 /* We need to disable the AsyncFlip performance optimisations in order
645 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
646 * programmed to '1' on all products.
648 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
650 if (IS_GEN(dev_priv, 6, 7))
651 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
653 /* Required for the hardware to program scanline values for waiting */
654 /* WaEnableFlushTlbInvalidationMode:snb */
655 if (IS_GEN6(dev_priv))
657 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
659 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
660 if (IS_GEN7(dev_priv))
661 I915_WRITE(GFX_MODE_GEN7,
662 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
663 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
665 if (IS_GEN6(dev_priv)) {
666 /* From the Sandybridge PRM, volume 1 part 3, page 24:
667 * "If this bit is set, STCunit will have LRA as replacement
668 * policy. [...] This bit must be reset. LRA replacement
669 * policy is not supported."
671 I915_WRITE(CACHE_MODE_0,
672 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
675 if (IS_GEN(dev_priv, 6, 7))
676 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
678 if (INTEL_GEN(dev_priv) >= 6)
679 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
684 static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
686 struct drm_i915_private *dev_priv = rq->i915;
687 struct intel_engine_cs *engine;
688 enum intel_engine_id id;
691 for_each_engine(engine, dev_priv, id) {
694 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
697 mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
698 if (i915_mmio_reg_valid(mbox_reg)) {
699 *cs++ = MI_LOAD_REGISTER_IMM(1);
700 *cs++ = i915_mmio_reg_offset(mbox_reg);
701 *cs++ = rq->global_seqno;
711 static void cancel_requests(struct intel_engine_cs *engine)
713 struct i915_request *request;
716 spin_lock_irqsave(&engine->timeline.lock, flags);
718 /* Mark all submitted requests as skipped. */
719 list_for_each_entry(request, &engine->timeline.requests, link) {
720 GEM_BUG_ON(!request->global_seqno);
721 if (!i915_request_completed(request))
722 dma_fence_set_error(&request->fence, -EIO);
724 /* Remaining _unready_ requests will be nop'ed when submitted */
726 spin_unlock_irqrestore(&engine->timeline.lock, flags);
729 static void i9xx_submit_request(struct i915_request *request)
731 struct drm_i915_private *dev_priv = request->i915;
733 i915_request_submit(request);
735 I915_WRITE_TAIL(request->engine,
736 intel_ring_set_tail(request->ring, request->tail));
739 static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
741 *cs++ = MI_STORE_DWORD_INDEX;
742 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
743 *cs++ = rq->global_seqno;
744 *cs++ = MI_USER_INTERRUPT;
746 rq->tail = intel_ring_offset(rq, cs);
747 assert_ring_tail_valid(rq->ring, rq->tail);
750 static const int i9xx_emit_breadcrumb_sz = 4;
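/*
 * Note: the value above must match the number of dwords written by
 * i9xx_emit_breadcrumb() (STORE_DWORD_INDEX, index, seqno, USER_INTERRUPT).
 */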
752 static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
754 return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
758 gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
760 u32 dw1 = MI_SEMAPHORE_MBOX |
761 MI_SEMAPHORE_COMPARE |
762 MI_SEMAPHORE_REGISTER;
763 u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
766 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
768 cs = intel_ring_begin(rq, 4);
772 *cs++ = dw1 | wait_mbox;
773 /* Throughout all of the GEM code, seqno passed implies our current
774 * seqno is >= the last seqno executed. However for hardware the
775 * comparison is strictly greater than.
777 *cs++ = signal->global_seqno - 1;
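/*
 * Concretely: to know that the request with global_seqno N has executed,
 * we program the hardware to wait for "current seqno > N - 1", which is
 * the same condition as "current seqno >= N".
 */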
780 intel_ring_advance(rq, cs);
786 gen5_seqno_barrier(struct intel_engine_cs *engine)
788 /* MI_STORE commands are internally buffered by the GPU and not flushed
789 * either by MI_FLUSH or SyncFlush or any other combination of
792 * "Only the submission of the store operation is guaranteed.
793 * The write result will be complete (coherent) some time later
794 * (this is practically a finite period but there is no guaranteed
797 * Empirically, we observe that we need a delay of at least 75us to
798 * be sure that the seqno write is visible by the CPU.
800 usleep_range(125, 250);
804 gen6_seqno_barrier(struct intel_engine_cs *engine)
806 struct drm_i915_private *dev_priv = engine->i915;
808 /* Workaround to force correct ordering between irq and seqno writes on
809 * ivb (and maybe also on snb) by reading from a CS register (like
810 * ACTHD) before reading the status page.
812 * Note that this effectively stalls the read by the time it takes to
813 * do a memory transaction, which more or less ensures that the write
814 * from the GPU has sufficient time to invalidate the CPU cacheline.
815 * Alternatively we could delay the interrupt from the CS ring to give
816 * the write time to land, but that would incur a delay after every
817 * batch i.e. much more frequent than a delay when waiting for the
818 * interrupt (with the same net latency).
820 * Also note that to prevent whole machine hangs on gen7, we have to
821 * take the spinlock to guard against concurrent cacheline access.
823 spin_lock_irq(&dev_priv->uncore.lock);
824 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
825 spin_unlock_irq(&dev_priv->uncore.lock);
829 gen5_irq_enable(struct intel_engine_cs *engine)
831 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
835 gen5_irq_disable(struct intel_engine_cs *engine)
837 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
841 i9xx_irq_enable(struct intel_engine_cs *engine)
843 struct drm_i915_private *dev_priv = engine->i915;
845 dev_priv->irq_mask &= ~engine->irq_enable_mask;
846 I915_WRITE(IMR, dev_priv->irq_mask);
847 POSTING_READ_FW(RING_IMR(engine->mmio_base));
851 i9xx_irq_disable(struct intel_engine_cs *engine)
853 struct drm_i915_private *dev_priv = engine->i915;
855 dev_priv->irq_mask |= engine->irq_enable_mask;
856 I915_WRITE(IMR, dev_priv->irq_mask);
860 i8xx_irq_enable(struct intel_engine_cs *engine)
862 struct drm_i915_private *dev_priv = engine->i915;
864 dev_priv->irq_mask &= ~engine->irq_enable_mask;
865 I915_WRITE16(IMR, dev_priv->irq_mask);
866 POSTING_READ16(RING_IMR(engine->mmio_base));
870 i8xx_irq_disable(struct intel_engine_cs *engine)
872 struct drm_i915_private *dev_priv = engine->i915;
874 dev_priv->irq_mask |= engine->irq_enable_mask;
875 I915_WRITE16(IMR, dev_priv->irq_mask);
879 bsd_ring_flush(struct i915_request *rq, u32 mode)
883 cs = intel_ring_begin(rq, 2);
889 intel_ring_advance(rq, cs);
894 gen6_irq_enable(struct intel_engine_cs *engine)
896 struct drm_i915_private *dev_priv = engine->i915;
898 I915_WRITE_IMR(engine,
899 ~(engine->irq_enable_mask |
900 engine->irq_keep_mask));
901 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
905 gen6_irq_disable(struct intel_engine_cs *engine)
907 struct drm_i915_private *dev_priv = engine->i915;
909 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
910 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
914 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
916 struct drm_i915_private *dev_priv = engine->i915;
918 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
919 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
923 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
925 struct drm_i915_private *dev_priv = engine->i915;
927 I915_WRITE_IMR(engine, ~0);
928 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
932 i965_emit_bb_start(struct i915_request *rq,
933 u64 offset, u32 length,
934 unsigned int dispatch_flags)
938 cs = intel_ring_begin(rq, 2);
942 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
943 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
945 intel_ring_advance(rq, cs);
950 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
951 #define I830_BATCH_LIMIT (256*1024)
952 #define I830_TLB_ENTRIES (2)
953 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
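/*
 * With the values above this works out to max(2 * 4096, 256 * 1024),
 * i.e. a 256KiB scratch area for the w/a batch copy.
 */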
955 i830_emit_bb_start(struct i915_request *rq,
957 unsigned int dispatch_flags)
959 u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
961 cs = intel_ring_begin(rq, 6);
965 /* Evict the invalid PTE TLBs */
966 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
967 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
968 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
972 intel_ring_advance(rq, cs);
974 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
975 if (len > I830_BATCH_LIMIT)
978 cs = intel_ring_begin(rq, 6 + 2);
982 * Blit the batch (which now has all relocs applied) to the
983 * stable batch scratch bo area (so that the CS never
984 * stumbles over its tlb invalidation bug) ...
986 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
987 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
988 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
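/*
 * i.e. the copy is expressed as DIV_ROUND_UP(len, 4096) rows of
 * 4096 bytes each, matching the 4096-byte pitch programmed above.
 */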
995 intel_ring_advance(rq, cs);
997 /* ... and execute it. */
1001 cs = intel_ring_begin(rq, 2);
1005 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1006 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1007 MI_BATCH_NON_SECURE);
1008 intel_ring_advance(rq, cs);
1014 i915_emit_bb_start(struct i915_request *rq,
1015 u64 offset, u32 len,
1016 unsigned int dispatch_flags)
1020 cs = intel_ring_begin(rq, 2);
1024 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1025 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1026 MI_BATCH_NON_SECURE);
1027 intel_ring_advance(rq, cs);
1034 int intel_ring_pin(struct intel_ring *ring,
1035 struct drm_i915_private *i915,
1036 unsigned int offset_bias)
1038 enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1039 struct i915_vma *vma = ring->vma;
1044 GEM_BUG_ON(ring->vaddr);
1049 flags |= PIN_OFFSET_BIAS | offset_bias;
1050 if (vma->obj->stolen)
1051 flags |= PIN_MAPPABLE;
1055 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1056 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1057 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1059 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1064 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1068 if (i915_vma_is_map_and_fenceable(vma))
1069 addr = (void __force *)i915_vma_pin_iomap(vma);
1071 addr = i915_gem_object_pin_map(vma->obj, map);
1075 vma->obj->pin_global++;
1081 i915_vma_unpin(vma);
1082 return PTR_ERR(addr);
1085 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1090 intel_ring_update_space(ring);
1093 void intel_ring_unpin(struct intel_ring *ring)
1095 GEM_BUG_ON(!ring->vma);
1096 GEM_BUG_ON(!ring->vaddr);
1098 /* Discard any unused bytes beyond those submitted to hw. */
1099 intel_ring_reset(ring, ring->tail);
1101 if (i915_vma_is_map_and_fenceable(ring->vma))
1102 i915_vma_unpin_iomap(ring->vma);
1104 i915_gem_object_unpin_map(ring->vma->obj);
1107 ring->vma->obj->pin_global--;
1108 i915_vma_unpin(ring->vma);
1111 static struct i915_vma *
1112 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1114 struct drm_i915_gem_object *obj;
1115 struct i915_vma *vma;
1117 obj = i915_gem_object_create_stolen(dev_priv, size);
1119 obj = i915_gem_object_create_internal(dev_priv, size);
1121 return ERR_CAST(obj);
1123 /* mark ring buffers as read-only from GPU side by default */
1126 vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
1133 i915_gem_object_put(obj);
1138 intel_engine_create_ring(struct intel_engine_cs *engine,
1139 struct i915_timeline *timeline,
1142 struct intel_ring *ring;
1143 struct i915_vma *vma;
1145 GEM_BUG_ON(!is_power_of_2(size));
1146 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1147 GEM_BUG_ON(timeline == &engine->timeline);
1148 lockdep_assert_held(&engine->i915->drm.struct_mutex);
1150 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1152 return ERR_PTR(-ENOMEM);
1154 INIT_LIST_HEAD(&ring->request_list);
1155 ring->timeline = i915_timeline_get(timeline);
1158 /* Workaround an erratum on the i830 which causes a hang if
1159 * the TAIL pointer points to within the last 2 cachelines
1162 ring->effective_size = size;
1163 if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1164 ring->effective_size -= 2 * CACHELINE_BYTES;
1166 intel_ring_update_space(ring);
1168 vma = intel_ring_create_vma(engine->i915, size);
1171 return ERR_CAST(vma);
1179 intel_ring_free(struct intel_ring *ring)
1181 struct drm_i915_gem_object *obj = ring->vma->obj;
1183 i915_vma_close(ring->vma);
1184 __i915_gem_object_release_unless_active(obj);
1186 i915_timeline_put(ring->timeline);
1190 static void intel_ring_context_destroy(struct intel_context *ce)
1192 GEM_BUG_ON(ce->pin_count);
1195 __i915_gem_object_release_unless_active(ce->state->obj);
1198 static int __context_pin(struct intel_context *ce)
1200 struct i915_vma *vma;
1208 * Clear this page out of any CPU caches for coherent swap-in/out.
1209 * We only want to do this on the first bind so that we do not stall
1210 * on an active context (which by nature is already on the GPU).
1212 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1213 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1218 err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
1219 PIN_GLOBAL | PIN_HIGH);
1224 * And mark it as a globally pinned object to let the shrinker know
1225 * it cannot reclaim the object until we release it.
1227 vma->obj->pin_global++;
1232 static void __context_unpin(struct intel_context *ce)
1234 struct i915_vma *vma;
1240 vma->obj->pin_global--;
1241 i915_vma_unpin(vma);
1244 static void intel_ring_context_unpin(struct intel_context *ce)
1246 __context_unpin(ce);
1248 i915_gem_context_put(ce->gem_context);
1251 static struct i915_vma *
1252 alloc_context_vma(struct intel_engine_cs *engine)
1254 struct drm_i915_private *i915 = engine->i915;
1255 struct drm_i915_gem_object *obj;
1256 struct i915_vma *vma;
1259 obj = i915_gem_object_create(i915, engine->context_size);
1261 return ERR_CAST(obj);
1263 if (engine->default_state) {
1264 void *defaults, *vaddr;
1266 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1267 if (IS_ERR(vaddr)) {
1268 err = PTR_ERR(vaddr);
1272 defaults = i915_gem_object_pin_map(engine->default_state,
1274 if (IS_ERR(defaults)) {
1275 err = PTR_ERR(defaults);
1279 memcpy(vaddr, defaults, engine->context_size);
1281 i915_gem_object_unpin_map(engine->default_state);
1282 i915_gem_object_unpin_map(obj);
1286 * Try to make the context utilize L3 as well as LLC.
1288 * On VLV we don't have L3 controls in the PTEs so we
1289 * shouldn't touch the cache level, especially as that
1290 * would make the object snooped which might have a
1291 * negative performance impact.
1293 * Snooping is required on non-llc platforms in execlist
1294 * mode, but since all GGTT accesses use PAT entry 0 we
1295 * get snooping anyway regardless of cache_level.
1297 * This is only applicable for Ivy Bridge devices since
1298 * later platforms don't have L3 control bits in the PTE.
1300 if (IS_IVYBRIDGE(i915)) {
1301 /* Ignore any error, regard it as a simple optimisation */
1302 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
1305 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1314 i915_gem_object_unpin_map(obj);
1316 i915_gem_object_put(obj);
1317 return ERR_PTR(err);
1320 static struct intel_context *
1321 __ring_context_pin(struct intel_engine_cs *engine,
1322 struct i915_gem_context *ctx,
1323 struct intel_context *ce)
1327 if (!ce->state && engine->context_size) {
1328 struct i915_vma *vma;
1330 vma = alloc_context_vma(engine);
1339 err = __context_pin(ce);
1343 i915_gem_context_get(ctx);
1345 /* One ringbuffer to rule them all */
1346 GEM_BUG_ON(!engine->buffer);
1347 ce->ring = engine->buffer;
1353 return ERR_PTR(err);
1356 static const struct intel_context_ops ring_context_ops = {
1357 .unpin = intel_ring_context_unpin,
1358 .destroy = intel_ring_context_destroy,
1361 static struct intel_context *
1362 intel_ring_context_pin(struct intel_engine_cs *engine,
1363 struct i915_gem_context *ctx)
1365 struct intel_context *ce = to_intel_context(ctx, engine);
1367 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1369 if (likely(ce->pin_count++))
1371 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1373 ce->ops = &ring_context_ops;
1375 return __ring_context_pin(engine, ctx, ce);
1378 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1380 struct intel_ring *ring;
1381 struct i915_timeline *timeline;
1384 intel_engine_setup_common(engine);
1386 timeline = i915_timeline_create(engine->i915, engine->name);
1387 if (IS_ERR(timeline)) {
1388 err = PTR_ERR(timeline);
1392 ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
1393 i915_timeline_put(timeline);
1395 err = PTR_ERR(ring);
1399 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1400 err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
1404 GEM_BUG_ON(engine->buffer);
1405 engine->buffer = ring;
1407 err = intel_engine_init_common(engine);
1414 intel_ring_unpin(ring);
1416 intel_ring_free(ring);
1418 intel_engine_cleanup_common(engine);
1422 void intel_engine_cleanup(struct intel_engine_cs *engine)
1424 struct drm_i915_private *dev_priv = engine->i915;
1426 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1427 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
1429 intel_ring_unpin(engine->buffer);
1430 intel_ring_free(engine->buffer);
1432 if (engine->cleanup)
1433 engine->cleanup(engine);
1435 intel_engine_cleanup_common(engine);
1437 dev_priv->engine[engine->id] = NULL;
1441 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1443 struct intel_engine_cs *engine;
1444 enum intel_engine_id id;
1446 /* Restart from the beginning of the rings for convenience */
1447 for_each_engine(engine, dev_priv, id)
1448 intel_ring_reset(engine->buffer, 0);
1451 static inline int mi_set_context(struct i915_request *rq, u32 flags)
1453 struct drm_i915_private *i915 = rq->i915;
1454 struct intel_engine_cs *engine = rq->engine;
1455 enum intel_engine_id id;
1456 const int num_rings =
1457 /* Use an extended w/a on gen7 if signalling from other rings */
1458 (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
1459 INTEL_INFO(i915)->num_rings - 1 :
1464 flags |= MI_MM_SPACE_GTT;
1465 if (IS_HASWELL(i915))
1466 /* These flags are for resource streamer on HSW+ */
1467 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
1469 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
1473 len += 2 + (num_rings ? 4*num_rings + 6 : 0);
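/*
 * For example (hypothetical part with three other rings to signal):
 * num_rings == 3 makes the gen7 w/a add 2 + 4*3 + 6 = 20 extra dwords
 * to the MI_SET_CONTEXT sequence.
 */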
1475 cs = intel_ring_begin(rq, len);
1479 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1480 if (IS_GEN7(i915)) {
1481 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1483 struct intel_engine_cs *signaller;
1485 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1486 for_each_engine(signaller, i915, id) {
1487 if (signaller == engine)
1490 *cs++ = i915_mmio_reg_offset(
1491 RING_PSMI_CTL(signaller->mmio_base));
1492 *cs++ = _MASKED_BIT_ENABLE(
1493 GEN6_PSMI_SLEEP_MSG_DISABLE);
1499 *cs++ = MI_SET_CONTEXT;
1500 *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
1502 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1503 * WaMiSetContext_Hang:snb,ivb,vlv
1507 if (IS_GEN7(i915)) {
1509 struct intel_engine_cs *signaller;
1510 i915_reg_t last_reg = {}; /* keep gcc quiet */
1512 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1513 for_each_engine(signaller, i915, id) {
1514 if (signaller == engine)
1517 last_reg = RING_PSMI_CTL(signaller->mmio_base);
1518 *cs++ = i915_mmio_reg_offset(last_reg);
1519 *cs++ = _MASKED_BIT_DISABLE(
1520 GEN6_PSMI_SLEEP_MSG_DISABLE);
1523 /* Insert a delay before the next switch! */
1524 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1525 *cs++ = i915_mmio_reg_offset(last_reg);
1526 *cs++ = i915_ggtt_offset(engine->scratch);
1529 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1532 intel_ring_advance(rq, cs);
1537 static int remap_l3(struct i915_request *rq, int slice)
1539 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
1545 cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
1550 * Note: We do not worry about the concurrent register cacheline hang
1551 * here because no other code should access these registers other than
1552 * at initialization time.
1554 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
1555 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
1556 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
1557 *cs++ = remap_info[i];
1560 intel_ring_advance(rq, cs);
1565 static int switch_context(struct i915_request *rq)
1567 struct intel_engine_cs *engine = rq->engine;
1568 struct i915_gem_context *to_ctx = rq->gem_context;
1569 struct i915_hw_ppgtt *to_mm =
1570 to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
1571 struct i915_gem_context *from_ctx = engine->legacy_active_context;
1572 struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
1576 lockdep_assert_held(&rq->i915->drm.struct_mutex);
1577 GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1579 if (to_mm != from_mm ||
1580 (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
1581 trace_switch_mm(engine, to_ctx);
1582 ret = to_mm->switch_mm(to_mm, rq);
1586 to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
1587 engine->legacy_active_ppgtt = to_mm;
1588 hw_flags = MI_FORCE_RESTORE;
1591 if (rq->hw_context->state &&
1592 (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
1593 GEM_BUG_ON(engine->id != RCS);
1596 * The kernel context(s) is treated as pure scratch and is not
1597 * expected to retain any state (as we sacrifice it during
1598 * suspend and on resume it may be corrupted). This is ok,
1599 * as nothing actually executes using the kernel context; it
1600 * is purely used for flushing user contexts.
1602 if (i915_gem_context_is_kernel(to_ctx))
1603 hw_flags = MI_RESTORE_INHIBIT;
1605 ret = mi_set_context(rq, hw_flags);
1609 engine->legacy_active_context = to_ctx;
1612 if (to_ctx->remap_slice) {
1613 for (i = 0; i < MAX_L3_SLICES; i++) {
1614 if (!(to_ctx->remap_slice & BIT(i)))
1617 ret = remap_l3(rq, i);
1622 to_ctx->remap_slice = 0;
1628 engine->legacy_active_context = from_ctx;
1630 engine->legacy_active_ppgtt = from_mm;
1635 static int ring_request_alloc(struct i915_request *request)
1639 GEM_BUG_ON(!request->hw_context->pin_count);
1641 /* Flush enough space to reduce the likelihood of waiting after
1642 * we start building the request - in which case we will just
1643 * have to repeat work.
1645 request->reserved_space += LEGACY_REQUEST_SIZE;
1647 ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
1651 ret = switch_context(request);
1655 request->reserved_space -= LEGACY_REQUEST_SIZE;
1659 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
1661 struct i915_request *target;
1664 lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
1666 if (intel_ring_update_space(ring) >= bytes)
1669 GEM_BUG_ON(list_empty(&ring->request_list));
1670 list_for_each_entry(target, &ring->request_list, ring_link) {
1671 /* Would completion of this request free enough space? */
1672 if (bytes <= __intel_ring_space(target->postfix,
1673 ring->emit, ring->size))
1677 if (WARN_ON(&target->ring_link == &ring->request_list))
1680 timeout = i915_request_wait(target,
1681 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
1682 MAX_SCHEDULE_TIMEOUT);
1686 i915_request_retire_upto(target);
1688 intel_ring_update_space(ring);
1689 GEM_BUG_ON(ring->space < bytes);
1693 int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
1695 GEM_BUG_ON(bytes > ring->effective_size);
1696 if (unlikely(bytes > ring->effective_size - ring->emit))
1697 bytes += ring->size - ring->emit;
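/*
 * Worked example (numbers invented): with size = effective_size = 4096,
 * emit = 3968 and bytes = 256, the request does not fit in the 128 bytes
 * left before the end of the ring, so we also account for those 128
 * soon-to-be-NOP-filled tail bytes and wait for 384 bytes in total.
 */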
1699 if (unlikely(bytes > ring->space)) {
1700 int ret = wait_for_space(ring, bytes);
1705 GEM_BUG_ON(ring->space < bytes);
1709 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
1711 struct intel_ring *ring = rq->ring;
1712 const unsigned int remain_usable = ring->effective_size - ring->emit;
1713 const unsigned int bytes = num_dwords * sizeof(u32);
1714 unsigned int need_wrap = 0;
1715 unsigned int total_bytes;
1718 /* Packets must be qword aligned. */
1719 GEM_BUG_ON(num_dwords & 1);
1721 total_bytes = bytes + rq->reserved_space;
1722 GEM_BUG_ON(total_bytes > ring->effective_size);
1724 if (unlikely(total_bytes > remain_usable)) {
1725 const int remain_actual = ring->size - ring->emit;
1727 if (bytes > remain_usable) {
1729 * Not enough space for the basic request. So need to
1730 * flush out the remainder and then wait for
1733 total_bytes += remain_actual;
1734 need_wrap = remain_actual | 1;
1737 * The base request will fit but the reserved space
1738 * falls off the end. So we don't need an immediate
1739 * wrap and only need to effectively wait for the
1740 * reserved size from the start of ringbuffer.
1742 total_bytes = rq->reserved_space + remain_actual;
1746 if (unlikely(total_bytes > ring->space)) {
1750 * Space is reserved in the ringbuffer for finalising the
1751 * request, as that cannot be allowed to fail. During request
1752 * finalisation, reserved_space is set to 0 to stop the
1753 * overallocation and the assumption is that then we never need
1754 * to wait (which has the risk of failing with EINTR).
1756 * See also i915_request_alloc() and i915_request_add().
1758 GEM_BUG_ON(!rq->reserved_space);
1760 ret = wait_for_space(ring, total_bytes);
1762 return ERR_PTR(ret);
1765 if (unlikely(need_wrap)) {
1767 GEM_BUG_ON(need_wrap > ring->space);
1768 GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1769 GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
1771 /* Fill the tail with MI_NOOP */
1772 memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
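/* (MI_NOOP is the all-zeroes encoding, so a plain zero fill emits NOOPs.) */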
1773 ring->space -= need_wrap;
1777 GEM_BUG_ON(ring->emit > ring->size - bytes);
1778 GEM_BUG_ON(ring->space < bytes);
1779 cs = ring->vaddr + ring->emit;
1780 GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
1781 ring->emit += bytes;
1782 ring->space -= bytes;
1787 /* Align the ring tail to a cacheline boundary */
1788 int intel_ring_cacheline_align(struct i915_request *rq)
1793 num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
1794 if (num_dwords == 0)
1797 num_dwords = CACHELINE_DWORDS - num_dwords;
1798 GEM_BUG_ON(num_dwords & 1);
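/*
 * Example (offset invented): if the tail sits 40 bytes into a cacheline,
 * num_dwords is first 10 and then CACHELINE_DWORDS - 10 = 6, so six
 * MI_NOOPs are emitted to pad out to the next 64-byte boundary.
 */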
1800 cs = intel_ring_begin(rq, num_dwords);
1804 memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
1805 intel_ring_advance(rq, cs);
1807 GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
1811 static void gen6_bsd_submit_request(struct i915_request *request)
1813 struct drm_i915_private *dev_priv = request->i915;
1815 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1817 /* Every tail move must follow the sequence below */
1819 /* Disable notification that the ring is IDLE. The GT
1820 * will then assume that it is busy and bring it out of rc6.
1822 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1823 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1825 /* Clear the context id. Here be magic! */
1826 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
1828 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1829 if (__intel_wait_for_register_fw(dev_priv,
1830 GEN6_BSD_SLEEP_PSMI_CONTROL,
1831 GEN6_BSD_SLEEP_INDICATOR,
1834 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1836 /* Now that the ring is fully powered up, update the tail */
1837 i9xx_submit_request(request);
1839 /* Let the ring send IDLE messages to the GT again,
1840 * and so let it sleep to conserve power when idle.
1842 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1843 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1845 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1848 static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
1852 cs = intel_ring_begin(rq, 4);
1858 /* We always require a command barrier so that subsequent
1859 * commands, such as breadcrumb interrupts, are strictly ordered
1860 * wrt the contents of the write cache being flushed to memory
1861 * (and thus being coherent from the CPU).
1863 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1866 * Bspec vol 1c.5 - video engine command streamer:
1867 * "If ENABLED, all TLBs will be invalidated once the flush
1868 * operation is complete. This bit is only valid when the
1869 * Post-Sync Operation field is a value of 1h or 3h."
1871 if (mode & EMIT_INVALIDATE)
1872 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1875 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1878 intel_ring_advance(rq, cs);
1883 hsw_emit_bb_start(struct i915_request *rq,
1884 u64 offset, u32 len,
1885 unsigned int dispatch_flags)
1889 cs = intel_ring_begin(rq, 2);
1893 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1894 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
1895 (dispatch_flags & I915_DISPATCH_RS ?
1896 MI_BATCH_RESOURCE_STREAMER : 0);
1897 /* bit0-7 is the length on GEN6+ */
1899 intel_ring_advance(rq, cs);
1905 gen6_emit_bb_start(struct i915_request *rq,
1906 u64 offset, u32 len,
1907 unsigned int dispatch_flags)
1911 cs = intel_ring_begin(rq, 2);
1915 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1916 0 : MI_BATCH_NON_SECURE_I965);
1917 /* bit0-7 is the length on GEN6+ */
1919 intel_ring_advance(rq, cs);
1924 /* Blitter support (SandyBridge+) */
1926 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
1930 cs = intel_ring_begin(rq, 4);
1936 /* We always require a command barrier so that subsequent
1937 * commands, such as breadcrumb interrupts, are strictly ordered
1938 * wrt the contents of the write cache being flushed to memory
1939 * (and thus being coherent from the CPU).
1941 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1944 * Bspec vol 1c.3 - blitter engine command streamer:
1945 * "If ENABLED, all TLBs will be invalidated once the flush
1946 * operation is complete. This bit is only valid when the
1947 * Post-Sync Operation field is a value of 1h or 3h."
1949 if (mode & EMIT_INVALIDATE)
1950 cmd |= MI_INVALIDATE_TLB;
1952 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1955 intel_ring_advance(rq, cs);
1960 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
1961 struct intel_engine_cs *engine)
1965 if (!HAS_LEGACY_SEMAPHORES(dev_priv))
1968 GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
1969 engine->semaphore.sync_to = gen6_ring_sync_to;
1970 engine->semaphore.signal = gen6_signal;
1973 * Legacy semaphores are only used on pre-gen8 platforms, and
1974 * there is no VCS2 ring on those platforms, so the
1975 * semaphore between RCS and VCS2 is initialized as
1976 * INVALID.
1978 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
1979 static const struct {
1981 i915_reg_t mbox_reg;
1982 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
1984 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
1985 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
1986 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
1989 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
1990 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
1991 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
1994 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
1995 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
1996 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
1999 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2000 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2001 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2005 i915_reg_t mbox_reg;
2007 if (i == engine->hw_id) {
2008 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2009 mbox_reg = GEN6_NOSYNC;
2011 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2012 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2015 engine->semaphore.mbox.wait[i] = wait_mbox;
2016 engine->semaphore.mbox.signal[i] = mbox_reg;
2020 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2021 struct intel_engine_cs *engine)
2023 if (INTEL_GEN(dev_priv) >= 6) {
2024 engine->irq_enable = gen6_irq_enable;
2025 engine->irq_disable = gen6_irq_disable;
2026 engine->irq_seqno_barrier = gen6_seqno_barrier;
2027 } else if (INTEL_GEN(dev_priv) >= 5) {
2028 engine->irq_enable = gen5_irq_enable;
2029 engine->irq_disable = gen5_irq_disable;
2030 engine->irq_seqno_barrier = gen5_seqno_barrier;
2031 } else if (INTEL_GEN(dev_priv) >= 3) {
2032 engine->irq_enable = i9xx_irq_enable;
2033 engine->irq_disable = i9xx_irq_disable;
2035 engine->irq_enable = i8xx_irq_enable;
2036 engine->irq_disable = i8xx_irq_disable;
2040 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2042 engine->submit_request = i9xx_submit_request;
2043 engine->cancel_requests = cancel_requests;
2045 engine->park = NULL;
2046 engine->unpark = NULL;
2049 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2051 i9xx_set_default_submission(engine);
2052 engine->submit_request = gen6_bsd_submit_request;
2055 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2056 struct intel_engine_cs *engine)
2058 /* gen8+ are only supported with execlists */
2059 GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
2061 intel_ring_init_irq(dev_priv, engine);
2062 intel_ring_init_semaphores(dev_priv, engine);
2064 engine->init_hw = init_ring_common;
2065 engine->reset.prepare = reset_prepare;
2066 engine->reset.reset = reset_ring;
2067 engine->reset.finish = reset_finish;
2069 engine->context_pin = intel_ring_context_pin;
2070 engine->request_alloc = ring_request_alloc;
2072 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2073 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2074 if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
2077 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
2079 num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
2080 engine->emit_breadcrumb_sz += num_rings * 3;
2082 engine->emit_breadcrumb_sz++;
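/*
 * Each remote engine signalled by gen6_signal() costs three dwords
 * (LRI opcode, mailbox register offset, seqno value); the extra
 * increment above keeps the overall breadcrumb length qword-aligned.
 */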
2085 engine->set_default_submission = i9xx_set_default_submission;
2087 if (INTEL_GEN(dev_priv) >= 6)
2088 engine->emit_bb_start = gen6_emit_bb_start;
2089 else if (INTEL_GEN(dev_priv) >= 4)
2090 engine->emit_bb_start = i965_emit_bb_start;
2091 else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2092 engine->emit_bb_start = i830_emit_bb_start;
2094 engine->emit_bb_start = i915_emit_bb_start;
2097 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2099 struct drm_i915_private *dev_priv = engine->i915;
2102 intel_ring_default_vfuncs(dev_priv, engine);
2104 if (HAS_L3_DPF(dev_priv))
2105 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2107 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2109 if (INTEL_GEN(dev_priv) >= 6) {
2110 engine->init_context = intel_rcs_ctx_init;
2111 engine->emit_flush = gen7_render_ring_flush;
2112 if (IS_GEN6(dev_priv))
2113 engine->emit_flush = gen6_render_ring_flush;
2114 } else if (IS_GEN5(dev_priv)) {
2115 engine->emit_flush = gen4_render_ring_flush;
2117 if (INTEL_GEN(dev_priv) < 4)
2118 engine->emit_flush = gen2_render_ring_flush;
2120 engine->emit_flush = gen4_render_ring_flush;
2121 engine->irq_enable_mask = I915_USER_INTERRUPT;
2124 if (IS_HASWELL(dev_priv))
2125 engine->emit_bb_start = hsw_emit_bb_start;
2127 engine->init_hw = init_render_ring;
2129 ret = intel_init_ring_buffer(engine);
2133 if (INTEL_GEN(dev_priv) >= 6) {
2134 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2137 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2138 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
2146 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2148 struct drm_i915_private *dev_priv = engine->i915;
2150 intel_ring_default_vfuncs(dev_priv, engine);
2152 if (INTEL_GEN(dev_priv) >= 6) {
2153 /* gen6 bsd needs a special wa for tail updates */
2154 if (IS_GEN6(dev_priv))
2155 engine->set_default_submission = gen6_bsd_set_default_submission;
2156 engine->emit_flush = gen6_bsd_ring_flush;
2157 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2159 engine->emit_flush = bsd_ring_flush;
2160 if (IS_GEN5(dev_priv))
2161 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2163 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2166 return intel_init_ring_buffer(engine);
2169 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2171 struct drm_i915_private *dev_priv = engine->i915;
2173 intel_ring_default_vfuncs(dev_priv, engine);
2175 engine->emit_flush = gen6_ring_flush;
2176 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2178 return intel_init_ring_buffer(engine);
2181 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2183 struct drm_i915_private *dev_priv = engine->i915;
2185 intel_ring_default_vfuncs(dev_priv, engine);
2187 engine->emit_flush = gen6_ring_flush;
2188 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2189 engine->irq_enable = hsw_vebox_irq_enable;
2190 engine->irq_disable = hsw_vebox_irq_disable;
2192 return intel_init_ring_buffer(engine);