#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
struct intel_hw_status_page {
        struct i915_vma *vma;
        u32 *page_addr;
        u32 ggtt_offset;
};
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
        (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
        (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
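
/*
 * Worked example (illustrative): with I915_NUM_ENGINES == 5 and 8-byte slots,
 * engine 0 signalling engine 1 writes at GEN8_SEMAPHORE_OFFSET(0, 1)
 * = (0 * 5 + 1) * 8 = 0x08 from dev_priv->semaphore->node.start, and the
 * waiting engine reads back exactly the same qword via GEN8_WAIT_OFFSET(),
 * since the f(x, y) and g(x, y) mappings in the table further below are
 * transposes of each other.
 */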
enum intel_engine_hangcheck_action {
        ENGINE_ACTIVE_SEQNO,
        ENGINE_ACTIVE_HEAD,
        ENGINE_ACTIVE_SUBUNITS,
        ENGINE_WAIT_KICK,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
        switch (a) {
        case ENGINE_ACTIVE_SEQNO:
                return "active seqno";
        case ENGINE_ACTIVE_HEAD:
                return "active head";
        case ENGINE_ACTIVE_SUBUNITS:
                return "active subunits";
        case ENGINE_WAIT_KICK:
                return "wait kick";
        default:
                return "unknown";
        }
}
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
        (INTEL_GEN(dev_priv__) == 7 ? \
         1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
        (INTEL_GEN(dev_priv__) == 7 ? \
         1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
        for ((slice__) = 0, (subslice__) = 0; \
             (slice__) < I915_MAX_SLICES; \
             (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
               (slice__) += ((subslice__) == 0)) \
                for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
                            (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
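
/*
 * Illustrative sketch, not part of the original header: counting how many
 * slice/subslice pairs the iterator above actually visits on a device. The
 * helper name is made up, and it assumes the full struct drm_i915_private
 * definition (i915_drv.h) is visible so INTEL_GEN()/INTEL_INFO() expand.
 */
static inline unsigned int
example_count_instdone_units(struct drm_i915_private *dev_priv)
{
        unsigned int count = 0;
        int slice, subslice;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                count++; /* this pair is present in the sseu masks */

        return count;
}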
struct intel_instdone {
        u32 instdone;
        /* The following exist only in the RCS engine */
        u32 slice_common;
        u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
        u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
struct intel_engine_hangcheck {
        enum intel_engine_hangcheck_action action;
        unsigned long action_timestamp;
        struct intel_instdone instdone;
        struct drm_i915_gem_request *active_request;
};
struct intel_ring {
        struct i915_vma *vma;
        void *vaddr;

        struct list_head request_list;

        u32 head;
        u32 tail;
        u32 emit;

        u32 space;
        u32 size;
};
struct i915_gem_context;
struct drm_i915_reg_table;
/*
 * we use a single page to load ctx workarounds so all of these
 * values are referred to in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
        struct i915_vma *vma;
};
struct drm_i915_gem_request;
struct intel_render_state;
/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
        RCS = 0,
        BCS,
        VCS,
        VCS2,
        VECS
};

#define _VCS(n) (VCS + (n))
struct i915_priolist {
        struct rb_node node;
        struct list_head requests;
        int priority;
};
/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware for execlist mode of submission.
 */
struct intel_engine_execlists {
        /**
         * @irq_tasklet: softirq tasklet for bottom handler
         */
        struct tasklet_struct irq_tasklet;

        /**
         * @default_priolist: priority list for I915_PRIORITY_NORMAL
         */
        struct i915_priolist default_priolist;

        /**
         * @no_priolist: priority lists disabled
         */
        bool no_priolist;

        /**
         * @port: execlist port states
         *
         * For each hardware ELSP (ExecList Submission Port) we keep
         * track of the last request and the number of times we submitted
         * that port to hw. We then count the number of times the hw reports
         * a context completion or preemption. As only one context can
         * be active on hw, we limit resubmission of a context to port[0].
         * This is called a Lite Restore of the context.
         */
        struct execlist_port {
                /**
                 * @request_count: combined request and submission count
                 */
                struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

                /**
                 * @context_id: context ID for port
                 */
                GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
        } port[EXECLIST_MAX_PORTS];
        /**
         * @active: is the HW active? We consider the HW as active after
         * submitting any context for execution and until we have seen the
         * last context completion event. After that, we do not expect any
         * more events until we submit, and so can park the HW.
         *
         * As we have a small number of different sources from which we feed
         * the HW, we track the state of each inside a single bitfield.
         */
        unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
        /**
         * @port_mask: number of execlist ports - 1
         */
        unsigned int port_mask;

        /**
         * @queue: queue of requests, in priority lists
         */
        struct rb_root queue;

        /**
         * @first: leftmost level in priority @queue
         */
        struct rb_node *first;

        /**
         * @fw_domains: forcewake domains for irq tasklet
         */
        unsigned int fw_domains;

        /**
         * @csb_head: context status buffer head
         */
        unsigned int csb_head;

        /**
         * @csb_use_mmio: access csb through mmio, instead of hwsp
         */
        bool csb_use_mmio;
};
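
/*
 * Illustrative sketch, not part of the original header: one way the port_*()
 * helpers above compose when placing a request into ELSP port 0. The helper
 * name is made up, and request reference counting (which real submission
 * code must also handle) is deliberately ignored here.
 */
static inline void
example_port_submit(struct execlist_port *port,
                    struct drm_i915_gem_request *rq)
{
        if (port_isset(port) && port_request(port) == rq) {
                /* Same context already in the port (Lite Restore): bump count. */
                unsigned int count;

                port_unpack(port, &count);
                port_set(port, port_pack(rq, count + 1));
        } else {
                /* Fresh submission: store the request with a count of one. */
                port_set(port, port_pack(rq, 1));
        }
}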
#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
        struct drm_i915_private *i915;
        char name[INTEL_ENGINE_CS_MAX_NAME];
        enum intel_engine_id id;
        unsigned int uabi_id;
        unsigned int irq_shift;

        struct intel_ring *buffer;
        struct intel_timeline *timeline;

        struct intel_render_state *render_state;

        unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1
        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
         * heavyweight seqno dance, we delegate the task (of being the
         * bottom-half of the user interrupt) to the first client. After
         * every interrupt, we wake up one client, who does the heavyweight
         * coherent seqno read and either goes back to sleep (if incomplete),
         * or wakes up all the completed clients in parallel, before then
         * transferring the bottom-half status to the next client in the queue.
         *
         * Compared to walking the entire list of waiters in a single dedicated
         * bottom-half, we reduce the latency of the first waiter by avoiding
         * a context switch, but incur additional coherent seqno reads when
         * following the chain of request breadcrumbs. Since it is most likely
         * that we have a single client waiting on each seqno, then reducing
         * the overhead of waking that client is much preferred.
         */
        struct intel_breadcrumbs {
                spinlock_t irq_lock; /* protects irq_*; irqsafe */
                struct intel_wait *irq_wait; /* oldest waiter by retirement */

                spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
                struct rb_root waiters; /* sorted by retirement, priority */
                struct rb_root signals; /* sorted by retirement */
                struct task_struct *signaler; /* used for fence signalling */
                struct drm_i915_gem_request __rcu *first_signal;
                struct timer_list fake_irq; /* used after a missed interrupt */
                struct timer_list hangcheck; /* detect missed interrupts */

                unsigned int hangcheck_interrupts;
                unsigned int irq_enabled;

                I915_SELFTEST_DECLARE(bool mock : 1);
        } breadcrumbs;
        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
        struct i915_vma *scratch;
        u32 irq_keep_mask; /* always keep these interrupts */
        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
        void (*irq_enable)(struct intel_engine_cs *engine);
        void (*irq_disable)(struct intel_engine_cs *engine);

        int (*init_hw)(struct intel_engine_cs *engine);
        void (*reset_hw)(struct intel_engine_cs *engine,
                         struct drm_i915_gem_request *req);

        void (*park)(struct intel_engine_cs *engine);
        void (*unpark)(struct intel_engine_cs *engine);

        void (*set_default_submission)(struct intel_engine_cs *engine);

        struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
                                          struct i915_gem_context *ctx);
        void (*context_unpin)(struct intel_engine_cs *engine,
                              struct i915_gem_context *ctx);
        int (*request_alloc)(struct drm_i915_gem_request *req);
        int (*init_context)(struct drm_i915_gem_request *req);
        int (*emit_flush)(struct drm_i915_gem_request *request,
                          u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
        int (*emit_bb_start)(struct drm_i915_gem_request *req,
                             u64 offset, u32 length,
                             unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
        void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
                                u32 *cs);
        int emit_breadcrumb_sz;
        /* Pass the request to the hardware queue (e.g. directly into
         * the legacy ringbuffer or to the end of an execlist).
         *
         * This is called from an atomic context with irqs disabled; must
         * be irq safe.
         */
        void (*submit_request)(struct drm_i915_gem_request *req);

        /* Call when the priority on a request has changed and it and its
         * dependencies may need rescheduling. Note the request itself may
         * not be ready to run!
         *
         * Called under the struct_mutex.
         */
        void (*schedule)(struct drm_i915_gem_request *request,
                         int priority);

        /*
         * Cancel all requests on the hardware, or queued for execution.
         * This should only cancel the ready requests that have been
         * submitted to the engine (via the engine->submit_request callback).
         * This is called when marking the device as wedged.
         */
        void (*cancel_requests)(struct intel_engine_cs *engine);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
        void (*cleanup)(struct intel_engine_cs *engine);
        /* GEN8 signal/wait table - never trust comments!
         *
         *        signal to     signal to     signal to     signal to     signal to
         *          RCS           VCS           BCS           VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90)  | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0)  |
         *      |-------------------------------------------------------------------
         *
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *        sync from     sync from     sync from     sync from     sync from
         *          RCS           VCS           BCS           VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90)  | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0)  |
         *      |-------------------------------------------------------------------
         *
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
        struct {
                union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
                        struct {
                                /* our mbox written by others */
                                u32 wait[GEN6_NUM_SEMAPHORES];
                                /* mboxes this ring signals to */
                                i915_reg_t signal[GEN6_NUM_SEMAPHORES];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_ENGINES];
                };

                /* AKA wait() */
                int (*sync_to)(struct drm_i915_gem_request *req,
                               struct drm_i915_gem_request *signal);
                u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
        } semaphore;
        struct intel_engine_execlists execlists;

        /* Contexts are pinned whilst they are active on the GPU. The last
         * context executed remains active whilst the GPU is idle - the
         * switch away and write to the context object only occurs on the
         * next execution. Contexts are only unpinned on retirement of the
         * following request ensuring that we can always write to the object
         * on the context switch even after idling. Across suspend, we switch
         * to the kernel context and trash it as the save may not happen
         * before the hardware is powered down.
         */
        struct i915_gem_context *last_retired_context;

        /* We track the current MI_SET_CONTEXT in order to eliminate
         * redundant context switches. This presumes that requests are not
         * reordered! Or, when they are, the tracking is updated along with
         * the emission of individual requests into the legacy command
         * stream (ring).
         */
        struct i915_gem_context *legacy_active_context;
        /* status_notifier: list of callbacks for context-switch changes */
        struct atomic_notifier_head context_status_notifier;

        struct intel_engine_hangcheck hangcheck;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this engine.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_table *reg_tables;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the engine's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-engine length
         * field encoding for the command (i.e. different opcode ranges use
         * certain bits to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
                     unsigned int bit)
{
        __set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
                       unsigned int bit)
{
        __clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
                    unsigned int bit)
{
        return test_bit(bit, (unsigned long *)&execlists->active);
}
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
        return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
                        struct execlist_port * const port)
{
        const unsigned int m = execlists->port_mask;

        GEM_BUG_ON(port_index(port, execlists) != 0);
        GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

        memmove(port, port + 1, m * sizeof(struct execlist_port));
        memset(port + m, 0, sizeof(struct execlist_port));
}
static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
        return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
        /* Writing into the status page should be done sparingly. Since
         * we do so when we are uncertain of the device state, we take a bit
         * of extra paranoia to try and ensure that the HWS takes the value
         * we give and that it doesn't end up trapped inside the CPU!
         */
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                mb();
                clflush(&engine->status_page.page_addr[reg]);
                engine->status_page.page_addr[reg] = value;
                clflush(&engine->status_page.page_addr[reg]);
                mb();
        } else {
                WRITE_ONCE(engine->status_page.page_addr[reg], value);
        }
}
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
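
/*
 * Worked example (illustrative): the dword index is converted to a byte
 * offset within the status page by MI_STORE_DWORD_INDEX_SHIFT; assuming the
 * usual shift of 2, I915_GEM_HWS_INDEX_ADDR is 0x30 << 2 = 0xc0 bytes into
 * the page, which intel_hws_seqno_address() below adds to the page's GGTT
 * offset.
 */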
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
                   struct drm_i915_private *i915,
                   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
                                   unsigned int n);
static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
        /* Dummy function.
         *
         * This serves as a placeholder in the code so that the reader
         * can compare against the preceding intel_ring_begin() and
         * check that the number of dwords emitted matches the space
         * reserved for the command packet (i.e. the value passed to
         * intel_ring_begin()).
         */
        GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
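
/*
 * Illustrative sketch, not part of the original header: the intended pairing
 * of intel_ring_begin() and intel_ring_advance(). The helper name and the
 * two MI_NOOPs are made up for the example; MI_NOOP is assumed to be visible
 * via i915_reg.h.
 */
static inline int example_emit_two_noops(struct drm_i915_gem_request *req)
{
        u32 *cs;

        cs = intel_ring_begin(req, 2); /* reserve space for 2 dwords */
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_NOOP;
        *cs++ = MI_NOOP;

        /* Must account for exactly the 2 dwords reserved above. */
        intel_ring_advance(req, cs);

        return 0;
}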
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
        return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - req->ring->vaddr;

        GEM_BUG_ON(offset > req->ring->size);
        return intel_ring_wrap(req->ring, offset);
}
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
        /* We could combine these into a single tail operation, but keeping
         * them as separate tests will help identify the cause should one
         * ever fire.
         */
        GEM_BUG_ON(!IS_ALIGNED(tail, 8));
        GEM_BUG_ON(tail >= ring->size);

        /*
         * "Ring Buffer Use"
         *      Gen2 BSpec "1. Programming Environment" / 1.4.4.6
         *      Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
         *      Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
         * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
         *  same cacheline, the Head Pointer must not be greater than the Tail
         *  Pointer."
         *
         * We use ring->head as the last known location of the actual RING_HEAD;
         * it may have advanced, but in the worst case it is still equal to
         * ring->head, and so we should never program RING_TAIL to advance
         * into the same cacheline as ring->head.
         */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
        GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
                   tail < ring->head);
#undef cacheline
}
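
/*
 * Worked example (illustrative): with CACHELINE_BYTES == 64 and a last known
 * ring->head of 0x84, a tail of 0x80..0x83 shares the 0x80..0xbf cacheline
 * while sitting behind the head and so trips the assertion above, whereas a
 * tail of 0x84..0xbf, or anything in a different cacheline, is allowed.
 */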
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
        /* Whilst writes to the tail are strictly ordered, there is no
         * serialisation between readers and the writers. The tail may be
         * read by i915_gem_request_retire() just as it is being updated
         * by execlists, as although the breadcrumb is complete, the context
         * switch hasn't been seen.
         */
        assert_ring_tail_valid(ring, tail);
        ring->tail = tail;
        return tail;
}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
        /* We are only peeking at the tail of the submit queue (and not the
         * queue itself) in order to gain a hint as to the current active
         * state of the engine. Callers are not expected to be taking
         * engine->timeline->lock, nor are they expected to be concerned
         * with serialising this hint with anything, so document it as
         * a hint and nothing more.
         */
        return READ_ONCE(engine->timeline->seqno);
}
int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
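/* Worked out: 6 + 6 + 72 dwords = 84 dwords = 84 * sizeof(u32) = 336 bytes. */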
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
        return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
        return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait,
                                   struct drm_i915_gem_request *rq)
{
        wait->tsk = current;
        wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
        wait->tsk = current;
        wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
        return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
        wait->seqno = seqno;
        return intel_wait_has_seqno(wait);
}
static inline bool
intel_wait_update_request(struct intel_wait *wait,
                          const struct drm_i915_gem_request *rq)
{
        return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
        return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
                         const struct drm_i915_gem_request *rq)
{
        return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
        return RB_EMPTY_NODE(&wait->node);
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
                                   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
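
/*
 * Illustrative sketch, not part of the original header: the basic pairing of
 * the waiter API above. The helper name is made up, and the real sleep/wake
 * loop that sits between add and remove is elided here.
 */
static inline void example_register_wait(struct intel_engine_cs *engine,
                                         struct drm_i915_gem_request *rq)
{
        struct intel_wait wait;

        intel_wait_init(&wait, rq);

        /* Join the engine's waiter tree; one waiter acts as the bottom-half. */
        intel_engine_add_wait(engine, &wait);

        /* ... normally sleep here and recheck the seqno until it completes ... */

        intel_engine_remove_wait(engine, &wait);
}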
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
        return READ_ONCE(engine->breadcrumbs.irq_wait);
}
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
        memset(batch, 0, 6 * sizeof(u32));

        batch[0] = GFX_OP_PIPE_CONTROL(6);
        batch[1] = flags;
        batch[2] = offset;

        return batch + 6;
}
static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
        /* We're using qword write, offset should be aligned to 8 bytes. */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        /* w/a for post sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
        *cs++ = GFX_OP_PIPE_CONTROL(6);
        *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
                PIPE_CONTROL_QW_WRITE;
        *cs++ = gtt_offset;
        *cs++ = 0;
        *cs++ = value;
        /* We're thrashing one dword of HWS. */
        *cs++ = 0;

        return cs;
}
static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        GEM_BUG_ON(gtt_offset & (1 << 5));
        /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
        *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
        *cs++ = 0;
        *cs++ = value;

        return cs;
}
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
#endif /* _INTEL_RINGBUFFER_H_ */