/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/types.h>

#include "i915_gem_batch_pool.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9

struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	unsigned long action_timestamp;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct kref ref;
	struct i915_vma *vma;
	void *vaddr;

	struct i915_timeline *timeline;
	struct list_head request_list;
	struct list_head active_link;

	/*
	 * As we have two types of rings, one global to the engine used
	 * by ringbuffer submission and those that are exclusive to a
	 * context used by execlists, we have to play safe and allow
	 * atomic updates to the pin_count. However, the actual pinning
	 * of the context is either done during initialisation for
	 * ringbuffer submission or serialised as part of the context
	 * pinning for execlists, and so we do not need a mutex ourselves
	 * to serialise intel_ring_pin/intel_ring_unpin.
	 */
	atomic_t pin_count;
};
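
/*
 * Illustrative sketch, not part of the original header: with an atomic
 * pin_count, repinning an already-pinned ring needs no mutex. A
 * hypothetical fast path could look like
 *
 *	if (atomic_fetch_inc(&ring->pin_count))
 *		return 0;	// already pinned, just bump the count
 */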

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
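
/*
 * Illustrative sketch, not part of the original header: both batches
 * live in the single page backed by @vma, so a batch's GPU address is
 * the page's GGTT offset plus the per-batch offset, e.g.
 *
 *	u32 bb_start = i915_ggtt_offset(wa_ctx->vma) +
 *		       wa_ctx->indirect_ctx.offset;
 */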

#define I915_MAX_VCS 4
#define I915_MAX_VECS 2

/*
 * Engine ID definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
};

struct st_preempt_hang {
	struct completion completion;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a lite restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
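
	/*
	 * Illustrative sketch, not part of the original header: the low
	 * EXECLIST_COUNT_BITS of @request_count hold the submission count,
	 * so submitting a request once and later unpacking it could look
	 * like
	 *
	 *	unsigned int count;
	 *	struct i915_request *rq;
	 *
	 *	port_set(port, port_pack(new_rq, 1));	// first submission
	 *	rq = port_unpack(port, &count);		// rq == new_rq, count == 1
	 */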

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2
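
	/*
	 * Illustrative sketch, not part of the original header: each source
	 * flips its own bit in @active with the generic bitops, e.g.
	 *
	 *	__set_bit(EXECLISTS_ACTIVE_USER,
	 *		  (unsigned long *)&execlists->active);
	 */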

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request we wanted to preempt but which has since completed, at
	 * the time of dequeuing the priority hint may no longer match the
	 * highest available request priority.
	 */
	int queue_priority_hint;
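
	/*
	 * Illustrative sketch, not part of the original header: the hint is
	 * consumed by a preemption check along the lines of
	 *
	 *	if (execlists->queue_priority_hint > last_prio)
	 *		preempt();	// hypothetical backend helper
	 *
	 * where last_prio stands in for the priority of the executing
	 * request.
	 */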

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;

	intel_engine_mask_t mask;

	u32 uabi_capabilities;

	struct intel_sseu sseu;

	struct intel_ring *buffer;

	struct {
		spinlock_t lock;
		struct list_head requests;
	} active;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */
	struct intel_context *preempt_context; /* pinned; optional */

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing the
	 * overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine, bool stalled);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);
	int (*init_context)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
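
	/*
	 * Illustrative sketch, not part of the original header: a full
	 * barrier combines both operations, e.g.
	 *
	 *	err = engine->emit_flush(rq, EMIT_BARRIER);
	 *	if (err)
	 *		return err;
	 */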

	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	/*
	 * Called on signaling of a SUBMIT_FENCE, passing along the signaling
	 * request down to the bonded pairs.
	 */
	void (*bond_execute)(struct i915_request *rq,
			     struct dma_fence *signal);

	/*
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);
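
	/*
	 * Illustrative sketch, not part of the original header: callers
	 * check that the backend provides a scheduler before invoking it,
	 * e.g.
	 *
	 *	if (engine->schedule) {
	 *		struct i915_sched_attr attr = { .priority = prio };
	 *
	 *		engine->schedule(rq, &attr);
	 *	}
	 */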

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	void (*destroy)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	unsigned int flags;
#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
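
	/*
	 * Illustrative sketch, not part of the original header: for an
	 * opcode without an entry in @cmd_hash, the parser falls back to
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *	u32 len = cmd_header & mask;	// length field, in dwords
	 */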

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
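
/*
 * Illustrative sketch, not part of the original header: callers gate
 * optional paths on these predicates, e.g.
 *
 *	if (intel_engine_has_preemption(engine))
 *		kick_preemption(engine);	// hypothetical helper
 */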

#define instdone_slice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
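
/*
 * Illustrative sketch, not part of the original header: walking the
 * per-unit sampler values captured in struct intel_instdone, e.g.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(i915, slice, subslice)
 *		pr_info("sampler[%d][%d]: 0x%08x\n", slice, subslice,
 *			instdone->sampler[slice][subslice]);
 */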

#endif /* __INTEL_ENGINE_TYPES__ */