/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_
#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

#include "xe_gpu_scheduler_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"

struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;
/**
 * enum xe_exec_queue_priority - Priority for an exec queue
 *
 * Ordered low to high; KERNEL is the highest and reserved for kernel-owned
 * queues. UNSET is a sentinel used only by the execlist backend.
 */
enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
35 * struct xe_exec_queue - Execution queue
37 * Contains all state necessary for submissions. Can either be a user object or
40 struct xe_exec_queue {
41 /** @gt: graphics tile this exec queue can submit to */
44 * @hwe: A hardware of the same class. May (physical engine) or may not
45 * (virtual engine) be where jobs actual engine up running. Should never
46 * really be used for submissions.
48 struct xe_hw_engine *hwe;
49 /** @refcount: ref count of this exec queue */
51 /** @vm: VM (address space) for this exec queue */
53 /** @class: class of this exec queue */
54 enum xe_engine_class class;
56 * @logical_mask: logical mask of where job submitted to exec queue can run
59 /** @name: name of this exec queue */
60 char name[MAX_FENCE_NAME_LEN];
61 /** @width: width (number BB submitted per exec) of this exec queue */
63 /** @fence_irq: fence IRQ used to signal job completion */
64 struct xe_hw_fence_irq *fence_irq;
67 * @last_fence: last fence on exec queue, protected by vm->lock in write
68 * mode if bind exec queue, protected by dma resv lock if non-bind exec
71 struct dma_fence *last_fence;
73 /* queue no longer allowed to submit */
74 #define EXEC_QUEUE_FLAG_BANNED BIT(0)
75 /* queue used for kernel submission only */
76 #define EXEC_QUEUE_FLAG_KERNEL BIT(1)
77 /* kernel engine only destroyed at driver unload */
78 #define EXEC_QUEUE_FLAG_PERMANENT BIT(2)
79 /* queue keeps running pending jobs after destroy ioctl */
80 #define EXEC_QUEUE_FLAG_PERSISTENT BIT(3)
81 /* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
82 #define EXEC_QUEUE_FLAG_VM BIT(4)
83 /* child of VM queue for multi-tile VM jobs */
84 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
85 /* kernel exec_queue only, set priority to highest level */
86 #define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(6)
89 * @flags: flags for this exec queue, should statically setup aside from ban
95 /** @multi_gt_list: list head for VM bind engines if multi-GT */
96 struct list_head multi_gt_list;
97 /** @multi_gt_link: link for VM bind engines if multi-GT */
98 struct list_head multi_gt_link;
102 /** @execlist: execlist backend specific state for exec queue */
103 struct xe_execlist_exec_queue *execlist;
104 /** @guc: GuC backend specific state for exec queue */
105 struct xe_guc_exec_queue *guc;
109 * @persistent: persistent exec queue state
112 /** @xef: file which this exec queue belongs to */
114 /** @link: link in list of persistent exec queues */
115 struct list_head link;
120 * @parallel: parallel submission state
123 /** @composite_fence_ctx: context composite fence */
124 u64 composite_fence_ctx;
125 /** @composite_fence_seqno: seqno for composite fence */
126 u32 composite_fence_seqno;
129 * @bind: bind submission state
132 /** @fence_ctx: context bind fence */
134 /** @fence_seqno: seqno for bind fence */
139 /** @sched_props: scheduling properties */
141 /** @timeslice_us: timeslice period in micro-seconds */
143 /** @preempt_timeout_us: preemption timeout in micro-seconds */
144 u32 preempt_timeout_us;
145 /** @priority: priority of this exec queue */
146 enum xe_exec_queue_priority priority;
149 /** @compute: compute exec queue state */
151 /** @pfence: preemption fence */
152 struct dma_fence *pfence;
153 /** @context: preemption fence context */
155 /** @seqno: preemption fence seqno */
157 /** @link: link into VM's list of exec queues */
158 struct list_head link;
159 /** @lock: preemption fences lock */
163 /** @usm: unified shared memory state */
165 /** @acc_trigger: access counter trigger */
167 /** @acc_notify: access counter notify */
169 /** @acc_granularity: access counter granularity */
173 /** @ops: submission backend exec queue operations */
174 const struct xe_exec_queue_ops *ops;
176 /** @ring_ops: ring operations for this exec queue */
177 const struct xe_ring_ops *ring_ops;
178 /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
179 struct drm_sched_entity *entity;
180 /** @lrc: logical ring context for this exec queue */
185 * struct xe_exec_queue_ops - Submission backend exec queue operations
187 struct xe_exec_queue_ops {
188 /** @init: Initialize exec queue for submission backend */
189 int (*init)(struct xe_exec_queue *q);
190 /** @kill: Kill inflight submissions for backend */
191 void (*kill)(struct xe_exec_queue *q);
192 /** @fini: Fini exec queue for submission backend */
193 void (*fini)(struct xe_exec_queue *q);
194 /** @set_priority: Set priority for exec queue */
195 int (*set_priority)(struct xe_exec_queue *q,
196 enum xe_exec_queue_priority priority);
197 /** @set_timeslice: Set timeslice for exec queue */
198 int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
199 /** @set_preempt_timeout: Set preemption timeout for exec queue */
200 int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
201 /** @set_job_timeout: Set job timeout for exec queue */
202 int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
204 * @suspend: Suspend exec queue from executing, allowed to be called
205 * multiple times in a row before resume with the caveat that
206 * suspend_wait returns before calling suspend again.
208 int (*suspend)(struct xe_exec_queue *q);
210 * @suspend_wait: Wait for an exec queue to suspend executing, should be
211 * call after suspend.
213 void (*suspend_wait)(struct xe_exec_queue *q);
215 * @resume: Resume exec queue execution, exec queue must be in a suspended
216 * state and dma fence returned from most recent suspend call must be
217 * signalled when this function is called.
219 void (*resume)(struct xe_exec_queue *q);
220 /** @reset_status: check exec queue reset status */
221 bool (*reset_status)(struct xe_exec_queue *q);