/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 * Sections in this file are organized as follows:
 *   1. IOCTL definition
 *   2. Extension definition and helper structs
 *   3. IOCTL's Query structs in the order of the Query's entries.
 *   4. The rest of IOCTL structs in the order of IOCTL declaration.
 */

/**
 * DOC: Xe Device Block Diagram
 *
 * The diagram below represents a high-level simplification of a discrete
 * GPU supported by the Xe driver. It shows some device components which
 * are necessary to understand this API, as well as how they relate to
 * each other. This diagram does not represent real hardware::
 *
 * ┌──────────────────────────────────────────────────────────────────┐
 * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
 * │ │        ┌───────────────────────┐   ┌─────┐       │ │ ┌─────┐ │ │
 * │ │        │         VRAM0         ├───┤ ... │       │ │ │VRAM1│ │ │
 * │ │        └───────────┬───────────┘   └─GT1─┘       │ │ └──┬──┘ │ │
 * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
 * │ │ │ ┌─────────────────────┐  ┌─────────────────┐ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │RCS0 │ │BCS0 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VCS0 │ │VCS1 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │CCS0 │ │CCS1 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ └─────────DSS─────────┘  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │                          │ │CCS2 │ │CCS3 │ │ │ │ │ │     │ │ │
 * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ... │ │ ... │ │ ... │  │                 │ │ │ │ │     │ │ │
 * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘  └─────Engines─────┘ │ │ │ │     │ │ │
 * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
 * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
 * └─────────────────────────────Device0───────┬──────────────────────┘
 *                                             │
 *                      ───────────────────────┴────────── PCI bus
 */

/**
 * DOC: Xe uAPI Overview
 *
 * This section aims to describe Xe's IOCTL entries, its structs, and other
 * Xe-related uAPI such as uevents and PMU (Platform Monitoring Unit)
 * related files and directories.
 *
 * List of supported IOCTLs:
 *  - &DRM_IOCTL_XE_DEVICE_QUERY
 *  - &DRM_IOCTL_XE_GEM_CREATE
 *  - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 *  - &DRM_IOCTL_XE_VM_CREATE
 *  - &DRM_IOCTL_XE_VM_DESTROY
 *  - &DRM_IOCTL_XE_VM_BIND
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 *  - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *  - &DRM_IOCTL_XE_EXEC
 *  - &DRM_IOCTL_XE_WAIT_USER_FENCE
 */

/*
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
 * offsets against DRM_COMMAND_BASE and must fall within [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY		0x00
#define DRM_XE_GEM_CREATE		0x01
#define DRM_XE_GEM_MMAP_OFFSET		0x02
#define DRM_XE_VM_CREATE		0x03
#define DRM_XE_VM_DESTROY		0x04
#define DRM_XE_VM_BIND			0x05
#define DRM_XE_EXEC_QUEUE_CREATE	0x06
#define DRM_XE_EXEC_QUEUE_DESTROY	0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x08
#define DRM_XE_EXEC			0x09
#define DRM_XE_WAIT_USER_FENCE		0x0a
/* Must be kept compact -- no holes */

#define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC			DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)

/**
 * DOC: Xe IOCTL Extensions
 *
 * Before detailing the IOCTLs and their structs, it is important to highlight
 * that every IOCTL in Xe is extensible.
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever-growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct drm_xe_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct drm_xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 */

/**
 * struct drm_xe_user_extension - Base class for defining a chain of extensions
 */
struct drm_xe_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct drm_xe_user_extension, or zero if the end.
	 */
	__u64 next_extension;

	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct drm_xe_user_extension.
	 */
	__u32 name;

	/**
	 * @pad: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 pad;
};

/**
 * struct drm_xe_ext_set_property - Generic set property extension
 *
 * A generic struct that allows any of Xe's IOCTLs to be extended
 * with a set_property operation.
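 *
 * A minimal sketch of chaining this extension into an IOCTL's @extensions
 * field, here attached to an exec queue creation; the property and value
 * shown are illustrative only:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.next_extension = 0,
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		...
 *	};
 */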
struct drm_xe_ext_set_property {
	/** @base: base user extension */
	struct drm_xe_user_extension base;

	/** @property: property to set */
	__u32 property;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_engine_class_instance - instance of an engine class
 *
 * It is returned as part of the @drm_xe_engine, but it is also used as
 * the input of engine selection for both @drm_xe_exec_queue_create and
 * @drm_xe_query_engine_cycles
 *
 * The @engine_class can be:
 *  - %DRM_XE_ENGINE_CLASS_RENDER
 *  - %DRM_XE_ENGINE_CLASS_COPY
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
 *  - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
 *  - %DRM_XE_ENGINE_CLASS_COMPUTE
 *  - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
 *    hardware engine class). Used for creating ordered queues of VM
 *    bind operations.
 */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER		0
#define DRM_XE_ENGINE_CLASS_COPY		1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE	2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE	3
#define DRM_XE_ENGINE_CLASS_COMPUTE		4
#define DRM_XE_ENGINE_CLASS_VM_BIND		5
	/** @engine_class: engine class id */
	__u16 engine_class;
	/** @engine_instance: engine instance id */
	__u16 engine_instance;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad;
};

/**
 * struct drm_xe_engine - describe hardware engine
 */
struct drm_xe_engine {
	/** @instance: The @drm_xe_engine_class_instance */
	struct drm_xe_engine_class_instance instance;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_query_engines - describe engines
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses struct
 * @drm_xe_query_engines in .data, which carries an array of struct
 * @drm_xe_engine.
 */
struct drm_xe_query_engines {
	/** @num_engines: number of engines returned in @engines */
	__u32 num_engines;
	/** @pad: MBZ */
	__u32 pad;
	/** @engines: The returned engines for this device */
	struct drm_xe_engine engines[];
};

/**
 * enum drm_xe_memory_class - Supported memory classes.
 */
enum drm_xe_memory_class {
	/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
	DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
	/**
	 * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
	 * represents the memory that is local to the device, which we
	 * call VRAM. Not valid on integrated platforms.
	 */
	DRM_XE_MEM_REGION_CLASS_VRAM
};

/**
 * struct drm_xe_mem_region - Describes some region as known to
 * the driver.
 */
struct drm_xe_mem_region {
	/**
	 * @mem_class: The memory class describing this region.
	 *
	 * See enum drm_xe_memory_class for supported values.
	 */
	__u16 mem_class;
	/**
	 * @instance: The unique ID for this region, which serves as the
	 * index in the placement bitmask used as argument for
	 * &DRM_IOCTL_XE_GEM_CREATE
	 */
	__u16 instance;
	/**
	 * @min_page_size: Min page-size in bytes for this region.
	 *
	 * When the kernel allocates memory for this region, the
	 * underlying pages will be at least @min_page_size in size.
	 * Buffer objects with an allowable placement in this region must be
	 * created with a size aligned to this value.
	 * GPU virtual address mappings of (parts of) buffer objects that
	 * may be placed in this region must also have their GPU virtual
	 * address and range aligned to this value.
	 * Affected IOCTLS will return %-EINVAL if alignment restrictions are
	 * not met.
	 */
	__u32 min_page_size;
	/**
	 * @total_size: The usable size in bytes for this region.
	 */
	__u64 total_size;
	/**
	 * @used: Estimate of the memory used in bytes for this region.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero.
	 */
	__u64 used;
	/**
	 * @cpu_visible_size: How much of this region can be CPU
	 * accessed, in bytes.
	 *
	 * This will always be <= @total_size, and the remainder (if
	 * any) will not be CPU accessible. If the CPU accessible part
	 * is smaller than @total_size then this is referred to as a
	 * small BAR system.
	 *
	 * On systems without small BAR (full BAR), the @cpu_visible_size
	 * will always equal the @total_size, since all of it will be CPU
	 * accessible.
	 *
	 * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
	 * regions (for other types the value here will always equal
	 * zero).
	 */
	__u64 cpu_visible_size;
	/**
	 * @cpu_visible_used: Estimate of CPU visible memory used, in
	 * bytes.
	 *
	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
	 * accounting. Without this the value here will always equal
	 * zero. Note this is only currently tracked for
	 * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
	 * here will always be zero).
	 */
	__u64 cpu_visible_used;
	/** @reserved: Reserved */
	__u64 reserved[6];
};

/**
 * struct drm_xe_query_mem_regions - describe memory regions
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
 * struct drm_xe_query_mem_regions in .data.
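 *
 * A minimal sketch of the two-call query pattern described for
 * &DRM_IOCTL_XE_DEVICE_QUERY, assuming a valid device file descriptor
 * ``fd`` (error handling omitted):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_mem_regions *regions;
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	regions = malloc(query.size);
 *	query.data = (uintptr_t)regions;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	for (int i = 0; i < regions->num_mem_regions; i++)
 *		printf("region: class %u instance %u size %llu\n",
 *		       regions->mem_regions[i].mem_class,
 *		       regions->mem_regions[i].instance,
 *		       (unsigned long long)regions->mem_regions[i].total_size);
 *	free(regions);
 */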
struct drm_xe_query_mem_regions {
	/** @num_mem_regions: number of memory regions returned in @mem_regions */
	__u32 num_mem_regions;
	/** @pad: MBZ */
	__u32 pad;
	/** @mem_regions: The returned memory regions for this device */
	struct drm_xe_mem_region mem_regions[];
};

/**
 * struct drm_xe_query_config - describe the device configuration
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
 * struct drm_xe_query_config in .data.
 *
 * The index in @info can be:
 *  - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
 *    and the device revision (next 8 bits)
 *  - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
 *    configuration, see list below
 *
 *    - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
 *      has usable VRAM
 *  - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
 *    required by this device, typically SZ_4K or SZ_64K
 *  - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
 *  - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
 *    available exec queue priority
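 *
 * A short sketch of decoding a few entries, assuming ``config`` points to
 * a reply already retrieved via &DRM_IOCTL_XE_DEVICE_QUERY (error handling
 * omitted):
 *
 * .. code-block:: C
 *
 *	__u64 rev_and_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];
 *	__u16 device_id = rev_and_id & 0xffff;
 *	__u8 revision = (rev_and_id >> 16) & 0xff;
 *	int has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
 *			  DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);
 */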
struct drm_xe_query_config {
	/** @num_params: number of parameters returned in info */
	__u32 num_params;

	/** @pad: MBZ */
	__u32 pad;

#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID		0
#define DRM_XE_QUERY_CONFIG_FLAGS			1
	#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM	(1 << 0)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT		2
#define DRM_XE_QUERY_CONFIG_VA_BITS			3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY	4
	/** @info: array of elements containing the config info */
	__u64 info[];
};

/**
 * struct drm_xe_gt - describe an individual GT.
 *
 * To be used with drm_xe_query_gt_list, which will return a list with all the
 * existing GT individual descriptions.
 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
 * implementing graphics and/or media operations.
 *
 * The index in @type can be:
 *  - %DRM_XE_QUERY_GT_TYPE_MAIN
 *  - %DRM_XE_QUERY_GT_TYPE_MEDIA
 */
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN	0
#define DRM_XE_QUERY_GT_TYPE_MEDIA	1
	/** @type: GT type: Main or Media */
	__u16 type;
	/** @tile_id: Tile ID where this GT lives (Information only) */
	__u16 tile_id;
	/** @gt_id: Unique ID of this GT within the PCI Device */
	__u16 gt_id;
	/** @pad: MBZ */
	__u16 pad[3];
	/** @reference_clock: A clock frequency for timestamp */
	__u32 reference_clock;
	/**
	 * @near_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are nearest to the current engines
	 * of this GT.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 near_mem_regions;
	/**
	 * @far_mem_regions: Bit mask of instances from
	 * drm_xe_query_mem_regions that are far from the engines of this GT.
	 * In general, they have extra indirections when compared to the
	 * @near_mem_regions. For a discrete device this could mean system
	 * memory and memory living in a different tile.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u64 far_mem_regions;
	/** @reserved: Reserved */
	__u64 reserved[8];
};

/**
 * struct drm_xe_query_gt_list - A list with GT description items.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
 * drm_xe_query_gt_list in .data.
 */
struct drm_xe_query_gt_list {
	/** @num_gt: number of GT items returned in gt_list */
	__u32 num_gt;
	/** @pad: MBZ */
	__u32 pad;
	/** @gt_list: The GT list returned for this device */
	struct drm_xe_gt gt_list[];
};

/**
 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
 *
 * This is the hardware topology which reflects the internal physical
 * structure of the GPU.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
 * struct drm_xe_query_topology_mask in .data.
 *
 * The @type can be:
 *  - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
 *    (DSS) available for geometry operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_GEOMETRY    ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for geometry.
 *  - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
 *    (DSS) available for compute operations. For example a query response
 *    containing the following in mask:
 *    ``DSS_COMPUTE     ff ff ff ff 00 00 00 00``
 *    means 32 DSS are available for compute.
 *  - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
 *    available per Dual Sub Slices (DSS). For example a query response
 *    containing the following in mask:
 *    ``EU_PER_DSS      ff ff 00 00 00 00 00 00``
 *    means each DSS has 16 EU.
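 *
 * A minimal sketch of walking the reply, assuming the reply buffer
 * ``topo`` of ``query.size`` bytes packs one such entry per GT and mask
 * type, each occupying sizeof(struct drm_xe_query_topology_mask) plus
 * @num_bytes of mask data (an assumption of this sketch; error handling
 * omitted):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_topology_mask *mask = topo;
 *	while ((char *)mask < (char *)topo + query.size) {
 *		printf("gt %u type %u first byte 0x%02x\n",
 *		       mask->gt_id, mask->type, mask->mask[0]);
 *		mask = (void *)((char *)mask + sizeof(*mask) +
 *				mask->num_bytes);
 *	}
 */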
struct drm_xe_query_topology_mask {
	/** @gt_id: GT ID the mask is associated with */
	__u16 gt_id;

#define DRM_XE_TOPO_DSS_GEOMETRY	(1 << 0)
#define DRM_XE_TOPO_DSS_COMPUTE		(1 << 1)
#define DRM_XE_TOPO_EU_PER_DSS		(1 << 2)
	/** @type: type of mask */
	__u16 type;

	/** @num_bytes: number of bytes in requested mask */
	__u32 num_bytes;

	/** @mask: little-endian mask of @num_bytes */
	__u8 mask[];
};

/**
 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
 *
 * If a query is made with a struct drm_xe_device_query where .query is equal to
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
 * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
 * .data points to this allocated structure.
 *
 * The query returns the engine cycles, which along with GT's @reference_clock,
 * can be used to calculate the engine timestamp. In addition the
 * query returns a set of cpu timestamps that indicate when the command
 * streamer cycle count was captured.
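 *
 * A small sketch of converting the result to an engine timestamp in
 * nanoseconds, assuming ``cycles`` holds the query result and ``ref_clock``
 * is the @reference_clock of the GT the engine belongs to (watch for
 * overflow with large cycle values; this is illustrative only):
 *
 * .. code-block:: C
 *
 *	__u64 mask = cycles.width < 64 ?
 *		((__u64)1 << cycles.width) - 1 : ~(__u64)0;
 *	__u64 ns = (cycles.engine_cycles & mask) * 1000000000ull / ref_clock;
 */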
struct drm_xe_query_engine_cycles {
	/**
	 * @eci: This is input by the user and is the engine for which command
	 * streamer cycles is queried.
	 */
	struct drm_xe_engine_class_instance eci;

	/**
	 * @clockid: This is input by the user and is the reference clock id for
	 * CPU timestamp. For definition, see clock_gettime(2) and
	 * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
	 * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
	 */
	__s32 clockid;

	/** @width: Width of the engine cycle counter in bits. */
	__u32 width;

	/**
	 * @engine_cycles: Engine cycles as read from its register.
	 */
	__u64 engine_cycles;

	/**
	 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
	 * reading the engine_cycles register using the reference clockid set by the
	 * user.
	 */
	__u64 cpu_timestamp;

	/**
	 * @cpu_delta: Time delta in ns captured around reading the lower dword
	 * of the engine_cycles register.
	 */
	__u64 cpu_delta;
};

/**
 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
 * structure to query device information
 *
 * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
 * and sets the value in the query member. This determines the type of
 * the structure provided by the driver in data, among struct drm_xe_query_*.
 *
 * The @query can be:
 *  - %DRM_XE_DEVICE_QUERY_ENGINES
 *  - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
 *  - %DRM_XE_DEVICE_QUERY_CONFIG
 *  - %DRM_XE_DEVICE_QUERY_GT_LIST
 *  - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
 *    configuration of the device such as information on slices, memory,
 *    caches, and so on. It is provided as a table of key / value
 *    attributes.
 *  - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 *  - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
 *
 * If size is set to 0, the driver fills it with the required size for
 * the requested type of data to query. If size is equal to the required
 * size, the queried information is copied into data. If size is set to
 * a value different from 0 and different from the required size, the
 * IOCTL call returns -EINVAL.
 *
 * For example the following code snippet allows retrieving and printing
 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_query_engines *engines;
 *	struct drm_xe_device_query query = {
 *		.extensions = 0,
 *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *		.size = 0,
 *		.data = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	engines = malloc(query.size);
 *	query.data = (uintptr_t)engines;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	for (int i = 0; i < engines->num_engines; i++) {
 *		printf("Engine %d: %s\n", i,
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_COPY ? "COPY":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
 *			engines->engines[i].instance.engine_class ==
 *				DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
 *			"UNKNOWN");
 *	}
 *	free(engines);
 */
struct drm_xe_device_query {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_DEVICE_QUERY_ENGINES		0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS		1
#define DRM_XE_DEVICE_QUERY_CONFIG		2
#define DRM_XE_DEVICE_QUERY_GT_LIST		3
#define DRM_XE_DEVICE_QUERY_HWCONFIG		4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY		5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES	6
	/** @query: The type of data to query */
	__u32 query;

	/** @size: Size of the queried data */
	__u32 size;

	/** @data: Queried data is placed here */
	__u64 data;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
 * gem creation
 *
 * The @flags can be:
 *  - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
 *  - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
 *  - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
 *    possible placement, ensure that the corresponding VRAM allocation
 *    will always use the CPU accessible part of VRAM. This is important
 *    for small-bar systems (on full-bar systems this gets turned into a
 *    noop).
 *    Note1: System memory can be used as an extra placement if the kernel
 *    should spill the allocation to system memory, if space can't be made
 *    available in the CPU accessible part of VRAM (giving the same
 *    behaviour as the i915 interface, see
 *    I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
 *    Note2: For clear-color CCS surfaces the kernel needs to read the
 *    clear-color value stored in the buffer, and on discrete platforms we
 *    need to use VRAM for display surfaces, therefore the kernel requires
 *    setting this flag for such objects, otherwise an error is thrown on
 *    small-bar systems.
 *
 * @cpu_caching supports the following values:
 *  - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
 *    caching. On iGPU this can't be used for scanout surfaces. Currently
 *    not allowed for objects placed in VRAM.
 *  - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
 *    is uncached. Scanout surfaces should likely use this. All objects
 *    that can be placed in VRAM must use this.
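 *
 * A minimal sketch of creating a small buffer object in system memory,
 * assuming a valid ``fd`` and that region instance 0 is a SYSMEM region
 * as reported by %DRM_XE_DEVICE_QUERY_MEM_REGIONS (an assumption of this
 * sketch; error handling omitted):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_create create = {
 *		.size = 0x10000,
 *		.placement = 1 << 0, // bit set per region instance
 *		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *	// create.handle now holds the GEM handle
 */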
struct drm_xe_gem_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @size: Size of the object to be created, must match region
	 * (system or vram) minimum alignment (&min_page_size).
	 */
	__u64 size;

	/**
	 * @placement: A mask of memory instances of where BO can be placed.
	 * Each index in this mask refers directly to the struct
	 * drm_xe_query_mem_regions' instance, no assumptions should
	 * be made about order. The type of each region is described
	 * by struct drm_xe_query_mem_regions' mem_class.
	 */
	__u32 placement;

#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING		(1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT			(1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM	(1 << 2)
	/**
	 * @flags: Creation flags; see the DRM_XE_GEM_CREATE_FLAG_* list
	 * above.
	 */
	__u32 flags;

	/**
	 * @vm_id: Attached VM, if any
	 *
	 * If a VM is specified, this BO must:
	 *
	 *  1. Only ever be bound to that VM.
	 *  2. Never be exported as a PRIME fd.
	 */
	__u32 vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

#define DRM_XE_GEM_CPU_CACHING_WB	1
#define DRM_XE_GEM_CPU_CACHING_WC	2
	/**
	 * @cpu_caching: The CPU caching mode to select for this object. If
	 * mmapping the object, the mode selected here will also be used.
	 */
	__u16 cpu_caching;
	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
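 *
 * A minimal sketch of CPU mapping an object, assuming a valid ``fd``, a
 * GEM ``handle`` and its ``size`` (error handling omitted):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_gem_mmap_offset mmo = {
 *		.handle = handle,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mmo.offset);
 */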
struct drm_xe_gem_mmap_offset {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @handle: Handle for the object being mapped. */
	__u32 handle;

	/** @flags: Must be zero */
	__u32 flags;

	/** @offset: The fake offset to use for subsequent mmap call */
	__u64 offset;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
 *  - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
 *  - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
 *    exec submissions to its exec_queues that don't have an upper time
 *    limit on the job execution time. But exec submissions to these
 *    don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ or
 *    DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ to be used as out-syncs, that is,
 *    together with DRM_XE_SYNC_FLAG_SIGNAL.
 *    LR VMs can be created in recoverable page-fault mode using
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
 *    If that flag is omitted, the UMD can not rely on the slightly
 *    different per-VM overcommit semantics that are enabled by
 *    DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
 *    still enable recoverable pagefaults if supported by the device.
 *  - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
 *    DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
 *    demand when accessed, and also allows per-VM overcommit of memory.
 *    The xe driver internally uses recoverable pagefaults to implement
 *    this.
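 *
 * A minimal sketch of creating a VM, assuming a valid ``fd`` (error
 * handling omitted):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_vm_create vm_create = {
 *		.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_create);
 *	// vm_create.vm_id now holds the new VM ID
 */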
struct drm_xe_vm_create {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE	(1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE		(1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
	/** @flags: Flags */
	__u32 flags;

	/** @vm_id: Returned VM ID */
	__u32 vm_id;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
 */
struct drm_xe_vm_destroy {
	/** @vm_id: VM ID */
	__u32 vm_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_vm_bind_op - run bind operations
 *
 * The @op can be:
 *  - %DRM_XE_VM_BIND_OP_MAP
 *  - %DRM_XE_VM_BIND_OP_UNMAP
 *  - %DRM_XE_VM_BIND_OP_MAP_USERPTR
 *  - %DRM_XE_VM_BIND_OP_UNMAP_ALL
 *  - %DRM_XE_VM_BIND_OP_PREFETCH
 *
 * and the @flags can be:
 *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
 *    tables are set up with a special bit which indicates writes are
 *    dropped and all reads return zero. In the future, the NULL flag
 *    will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
 *    handle MBZ, and the BO offset MBZ. This flag is intended to
 *    implement VK sparse bindings.
 */
struct drm_xe_vm_bind_op {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
	 */
	__u32 obj;

	/**
	 * @pat_index: The platform defined @pat_index to use for this mapping.
	 * The index basically maps to some predefined memory attributes,
	 * including things like caching, coherency, compression etc. The exact
	 * meaning of the pat_index is platform specific and defined in the
	 * Bspec and PRMs. When the KMD sets up the binding the index here is
	 * encoded into the ppGTT PTE.
	 *
	 * For coherency the @pat_index needs to be at least 1way coherent when
	 * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
	 * will extract the coherency mode from the @pat_index and reject if
	 * there is a mismatch (see note below for pre-MTL platforms).
	 *
	 * Note: On pre-MTL platforms there is only a caching mode and no
	 * explicit coherency mode, but on such hardware there is always a
	 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with
	 * CPU caches even with the caching mode set as uncached. It's only the
	 * display engine that is incoherent (on dgpu it must be in VRAM which
	 * is always mapped as WC on the CPU). However to keep the uapi somewhat
	 * consistent with newer platforms the KMD groups the different cache
	 * levels into the following coherency buckets on all pre-MTL platforms:
	 *
	 *	ppGTT UC -> COH_NONE
	 *	ppGTT WC -> COH_NONE
	 *	ppGTT WT -> COH_NONE
	 *	ppGTT WB -> COH_AT_LEAST_1WAY
	 *
	 * In practice UC/WC/WT should only ever be used for scanout surfaces on
	 * such platforms (or perhaps in general for dma-buf if shared with
	 * another device) since it is only the display engine that is actually
	 * incoherent. Everything else should typically use WB given that we
	 * have a shared-LLC. On MTL+ this completely changes and the HW
	 * defines the coherency mode as part of the @pat_index, where
	 * incoherent GT access is possible.
	 *
	 * Note: For userptr and externally imported dma-buf the kernel expects
	 * either 1WAY or 2WAY for the @pat_index.
	 *
	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
	 * on the @pat_index. For such mappings there is no actual memory being
	 * mapped (the address in the PTE is invalid), so the various PAT memory
	 * attributes likely do not apply. Simply leaving as zero is one
	 * option (still a valid pat_index).
	 */
	__u16 pat_index;

	/** @pad: MBZ */
	__u16 pad;

	union {
		/**
		 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
		 * ignored for unbind
		 */
		__u64 obj_offset;

		/** @userptr: user pointer to bind on */
		__u64 userptr;
	};

	/**
	 * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
	 */
	__u64 range;

	/** @addr: Address to operate on, MBZ for UNMAP_ALL */
	__u64 addr;

#define DRM_XE_VM_BIND_OP_MAP		0x0
#define DRM_XE_VM_BIND_OP_UNMAP		0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR	0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL	0x3
#define DRM_XE_VM_BIND_OP_PREFETCH	0x4
	/** @op: Bind operation to perform */
	__u32 op;

#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 3)
	/** @flags: Bind flags */
	__u32 flags;

	/**
	 * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
	 * It is a region instance, not a mask.
	 * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
	 */
	__u32 prefetch_mem_region_instance;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[3];
};

/**
 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
 *
 * Below is an example of a minimal use of @drm_xe_vm_bind to
 * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to
 * illustrate `userptr`. It can be synchronized by using the example
 * provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *	data = aligned_alloc(ALIGNMENT, BO_SIZE);
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm,
 *		.num_binds = 1,
 *		.bind.obj = 0,
 *		.bind.obj_offset = to_user_pointer(data),
 *		.bind.range = BO_SIZE,
 *		.bind.addr = BIND_ADDRESS,
 *		.bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
 *		.bind.flags = 0,
 *		.num_syncs = 1,
 *		.syncs = &sync,
 *		.exec_queue_id = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 */
struct drm_xe_vm_bind {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @vm_id: The ID of the VM to bind to */
	__u32 vm_id;

	/**
	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
	 * and exec queue must have same vm_id. If zero, the default VM bind engine
	 * is used.
	 */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @num_binds: number of binds in this IOCTL */
	__u32 num_binds;

	union {
		/** @bind: used if num_binds == 1 */
		struct drm_xe_vm_bind_op bind;

		/**
		 * @vector_of_binds: userptr to array of struct
		 * drm_xe_vm_bind_op if num_binds > 1
		 */
		__u64 vector_of_binds;
	};

	/** @pad2: MBZ */
	__u32 pad2;

	/** @num_syncs: amount of syncs to wait on */
	__u32 num_syncs;

	/** @syncs: pointer to struct drm_xe_sync array */
	__u64 syncs;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *	};
 *	struct drm_xe_exec_queue_create exec_queue_create = {
 *		.extensions = 0,
 *		.vm_id = vm,
 *		.width = 1,
 *		.num_placements = 1,
 *		.instances = to_user_pointer(&instance),
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY	0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE	1

	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @width: submission width (number BB per exec) for this exec queue */
	__u16 width;

	/** @num_placements: number of valid placements for this exec queue */
	__u16 num_placements;

	/** @vm_id: VM to use for this exec queue */
	__u32 vm_id;

	/** @flags: MBZ */
	__u32 flags;

	/** @exec_queue_id: Returned exec queue ID */
	__u32 exec_queue_id;

	/**
	 * @instances: user pointer to a 2-d array of struct
	 * drm_xe_engine_class_instance
	 *
	 * length = width (i) * num_placements (j)
	 * index = j + i * width
	 */
	__u64 instances;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 */
struct drm_xe_exec_queue_destroy {
	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

	/** @pad: MBZ */
	__u32 pad;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *
 * The @property can be:
 *  - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
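 *
 * A minimal sketch of reading the ban state of an exec queue, assuming a
 * valid ``fd`` and ``exec_queue_id``, and that a nonzero @value reports a
 * ban (error handling omitted):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec_queue_get_property get_property = {
 *		.exec_queue_id = exec_queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &get_property);
 */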
struct drm_xe_exec_queue_get_property {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID */
	__u32 exec_queue_id;

#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN	0
	/** @property: property to get */
	__u32 property;

	/** @value: property value */
	__u64 value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_sync - sync object
 *
 * The @type can be:
 *  - %DRM_XE_SYNC_TYPE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
 *  - %DRM_XE_SYNC_TYPE_USER_FENCE
 *
 * and the @flags can be:
 *  - %DRM_XE_SYNC_FLAG_SIGNAL
 *
 * A minimal use of @drm_xe_sync looks like this:
 *
 * .. code-block:: C
 *
 *	struct drm_xe_sync sync = {
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *	};
 *	struct drm_syncobj_create syncobj_create = { 0 };
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
 *	sync.handle = syncobj_create.handle;
 *
 *	... use of &sync in drm_xe_exec or drm_xe_vm_bind ...
 *
 *	struct drm_syncobj_wait wait = {
 *		.handles = &sync.handle,
 *		.timeout_nsec = INT64_MAX,
 *		.count_handles = 1,
 *		.flags = 0,
 *		.first_signaled = 0,
 *		.pad = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */
struct drm_xe_sync {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

#define DRM_XE_SYNC_TYPE_SYNCOBJ		0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ	0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE		0x2
	/** @type: Type of this sync object */
	__u32 type;

#define DRM_XE_SYNC_FLAG_SIGNAL	(1 << 0)
	/** @flags: Sync Flags */
	__u32 flags;

	union {
		/** @handle: Handle for the object */
		__u32 handle;

		/**
		 * @addr: Address of user fence. When sync is passed in via exec
		 * IOCTL this is a GPU address in the VM. When sync passed in via
		 * VM bind IOCTL this is a user pointer. In either case, it is
		 * the user's responsibility that this address is present and
		 * mapped when the user fence is signalled. Must be qword
		 * aligned.
		 */
		__u64 addr;
	};

	/**
	 * @timeline_value: Input for the timeline sync object. Needs to be
	 * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
 *
 * This is an example of using @drm_xe_exec for execution of the object
 * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
 * (see example in @drm_xe_exec_queue_create). It can be synchronized
 * by using the example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = exec_queue,
 *		.syncs = &sync,
 *		.num_syncs = 1,
 *		.address = BIND_ADDRESS,
 *		.num_batch_buffer = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 */
struct drm_xe_exec {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/** @exec_queue_id: Exec queue ID for the batch buffer */
	__u32 exec_queue_id;

	/** @num_syncs: Amount of struct drm_xe_sync in array. */
	__u32 num_syncs;

	/** @syncs: Pointer to struct drm_xe_sync array. */
	__u64 syncs;

	/**
	 * @address: address of batch buffer if num_batch_buffer == 1 or an
	 * array of batch buffer addresses
	 */
	__u64 address;

	/**
	 * @num_batch_buffer: number of batch buffer in this exec, must match
	 * the width of the engine
	 */
	__u16 num_batch_buffer;

	/** @pad: MBZ */
	__u16 pad[3];

	/** @reserved: Reserved */
	__u64 reserved[2];
};

/**
 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
 *
 * Wait on user fence, XE will wake up on every HW engine interrupt in the
 * instances list and check if user fence is complete::
 *
 *	(*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 *
 * The @op can be:
 *  - %DRM_XE_UFENCE_WAIT_OP_EQ
 *  - %DRM_XE_UFENCE_WAIT_OP_NEQ
 *  - %DRM_XE_UFENCE_WAIT_OP_GT
 *  - %DRM_XE_UFENCE_WAIT_OP_GTE
 *  - %DRM_XE_UFENCE_WAIT_OP_LT
 *  - %DRM_XE_UFENCE_WAIT_OP_LTE
 *
 * and the @flags can be:
 *  - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
 *
 * The @mask values can be for example:
 *  - 0xffu for u8
 *  - 0xffffu for u16
 *  - 0xffffffffu for u32
 *  - 0xffffffffffffffffu for u64
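 *
 * A minimal sketch of waiting for a 64-bit user fence to become nonzero,
 * assuming ``fence_addr`` was previously attached to a submission as a
 * %DRM_XE_SYNC_TYPE_USER_FENCE out-sync (error handling omitted):
 *
 * .. code-block:: C
 *
 *	struct drm_xe_wait_user_fence wait = {
 *		.addr = fence_addr,
 *		.op = DRM_XE_UFENCE_WAIT_OP_NEQ,
 *		.value = 0,
 *		.mask = 0xffffffffffffffffu,
 *		.timeout = 1000000000, // 1 s relative timeout
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */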
struct drm_xe_wait_user_fence {
	/** @extensions: Pointer to the first extension struct, if any */
	__u64 extensions;

	/**
	 * @addr: user pointer address to wait on, must be qword aligned
	 */
	__u64 addr;

#define DRM_XE_UFENCE_WAIT_OP_EQ	0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ	0x1
#define DRM_XE_UFENCE_WAIT_OP_GT	0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE	0x3
#define DRM_XE_UFENCE_WAIT_OP_LT	0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE	0x5
	/** @op: wait operation (type of comparison) */
	__u16 op;

#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME	(1 << 0)
	/** @flags: wait flags */
	__u16 flags;

	/** @pad: MBZ */
	__u32 pad;

	/** @value: compare value */
	__u64 value;

	/** @mask: comparison mask */
	__u64 mask;

	/**
	 * @timeout: how long to wait before bailing, value in nanoseconds.
	 * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative
	 * timeout) it contains the timeout to wait, expressed in nanoseconds
	 * (the fence will expire at now() + timeout).
	 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute
	 * timeout) the wait will end at timeout (uses system MONOTONIC_CLOCK).
	 * Passing a negative timeout leads to a never-ending wait.
	 *
	 * On relative timeout this value is updated with the timeout left
	 * (for restarting the call in case of signal delivery).
	 * On absolute timeout this value stays intact (a restarted call still
	 * expires at the same point in time).
	 */
	__s64 timeout;

	/** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
	__u32 exec_queue_id;

	/** @pad2: MBZ */
	__u32 pad2;

	/** @reserved: Reserved */
	__u64 reserved[2];
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */