// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_ttm_vram_mgr.h"

static const u16 xe_to_user_engine_class[] = {
        [XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
        [XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
        [XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
        [XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
        [XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
        [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
        [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
        [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
        [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
        [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        struct xe_gt *gt;
        u8 gt_id;
        int i = 0;

        for_each_gt(gt, xe, gt_id)
                for_each_hw_engine(hwe, gt, id) {
                        if (xe_hw_engine_is_reserved(hwe))
                                continue;
                        i++;
                }

        return sizeof(struct drm_xe_query_engines) +
                i * sizeof(struct drm_xe_engine);
}

typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
        /*
         * Use the same logic as the perf subsystem to allow the user to
         * select the reference clock id to be used for timestamps.
         */
        switch (clk_id) {
        case CLOCK_MONOTONIC:
                return &ktime_get_ns;
        case CLOCK_MONOTONIC_RAW:
                return &ktime_get_raw_ns;
        case CLOCK_REALTIME:
                return &ktime_get_real_ns;
        case CLOCK_BOOTTIME:
                return &ktime_get_boottime_ns;
        case CLOCK_TAI:
                return &ktime_get_clocktai_ns;
        default:
                return NULL;
        }
}

static void
__read_timestamps(struct xe_gt *gt,
                  struct xe_reg lower_reg,
                  struct xe_reg upper_reg,
                  u64 *engine_ts,
                  u64 *cpu_ts,
                  u64 *cpu_delta,
                  __ktime_func_t cpu_clock)
{
        u32 upper, lower, old_upper, loop = 0;

        upper = xe_mmio_read32(gt, upper_reg);
        do {
                *cpu_delta = local_clock();
                *cpu_ts = cpu_clock();
                lower = xe_mmio_read32(gt, lower_reg);
                *cpu_delta = local_clock() - *cpu_delta;
                old_upper = upper;
                upper = xe_mmio_read32(gt, upper_reg);
        } while (upper != old_upper && loop++ < 2);

        *engine_ts = (u64)upper << 32 | lower;
}

static int
query_engine_cycles(struct xe_device *xe,
                    struct drm_xe_device_query *query)
{
        struct drm_xe_query_engine_cycles __user *query_ptr;
        struct drm_xe_engine_class_instance *eci;
        struct drm_xe_query_engine_cycles resp;
        size_t size = sizeof(resp);
        __ktime_func_t cpu_clock;
        struct xe_hw_engine *hwe;
        struct xe_gt *gt;

        if (query->size == 0) {
                query->size = size;
                return 0;
        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                return -EINVAL;
        }

        query_ptr = u64_to_user_ptr(query->data);
        if (copy_from_user(&resp, query_ptr, size))
                return -EFAULT;

        cpu_clock = __clock_id_to_func(resp.clockid);
        if (!cpu_clock)
                return -EINVAL;

        eci = &resp.eci;
        if (eci->gt_id > XE_MAX_GT_PER_TILE)
                return -EINVAL;

        gt = xe_device_get_gt(xe, eci->gt_id);
        if (!gt)
                return -EINVAL;

        if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
                return -EINVAL;

        hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
                              eci->engine_instance, true);
        if (!hwe)
                return -EINVAL;

        xe_device_mem_access_get(xe);
        xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);

        __read_timestamps(gt,
                          RING_TIMESTAMP(hwe->mmio_base),
                          RING_TIMESTAMP_UDW(hwe->mmio_base),
                          &resp.engine_cycles,
                          &resp.cpu_timestamp,
                          &resp.cpu_delta,
                          cpu_clock);

        xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        xe_device_mem_access_put(xe);
        resp.width = 36;

        /* Only write to the output fields of user query */
        if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))
                return -EFAULT;

        if (put_user(resp.cpu_delta, &query_ptr->cpu_delta))
                return -EFAULT;

        if (put_user(resp.engine_cycles, &query_ptr->engine_cycles))
                return -EFAULT;

        if (put_user(resp.width, &query_ptr->width))
                return -EFAULT;

        return 0;
}

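/*
 * Illustrative userspace sketch (not part of this file) of the two-call
 * convention used by every query above: a first ioctl with size == 0 asks
 * for the required size, and a second ioctl with that size performs the
 * query. For DRM_XE_DEVICE_QUERY_ENGINE_CYCLES the caller must also fill
 * the input fields (eci, clockid) before the second call; "fd" and any
 * error handling are assumed to exist in the caller.
 *
 *	struct drm_xe_query_engine_cycles cycles = {};
 *	struct drm_xe_device_query q = {
 *		.query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);  // q.size is now sizeof(cycles)
 *	cycles.eci.engine_class = DRM_XE_ENGINE_CLASS_RENDER;
 *	cycles.eci.engine_instance = 0;
 *	cycles.eci.gt_id = 0;
 *	cycles.clockid = CLOCK_MONOTONIC;
 *	q.data = (uintptr_t)&cycles;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);  // fills the output fields
 */
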
static int query_engines(struct xe_device *xe,
                         struct drm_xe_device_query *query)
{
        size_t size = calc_hw_engine_info_size(xe);
        struct drm_xe_query_engines __user *query_ptr =
                u64_to_user_ptr(query->data);
        struct drm_xe_query_engines *engines;
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        struct xe_gt *gt;
        u8 gt_id;
        int i = 0;

        if (query->size == 0) {
                query->size = size;
                return 0;
        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                return -EINVAL;
        }

        engines = kmalloc(size, GFP_KERNEL);
        if (XE_IOCTL_DBG(xe, !engines))
                return -ENOMEM;

        for_each_gt(gt, xe, gt_id)
                for_each_hw_engine(hwe, gt, id) {
                        if (xe_hw_engine_is_reserved(hwe))
                                continue;

                        engines->engines[i].instance.engine_class =
                                xe_to_user_engine_class[hwe->class];
                        engines->engines[i].instance.engine_instance =
                                hwe->logical_instance;
                        engines->engines[i].instance.gt_id = gt->info.id;
                        engines->engines[i].instance.pad = 0;
                        memset(engines->engines[i].reserved, 0,
                               sizeof(engines->engines[i].reserved));

                        i++;
                }

        engines->num_engines = i;

        if (copy_to_user(query_ptr, engines, size)) {
                kfree(engines);
                return -EFAULT;
        }
        kfree(engines);

        return 0;
}

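/*
 * Illustrative userspace sketch (not part of this file) of consuming the
 * variable-sized engine list: query the size, allocate that many bytes,
 * query again and walk num_engines entries. Allocation failures and ioctl
 * errors are ignored here for brevity.
 *
 *	struct drm_xe_device_query q = { .query = DRM_XE_DEVICE_QUERY_ENGINES };
 *	struct drm_xe_query_engines *engines;
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);
 *	engines = malloc(q.size);
 *	q.data = (uintptr_t)engines;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);
 *	for (__u32 i = 0; i < engines->num_engines; i++)
 *		; // engines->engines[i].instance describes one exposed engine
 */
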
static size_t calc_mem_regions_size(struct xe_device *xe)
{
        u32 num_managers = 1;
        int i;

        for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
                if (ttm_manager_type(&xe->ttm, i))
                        num_managers++;

        return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

static int query_mem_regions(struct xe_device *xe,
                             struct drm_xe_device_query *query)
{
        size_t size = calc_mem_regions_size(xe);
        struct drm_xe_query_mem_regions *mem_regions;
        struct drm_xe_query_mem_regions __user *query_ptr =
                u64_to_user_ptr(query->data);
        struct ttm_resource_manager *man;
        int ret, i;

        if (query->size == 0) {
                query->size = size;
                return 0;
        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                return -EINVAL;
        }

        mem_regions = kzalloc(size, GFP_KERNEL);
        if (XE_IOCTL_DBG(xe, !mem_regions))
                return -ENOMEM;

        man = ttm_manager_type(&xe->ttm, XE_PL_TT);
        mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
        /*
         * The instance needs to be a unique number that represents the index
         * in the placement mask used at xe_gem_create_ioctl() for the
         * xe_bo_create() placement.
         */
        mem_regions->mem_regions[0].instance = 0;
        mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
        mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
        if (perfmon_capable())
                mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
        mem_regions->num_mem_regions = 1;

        for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
                man = ttm_manager_type(&xe->ttm, i);
                if (man) {
                        mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
                                DRM_XE_MEM_REGION_CLASS_VRAM;
                        mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
                                mem_regions->num_mem_regions;
                        mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
                                xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
                                SZ_64K : PAGE_SIZE;

                        mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
                                man->size;

                        if (perfmon_capable()) {
                                xe_ttm_vram_get_used(man,
                                        &mem_regions->mem_regions
                                        [mem_regions->num_mem_regions].used,
                                        &mem_regions->mem_regions
                                        [mem_regions->num_mem_regions].cpu_visible_used);
                        }

                        mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
                                xe_ttm_vram_get_cpu_visible_size(man);
                        mem_regions->num_mem_regions++;
                }
        }

        if (!copy_to_user(query_ptr, mem_regions, size))
                ret = 0;
        else
                ret = -ENOSPC;

        kfree(mem_regions);
        return ret;
}

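/*
 * A minimal userspace sketch (not part of this file) of the relationship
 * the comment above describes: the reported "instance" is the bit position
 * userspace sets in the placement mask of a GEM create call. Only the
 * placement field is shown; other fields a real call needs, plus "fd",
 * "regions" and error handling, are assumed.
 *
 *	struct drm_xe_gem_create create = {
 *		.size = 2 * 1024 * 1024,
 *		.placement = 1u << regions->mem_regions[r].instance,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 */
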
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
        const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
        size_t size =
                sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
        struct drm_xe_query_config __user *query_ptr =
                u64_to_user_ptr(query->data);
        struct drm_xe_query_config *config;

        if (query->size == 0) {
                query->size = size;
                return 0;
        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                return -EINVAL;
        }

        config = kzalloc(size, GFP_KERNEL);
        if (!config)
                return -ENOMEM;

        config->num_params = num_params;
        config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
                xe->info.devid | (xe->info.revid << 16);
        if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
                config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
                        DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
        config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
                xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
        config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
        config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
                xe_exec_queue_device_get_max_priority(xe);

        if (copy_to_user(query_ptr, config, size)) {
                kfree(config);
                return -EFAULT;
        }
        kfree(config);

        return 0;
}

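/*
 * A minimal sketch (not part of this file) of how userspace would decode
 * DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID, following the packing done above:
 * PCI device id in the low 16 bits, revision id in the next 16 bits.
 *
 *	__u64 rev_and_devid = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID];
 *	__u16 devid = rev_and_devid & 0xffff;
 *	__u16 revid = (rev_and_devid >> 16) & 0xffff;
 */
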
static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
        struct xe_gt *gt;
        size_t size = sizeof(struct drm_xe_query_gt_list) +
                xe->info.gt_count * sizeof(struct drm_xe_gt);
        struct drm_xe_query_gt_list __user *query_ptr =
                u64_to_user_ptr(query->data);
        struct drm_xe_query_gt_list *gt_list;
        u8 id;

        if (query->size == 0) {
                query->size = size;
                return 0;
        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                return -EINVAL;
        }

        gt_list = kzalloc(size, GFP_KERNEL);
        if (!gt_list)
                return -ENOMEM;

        gt_list->num_gt = xe->info.gt_count;

        for_each_gt(gt, xe, id) {
                if (xe_gt_is_media_type(gt))
                        gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
                else
                        gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
                gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
                gt_list->gt_list[id].gt_id = gt->info.id;
                gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
                /*
                 * The mem_regions indexes in the mask below need to
                 * directly identify the struct
                 * drm_xe_query_mem_regions' instance constructed at
                 * query_mem_regions()
                 *
                 * For our current platforms:
                 * Bit 0 -> System Memory
                 * Bit 1 -> VRAM0 on Tile0
                 * Bit 2 -> VRAM1 on Tile1
                 * However the uAPI is generic and it's userspace's
                 * responsibility to check the mem_class, without any
                 * assumption about the bit numbering used here.
                 */
                if (!IS_DGFX(xe))
                        gt_list->gt_list[id].near_mem_regions = 0x1;
                else
                        gt_list->gt_list[id].near_mem_regions =
                                BIT(gt_to_tile(gt)->id) << 1;
                gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
                        gt_list->gt_list[id].near_mem_regions;
        }

        if (copy_to_user(query_ptr, gt_list, size)) {
                kfree(gt_list);
                return -EFAULT;
        }
        kfree(gt_list);

        return 0;
}

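/*
 * A minimal sketch (not part of this file) of how userspace relates the
 * per-GT masks built above back to the mem_regions query: each set bit
 * selects the region whose "instance" equals that bit position. "regions"
 * and "gt" are assumed to come from the corresponding queries.
 *
 *	for (__u32 r = 0; r < regions->num_mem_regions; r++)
 *		if (gt->near_mem_regions & (1ull << regions->mem_regions[r].instance))
 *			; // region r is near (local) for this GT
 */
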
static int query_hwconfig(struct xe_device *xe,
                          struct drm_xe_device_query *query)
{
        struct xe_gt *gt = xe_root_mmio_gt(xe);
        size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
        void __user *query_ptr = u64_to_user_ptr(query->data);
        void *hwconfig;

        if (query->size == 0) {
                query->size = size;
                return 0;
        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                return -EINVAL;
        }

        hwconfig = kzalloc(size, GFP_KERNEL);
        if (!hwconfig)
                return -ENOMEM;

        xe_device_mem_access_get(xe);
        xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);
        xe_device_mem_access_put(xe);

        if (copy_to_user(query_ptr, hwconfig, size)) {
                kfree(hwconfig);
                return -EFAULT;
        }
        kfree(hwconfig);

        return 0;
}

static size_t calc_topo_query_size(struct xe_device *xe)
{
        return xe->info.gt_count *
                (3 * sizeof(struct drm_xe_query_topology_mask) +
                 sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
                 sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
                 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
}

static void __user *copy_mask(void __user *ptr,
                              struct drm_xe_query_topology_mask *topo,
                              void *mask, size_t mask_size)
{
        topo->num_bytes = mask_size;

        if (copy_to_user(ptr, topo, sizeof(*topo)))
                return ERR_PTR(-EFAULT);
        ptr += sizeof(*topo);

        if (copy_to_user(ptr, mask, mask_size))
                return ERR_PTR(-EFAULT);
        ptr += mask_size;

        return ptr;
}

static int query_gt_topology(struct xe_device *xe,
                             struct drm_xe_device_query *query)
{
        void __user *query_ptr = u64_to_user_ptr(query->data);
        size_t size = calc_topo_query_size(xe);
        struct drm_xe_query_topology_mask topo;
        struct xe_gt *gt;
        int id;

        if (query->size == 0) {
                query->size = size;
                return 0;
        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                return -EINVAL;
        }

        for_each_gt(gt, xe, id) {
                topo.gt_id = id;

                topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
                query_ptr = copy_mask(query_ptr, &topo,
                                      gt->fuse_topo.g_dss_mask,
                                      sizeof(gt->fuse_topo.g_dss_mask));
                if (IS_ERR(query_ptr))
                        return PTR_ERR(query_ptr);

                topo.type = DRM_XE_TOPO_DSS_COMPUTE;
                query_ptr = copy_mask(query_ptr, &topo,
                                      gt->fuse_topo.c_dss_mask,
                                      sizeof(gt->fuse_topo.c_dss_mask));
                if (IS_ERR(query_ptr))
                        return PTR_ERR(query_ptr);

                topo.type = DRM_XE_TOPO_EU_PER_DSS;
                query_ptr = copy_mask(query_ptr, &topo,
                                      gt->fuse_topo.eu_mask_per_dss,
                                      sizeof(gt->fuse_topo.eu_mask_per_dss));
                if (IS_ERR(query_ptr))
                        return PTR_ERR(query_ptr);
        }

        return 0;
}

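/*
 * A minimal userspace sketch (not part of this file) of walking the blob
 * produced by copy_mask()/query_gt_topology(): a struct
 * drm_xe_query_topology_mask header immediately followed by num_bytes of
 * mask data, repeated three times per GT. "buf" and "size" are assumed to
 * come from the usual two-call query; handle_mask() is a hypothetical
 * consumer.
 *
 *	for (char *p = buf; p < buf + size; ) {
 *		struct drm_xe_query_topology_mask *topo = (void *)p;
 *
 *		handle_mask(topo->gt_id, topo->type, topo->mask, topo->num_bytes);
 *		p += sizeof(*topo) + topo->num_bytes;
 *	}
 */
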
static int (* const xe_query_funcs[])(struct xe_device *xe,
                                      struct drm_xe_device_query *query) = {
        [DRM_XE_DEVICE_QUERY_ENGINES] = query_engines,
        [DRM_XE_DEVICE_QUERY_MEM_REGIONS] = query_mem_regions,
        [DRM_XE_DEVICE_QUERY_CONFIG] = query_config,
        [DRM_XE_DEVICE_QUERY_GT_LIST] = query_gt_list,
        [DRM_XE_DEVICE_QUERY_HWCONFIG] = query_hwconfig,
        [DRM_XE_DEVICE_QUERY_GT_TOPOLOGY] = query_gt_topology,
        [DRM_XE_DEVICE_QUERY_ENGINE_CYCLES] = query_engine_cycles,
};

int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct drm_xe_device_query *query = data;
        u32 idx;

        if (XE_IOCTL_DBG(xe, query->extensions) ||
            XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
                return -EINVAL;

        if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
                return -EINVAL;

        idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
        if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
                return -EINVAL;

        return xe_query_funcs[idx](xe, query);
}