// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include <drm/drm_buddy.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"
#include "i915_memcpy.h"
#include "i915_ttm_buddy_manager.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

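/*
 * Fill the region with objects of prime-numbered page counts until the
 * allocator gives up, then check that it failed because the region really
 * was exhausted and not with space still left over.
 */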
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = PAGE_SIZE;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

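/* Create an object in @mem, pin its pages and track it on @objects. */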
static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, 0, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

static bool is_contiguous(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	dma_addr_t addr = -1;

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (addr != -1 && sg_dma_address(sg) != addr)
			return false;

		addr = sg_dma_address(sg) + sg_dma_len(sg);
	}

	return true;
}

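/*
 * Poke at the reservation interface: carve random ranges out of a fresh
 * mock region and then check the remainder can still be fully allocated.
 */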
static int igt_mock_reserve(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	resource_size_t avail = resource_size(&mem->region);
	struct drm_i915_gem_object *obj;
	const u32 chunk_size = SZ_32M;
	u32 i, offset, count, *order;
	u64 allocated, cur_avail;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	int err = 0;

	count = avail / chunk_size;
	order = i915_random_order(count, &prng);
	if (!order)
		return 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_free_order;
	}

	/* Reserve a bunch of ranges within the region */
	for (i = 0; i < count; ++i) {
		u64 start = order[i] * chunk_size;
		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);

		/* Allow for some really big holes */
		if (!size)
			continue;

		size = round_up(size, PAGE_SIZE);
		offset = igt_random_offset(&prng, 0, chunk_size, size,
					   PAGE_SIZE);

		err = intel_memory_region_reserve(mem, start + offset, size);
		if (err) {
			pr_err("%s failed to reserve range", __func__);
			goto out_close;
		}

		/* XXX: maybe sanity check the block range here? */
		avail -= size;
	}

	/* Try to see if we can allocate from the remaining space */
	allocated = 0;
	cur_avail = avail;
	do {
		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);

		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
		obj = igt_object_create(mem, &objects, size, 0);
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -ENXIO)
				break;

			err = PTR_ERR(obj);
			goto out_close;
		}
		cur_avail -= size;
		allocated += size;
	} while (1);

	if (allocated != avail) {
		pr_err("%s mismatch between allocation and free space", __func__);
		err = -EINVAL;
	}

out_close:
	close_objects(mem, &objects);
	intel_memory_region_destroy(mem);
out_free_order:
	kfree(order);
	return err;
}

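/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS: min and max sized objects, exact object
 * sizes under fragmentation, and that contiguous requests fail once no
 * large-enough block remains, even with plenty of free space in total.
 */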
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, PAGE_SIZE,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s min object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s max object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (!is_contiguous(obj)) {
		pr_err("%s object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= PAGE_SIZE);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct drm_buddy *mm;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * Sanity check we can still allocate everything even if the
	 * mm.max_order != mm.size. i.e our starting address space size is not a
	 * power-of-two.
	 */

	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_put;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	mm = res->mm;
	if (mm->size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mm->size, size);
		err = -EINVAL;
		goto out_put;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mm->max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mm->max_order, expected_order);
		err = -EINVAL;
		goto out_put;
	}

	close_objects(mem, &objects);

	/*
	 * While we should be able allocate everything without any flag
	 * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
	 * actually limited to the largest power-of-two for the region size i.e
	 * max_order, due to the inner workings of the buddy allocator. So make
	 * sure that does indeed hold true.
	 */

	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_destroy(mem);

	return err;
}

#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif

static int igt_mock_max_segment(void *arg)
{
	const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct drm_buddy_block *block;
	struct drm_buddy *mm;
	struct list_head *blocks;
	struct scatterlist *sg;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * While we may create very large contiguous blocks, we may need
	 * to break those down for consumption elsewhere. In particular,
	 * dma-mapping with scatterlist elements have an implicit limit of
	 * UINT_MAX on each element.
	 */

	size = SZ_8G;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_put;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	blocks = &res->blocks;
	mm = res->mm;
	size = 0;
	list_for_each_entry(block, blocks, link) {
		if (drm_buddy_block_size(mm, block) > size)
			size = drm_buddy_block_size(mm, block);
	}
	if (size < max_segment) {
		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
		       __func__, max_segment, size);
		err = -EINVAL;
		goto out_close;
	}

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (sg->length > max_segment) {
			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
			       __func__, sg->length, max_segment);
			err = -EINVAL;
			goto out_close;
		}
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_destroy(mem);

	return err;
}

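/* Write @value at dword index @dword in each page of @vma, from the GPU. */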
static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

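/*
 * Hammer the object with GPU stores from every dword-capable engine, in a
 * random order, verifying each write from the CPU.
 */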
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		i915_gem_object_lock(obj, NULL);
		err = igt_cpu_check(obj, dword, rng);
		i915_gem_object_unlock(obj);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

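/* Smoke test: create and pin a minimal lmem object. */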
static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

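/* Check that lmem objects respect the requested minimum page size (ps). */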
static int igt_lmem_create_with_ps(void *arg)
{
	struct drm_i915_private *i915 = arg;
	int err = 0;
	u32 ps;

	for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
		struct drm_i915_gem_object *obj;
		dma_addr_t daddr;

		obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -ENXIO || err == -E2BIG) {
				pr_info("%s not enough lmem for ps(%u) err=%d\n",
					__func__, ps, err);
				err = 0;
			}

			break;
		}

		if (obj->base.size != ps) {
			pr_err("%s size(%zu) != ps(%u)\n",
			       __func__, obj->base.size, ps);
			err = -EINVAL;
			goto out_put;
		}

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_unlock;

		daddr = i915_gem_object_get_dma_address(obj, 0);
		if (!IS_ALIGNED(daddr, ps)) {
			pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
			       __func__, &daddr, ps);
			err = -EINVAL;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_unlock:
		i915_gem_object_unlock(obj);
out_put:
		i915_gem_object_put(obj);

		if (err)
			break;
	}

	return err;
}

static int igt_lmem_create_cleared_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 size, i;
	int err;

	i915_gem_drain_freed_objects(i915);

	size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
	size = round_up(size, PAGE_SIZE);
	i = 0;

	do {
		struct drm_i915_gem_object *obj;
		unsigned int flags;
		u32 dword, val;
		void *vaddr;

		/*
		 * Alternate between cleared and uncleared allocations, while
		 * also dirtying the pages each time to check that the pages are
		 * always cleared if requested, since we should get some overlap
		 * of the underlying pages, if not all, since we are the only
		 * user.
		 */

		flags = I915_BO_ALLOC_CPU_CLEAR;
		if (i & 1)
			flags = 0;

		obj = i915_gem_object_create_lmem(i915, size, flags);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
						   &prng);

		if (flags & I915_BO_ALLOC_CPU_CLEAR) {
			err = igt_cpu_check(obj, dword, 0);
			if (err) {
				pr_err("%s failed with size=%u, flags=%u\n",
				       __func__, size, flags);
				goto out_unpin;
			}
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto out_unpin;
		}

		val = prandom_u32_state(&prng);

		memset32(vaddr, val, obj->base.size / sizeof(u32));

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);

		if (err)
			return err;

		i++;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s completed (%u) iterations\n", __func__, i);

	return 0;
}

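/* Write to an lmem object from the GPU, then verify the contents from the CPU. */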
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}

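/* Pick, at random, one of the engines of the given uabi @class. */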
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

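/*
 * Scribble over lmem through a WC mapping at random sizes, alignments and
 * offsets, after first clearing the pages from the GPU, and read back a
 * sampled dword each time to verify that the write landed.
 */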
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
	};
	struct intel_engine_cs *engine;
	struct i915_request *rq;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	i915_gem_object_lock(obj, NULL);
	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
					  obj->mm.pages->sgl, I915_CACHE_NONE,
					  true, 0xdeadbeaf, &rq);
	if (rq) {
		dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
		i915_request_put(rq);
	}

	intel_engine_pm_put(engine);
	if (!err)
		err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	kfree(order);
out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static const char *repr_type(u32 type)
{
	switch (type) {
	case I915_MAP_WB:
		return "WB";
	case I915_MAP_WC:
		return "WC";
	}

	return "";
}

static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}

static int wrap_ktime_compare(const void *A, const void *B)
{
	const ktime_t *a = A, *b = B;

	return ktime_compare(*a, *b);
}

static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *tmp = dst;
	const unsigned long *s = src;

	size = size / sizeof(unsigned long);
	while (size--)
		*tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}

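/*
 * Time each memcpy variant between the pinned mappings over several passes,
 * then report a trimmed average as bandwidth, skipping unstable measurements.
 */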
static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{
			"memcpy",
			igt_memcpy,
			false,
		},
		{
			"memcpy_long",
			igt_memcpy_long,
			false,
		},
		{
			"memcpy_from_wc",
			igt_memcpy_from_wc,
			!i915_has_memcpy_from_wc(),
		},
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();

			tests[i].copy(dst_addr, src_addr, size);

			t1 = ktime_get();
			t[pass] = ktime_sub(t1, t0);
		}

		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
		if (t[0] <= 0) {
			/* ignore the impossible to protect our sanity */
			pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
				 __func__,
				 src_mr->name, repr_type(src_type),
				 dst_mr->name, repr_type(dst_type),
				 tests[i].name, size >> 10,
				 t[0], t[4]);
			continue;
		}

		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
			__func__,
			src_mr->name, repr_type(src_type),
			dst_mr->name, repr_type(dst_type),
			tests[i].name, size >> 10,
			div64_u64(mul_u32_u32(4 * size,
					      1000 * 1000 * 1000),
				  t[1] + 2 * t[2] + t[3]) >> 20);

		cond_resched();
	}

	i915_gem_object_unpin_map(dst);
	i915_gem_object_put(dst);
out_unpin_src:
	i915_gem_object_unpin_map(src);
	i915_gem_object_put(src);

	i915_gem_drain_freed_objects(i915);
out:
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = {
		I915_MAP_WB,
		I915_MAP_WC,
	};
	static const u32 sizes[] = {
		SZ_4K,
		SZ_64K,
		SZ_4M,
	};
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}

int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_reserve),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
		SUBTEST(igt_mock_max_segment),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_destroy(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_create_with_ps),
		SUBTEST(igt_lmem_create_cleared_cpu),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}