// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

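/*
 * Selftests for struct intel_memory_region: the mock tests exercise the
 * region allocator (fill, reserve, contiguous and splintered regions,
 * scatterlist segment limits), while the live and perf tests exercise
 * real LMEM via CPU and GPU writes and memcpy throughput measurements.
 */
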
#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include <drm/drm_buddy.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"
#include "i915_memcpy.h"
#include "i915_ttm_buddy_manager.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

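/*
 * Unpin and release the backing pages of every object on @objects, then
 * drain the freed-object worker so nothing lingers in the memory region
 * between tests.
 */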
static void close_objects(struct intel_memory_region *mem,
                          struct list_head *objects)
{
        struct drm_i915_private *i915 = mem->i915;
        struct drm_i915_gem_object *obj, *on;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                i915_gem_object_lock(obj, NULL);
                if (i915_gem_object_has_pinned_pages(obj))
                        i915_gem_object_unpin_pages(obj);
                /* No polluting the memory region between tests */
                __i915_gem_object_put_pages(obj);
                i915_gem_object_unlock(obj);
                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }

        cond_resched();

        i915_gem_drain_freed_objects(i915);
}

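/*
 * Fill the region with objects of prime-numbered page counts until
 * allocation fails, checking that failure only happens once the space is
 * genuinely exhausted. Prime sizes give awkward, non-power-of-two
 * allocations that stress the allocator's splitting and coalescing.
 */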
static int igt_mock_fill(void *arg)
{
        struct intel_memory_region *mem = arg;
        resource_size_t total = resource_size(&mem->region);
        resource_size_t page_size;
        resource_size_t rem;
        unsigned long max_pages;
        unsigned long page_num;
        LIST_HEAD(objects);
        int err = 0;

        page_size = PAGE_SIZE;
        max_pages = div64_u64(total, page_size);
        rem = total;

        for_each_prime_number_from(page_num, 1, max_pages) {
                resource_size_t size = page_num * page_size;
                struct drm_i915_gem_object *obj;

                obj = i915_gem_object_create_region(mem, size, 0, 0);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                err = i915_gem_object_pin_pages_unlocked(obj);
                if (err) {
                        i915_gem_object_put(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);
                rem -= size;
        }

        if (err == -ENOMEM)
                err = 0;
        if (err == -ENXIO) {
                if (page_num * page_size <= rem) {
                        pr_err("%s failed, space still left in region\n",
                               __func__);
                        err = -EINVAL;
                } else {
                        err = 0;
                }
        }

        close_objects(mem, &objects);

        return err;
}

static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
                  struct list_head *objects,
                  u64 size,
                  unsigned int flags)
{
        struct drm_i915_gem_object *obj;
        int err;

        obj = i915_gem_object_create_region(mem, size, 0, flags);
        if (IS_ERR(obj))
                return obj;

        err = i915_gem_object_pin_pages_unlocked(obj);
        if (err)
                goto put;

        list_add(&obj->st_link, objects);
        return obj;

put:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

static void igt_object_release(struct drm_i915_gem_object *obj)
{
        i915_gem_object_lock(obj, NULL);
        i915_gem_object_unpin_pages(obj);
        __i915_gem_object_put_pages(obj);
        i915_gem_object_unlock(obj);
        list_del(&obj->st_link);
        i915_gem_object_put(obj);
}

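/*
 * An object is physically contiguous if every scatterlist entry begins
 * exactly where the previous one ended in dma address space.
 */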
static bool is_contiguous(struct drm_i915_gem_object *obj)
{
        struct scatterlist *sg;
        dma_addr_t addr = -1;

        for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
                if (addr != -1 && sg_dma_address(sg) != addr)
                        return false;

                addr = sg_dma_address(sg) + sg_dma_len(sg);
        }

        return true;
}

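/*
 * Carve random-sized reservations out of a fresh region in random order,
 * then verify that exactly the unreserved remainder can still be
 * allocated, i.e. reservations neither leak nor block free space.
 */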
static int igt_mock_reserve(void *arg)
{
        struct intel_memory_region *mem = arg;
        struct drm_i915_private *i915 = mem->i915;
        resource_size_t avail = resource_size(&mem->region);
        struct drm_i915_gem_object *obj;
        const u32 chunk_size = SZ_32M;
        u32 i, offset, count, *order;
        u64 allocated, cur_avail;
        I915_RND_STATE(prng);
        LIST_HEAD(objects);
        int err = 0;

        count = avail / chunk_size;
        order = i915_random_order(count, &prng);
        if (!order)
                return 0;

        mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
        if (IS_ERR(mem)) {
                pr_err("failed to create memory region\n");
                err = PTR_ERR(mem);
                goto out_free_order;
        }

        /* Reserve a bunch of ranges within the region */
        for (i = 0; i < count; ++i) {
                u64 start = order[i] * chunk_size;
                u64 size = i915_prandom_u32_max_state(chunk_size, &prng);

                /* Allow for some really big holes */
                if (!size)
                        continue;

                size = round_up(size, PAGE_SIZE);
                offset = igt_random_offset(&prng, 0, chunk_size, size,
                                           PAGE_SIZE);

                err = intel_memory_region_reserve(mem, start + offset, size);
                if (err) {
                        pr_err("%s failed to reserve range\n", __func__);
                        goto out_close;
                }

                /* XXX: maybe sanity check the block range here? */
                avail -= size;
        }

        /* Try to see if we can allocate from the remaining space */
        allocated = 0;
        cur_avail = avail;
        do {
                u32 size = i915_prandom_u32_max_state(cur_avail, &prng);

                size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
                obj = igt_object_create(mem, &objects, size, 0);
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -ENXIO)
                                break;

                        err = PTR_ERR(obj);
                        goto out_close;
                }
                cur_avail -= size;
                allocated += size;
        } while (1);

        if (allocated != avail) {
                pr_err("%s mismatch between allocation and free space\n",
                       __func__);
                err = -EINVAL;
        }

out_close:
        close_objects(mem, &objects);
        intel_memory_region_destroy(mem);
out_free_order:
        kfree(order);
        return err;
}

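/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS at the minimum, maximum and random
 * object sizes, then deliberately fragment the region to confirm that
 * contiguous allocations fail exactly when no large enough contiguous
 * block remains, even though the total free space would suffice.
 */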
static int igt_mock_contiguous(void *arg)
{
        struct intel_memory_region *mem = arg;
        struct drm_i915_gem_object *obj;
        unsigned long n_objects;
        LIST_HEAD(objects);
        LIST_HEAD(holes);
        I915_RND_STATE(prng);
        resource_size_t total;
        resource_size_t min;
        u64 target;
        int err = 0;

        total = resource_size(&mem->region);

        /* Min size */
        obj = igt_object_create(mem, &objects, PAGE_SIZE,
                                I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (!is_contiguous(obj)) {
                pr_err("%s min object spans disjoint sg entries\n", __func__);
                err = -EINVAL;
                goto err_close_objects;
        }

        igt_object_release(obj);

        /* Max size */
        obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (!is_contiguous(obj)) {
                pr_err("%s max object spans disjoint sg entries\n", __func__);
                err = -EINVAL;
                goto err_close_objects;
        }

        igt_object_release(obj);

        /* Internal fragmentation should not bleed into the object size */
        target = i915_prandom_u64_state(&prng);
        div64_u64_rem(target, total, &target);
        target = round_up(target, PAGE_SIZE);
        target = max_t(u64, PAGE_SIZE, target);

        obj = igt_object_create(mem, &objects, target,
                                I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (obj->base.size != target) {
                pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
                       obj->base.size, target);
                err = -EINVAL;
                goto err_close_objects;
        }

        if (!is_contiguous(obj)) {
                pr_err("%s object spans disjoint sg entries\n", __func__);
                err = -EINVAL;
                goto err_close_objects;
        }

        igt_object_release(obj);

        /*
         * Try to fragment the address space, such that half of it is free, but
         * the max contiguous block size is SZ_64K.
         */

        target = SZ_64K;
        n_objects = div64_u64(total, target);

        while (n_objects--) {
                struct list_head *list;

                if (n_objects % 2)
                        list = &holes;
                else
                        list = &objects;

                obj = igt_object_create(mem, list, target,
                                        I915_BO_ALLOC_CONTIGUOUS);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto err_close_objects;
                }
        }

        close_objects(mem, &holes);

        min = target;
        target = total >> 1;

        /* Make sure we can still allocate all the fragmented space */
        obj = igt_object_create(mem, &objects, target, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto err_close_objects;
        }

        igt_object_release(obj);

        /*
         * Even though we have enough free space, we don't have a big enough
         * contiguous block. Make sure that holds true.
         */

        do {
                bool should_fail = target > min;

                obj = igt_object_create(mem, &objects, target,
                                        I915_BO_ALLOC_CONTIGUOUS);
                if (should_fail != IS_ERR(obj)) {
                        pr_err("%s target allocation(%llx) mismatch\n",
                               __func__, target);
                        err = -EINVAL;
                        goto err_close_objects;
                }

                target >>= 1;
        } while (target >= PAGE_SIZE);

err_close_objects:
        list_splice_tail(&holes, &objects);
        close_objects(mem, &objects);
        return err;
}

static int igt_mock_splintered_region(void *arg)
{
        struct intel_memory_region *mem = arg;
        struct drm_i915_private *i915 = mem->i915;
        struct i915_ttm_buddy_resource *res;
        struct drm_i915_gem_object *obj;
        struct drm_buddy *mm;
        unsigned int expected_order;
        LIST_HEAD(objects);
        u64 size;
        int err = 0;

        /*
         * Sanity check we can still allocate everything even if
         * mm.max_order does not cover the whole of mm.size, i.e. our
         * starting address space size is not a power-of-two.
         */

        size = (SZ_4G - 1) & PAGE_MASK;
        mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
        if (IS_ERR(mem))
                return PTR_ERR(mem);

        obj = igt_object_create(mem, &objects, size, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_close;
        }

        res = to_ttm_buddy_resource(obj->mm.res);
        mm = res->mm;
        if (mm->size != size) {
                pr_err("%s size mismatch(%llu != %llu)\n",
                       __func__, mm->size, size);
                err = -EINVAL;
                goto out_close;
        }

        expected_order = get_order(rounddown_pow_of_two(size));
        if (mm->max_order != expected_order) {
                pr_err("%s order mismatch(%u != %u)\n",
                       __func__, mm->max_order, expected_order);
                err = -EINVAL;
                goto out_close;
        }

        close_objects(mem, &objects);

        /*
         * While we should be able to allocate everything without any flag
         * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
         * actually limited to the largest power-of-two for the region size,
         * i.e. max_order, due to the inner workings of the buddy allocator.
         * So make sure that does indeed hold true.
         */

        obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
        if (!IS_ERR(obj)) {
                pr_err("%s too large contiguous allocation was not rejected\n",
                       __func__);
                err = -EINVAL;
                goto out_close;
        }

        obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
                                I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj)) {
                pr_err("%s largest possible contiguous allocation failed\n",
                       __func__);
                err = PTR_ERR(obj);
                goto out_close;
        }

out_close:
        close_objects(mem, &objects);
        intel_memory_region_destroy(mem);
        return err;
}

#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif

static int igt_mock_max_segment(void *arg)
{
        const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
        struct intel_memory_region *mem = arg;
        struct drm_i915_private *i915 = mem->i915;
        struct i915_ttm_buddy_resource *res;
        struct drm_i915_gem_object *obj;
        struct drm_buddy_block *block;
        struct drm_buddy *mm;
        struct list_head *blocks;
        struct scatterlist *sg;
        LIST_HEAD(objects);
        u64 size;
        int err = 0;

        /*
         * While we may create very large contiguous blocks, we may need
         * to break those down for consumption elsewhere. In particular,
         * dma-mapping with a scatterlist has an implicit limit of
         * UINT_MAX on each element.
         */

        size = SZ_8G;
        mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
        if (IS_ERR(mem))
                return PTR_ERR(mem);

        obj = igt_object_create(mem, &objects, size, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_put;
        }

        res = to_ttm_buddy_resource(obj->mm.res);
        blocks = &res->blocks;
        mm = res->mm;
        size = 0;
        list_for_each_entry(block, blocks, link) {
                if (drm_buddy_block_size(mm, block) > size)
                        size = drm_buddy_block_size(mm, block);
        }
        if (size < max_segment) {
                pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %llu\n",
                       __func__, max_segment, size);
                err = -EINVAL;
                goto out_close;
        }

        for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
                if (sg->length > max_segment) {
                        pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
                               __func__, sg->length, max_segment);
                        err = -EINVAL;
                        goto out_close;
                }
        }

out_close:
        close_objects(mem, &objects);
out_put:
        intel_memory_region_destroy(mem);
        return err;
}

static int igt_gpu_write_dw(struct intel_context *ce,
                            struct i915_vma *vma,
                            u32 dword,
                            u32 value)
{
        return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
                               vma->size >> PAGE_SHIFT, value);
}

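/*
 * Read back dword @dword of every page in the object through a WC map
 * and check it against @val; one sample per page keeps the check cheap.
 */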
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
        unsigned long n = obj->base.size >> PAGE_SHIFT;
        u32 *ptr;
        int err;

        err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
        if (err)
                return err;

        ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        ptr += dword;
        while (n--) {
                if (*ptr != val) {
                        pr_err("base[%u]=%08x, val=%08x\n",
                               dword, *ptr, val);
                        err = -EINVAL;
                        break;
                }

                ptr += PAGE_SIZE / sizeof(*ptr);
        }

        i915_gem_object_unpin_map(obj);
        return err;
}

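/*
 * Using randomly chosen engines of @ctx that can store a dword, write a
 * random value into a random dword of @obj from the GPU, then verify it
 * from the CPU, repeating until the timeout expires.
 */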
static int igt_gpu_write(struct i915_gem_context *ctx,
                         struct drm_i915_gem_object *obj)
{
        struct i915_gem_engines *engines;
        struct i915_gem_engines_iter it;
        struct i915_address_space *vm;
        struct intel_context *ce;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end_time);
        unsigned int count;
        struct i915_vma *vma;
        int *order;
        int i, n;
        int err = 0;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        n = 0;
        count = 0;
        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                count++;
                if (!intel_engine_can_store_dword(ce->engine))
                        continue;

                vm = ce->vm;
                n++;
        }
        i915_gem_context_unlock_engines(ctx);
        if (!n)
                return 0;

        order = i915_random_order(count * count, &prng);
        if (!order)
                return -ENOMEM;

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto out_free;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto out_free;

        i = 0;
        engines = i915_gem_context_lock_engines(ctx);
        do {
                u32 rng = prandom_u32_state(&prng);
                u32 dword = offset_in_page(rng) / 4;

                ce = engines->engines[order[i] % engines->num_engines];
                i = (i + 1) % (count * count);
                if (!ce || !intel_engine_can_store_dword(ce->engine))
                        continue;

                err = igt_gpu_write_dw(ce, vma, dword, rng);
                if (err)
                        break;

                i915_gem_object_lock(obj, NULL);
                err = igt_cpu_check(obj, dword, rng);
                i915_gem_object_unlock(obj);
                if (err)
                        break;
        } while (!__igt_timeout(end_time, NULL));
        i915_gem_context_unlock_engines(ctx);

out_free:
        kfree(order);

        if (err == -ENOMEM)
                err = 0;

        return err;
}

static int igt_lmem_create(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        int err = 0;

        obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages_unlocked(obj);
        if (err)
                goto out_put;

        i915_gem_object_unpin_pages(obj);
out_put:
        i915_gem_object_put(obj);

        return err;
}

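/*
 * For each power-of-two page size from PAGE_SIZE up to 1G, create an lmem
 * object with that minimum page size and check that its backing store is
 * suitably aligned in dma address space.
 */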
static int igt_lmem_create_with_ps(void *arg)
{
        struct drm_i915_private *i915 = arg;
        int err = 0;
        u32 ps;

        for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
                struct drm_i915_gem_object *obj;
                dma_addr_t daddr;

                obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        if (err == -ENXIO || err == -E2BIG) {
                                pr_info("%s not enough lmem for ps(%u) err=%d\n",
                                        __func__, ps, err);
                                err = 0;
                        }

                        break;
                }

                if (obj->base.size != ps) {
                        pr_err("%s size(%zu) != ps(%u)\n",
                               __func__, obj->base.size, ps);
                        err = -EINVAL;
                        /* Not yet locked, so don't take the unlock path */
                        i915_gem_object_put(obj);
                        break;
                }

                i915_gem_object_lock(obj, NULL);
                err = i915_gem_object_pin_pages(obj);
                if (err)
                        goto out_put;

                daddr = i915_gem_object_get_dma_address(obj, 0);
                if (!IS_ALIGNED(daddr, ps)) {
                        pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
                               __func__, &daddr, ps);
                        err = -EINVAL;
                        goto out_unpin;
                }

out_unpin:
                i915_gem_object_unpin_pages(obj);
                __i915_gem_object_put_pages(obj);
out_put:
                i915_gem_object_unlock(obj);
                i915_gem_object_put(obj);

                if (err)
                        break;
        }

        return err;
}

static int igt_lmem_create_cleared_cpu(void *arg)
{
        struct drm_i915_private *i915 = arg;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end_time);
        u32 size, i;
        int err;

        i915_gem_drain_freed_objects(i915);

        size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
        size = round_up(size, PAGE_SIZE);
        i = 0;

        do {
                struct drm_i915_gem_object *obj;
                unsigned int flags;
                u32 dword, val;
                void *vaddr;

                /*
                 * Alternate between cleared and uncleared allocations,
                 * dirtying the pages each time, to check that the pages are
                 * always cleared when requested. As the only user of the
                 * region we should see plenty of reuse of the underlying
                 * pages between iterations.
                 */

                flags = I915_BO_ALLOC_CPU_CLEAR;
                if (i & 1)
                        flags = 0;

                obj = i915_gem_object_create_lmem(i915, size, flags);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);

                i915_gem_object_lock(obj, NULL);
                err = i915_gem_object_pin_pages(obj);
                if (err)
                        goto out_put;

                dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
                                                   &prng);

                if (flags & I915_BO_ALLOC_CPU_CLEAR) {
                        err = igt_cpu_check(obj, dword, 0);
                        if (err) {
                                pr_err("%s failed with size=%u, flags=%u\n",
                                       __func__, size, flags);
                                goto out_unpin;
                        }
                }

                vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
                if (IS_ERR(vaddr)) {
                        err = PTR_ERR(vaddr);
                        goto out_unpin;
                }

                val = prandom_u32_state(&prng);

                memset32(vaddr, val, obj->base.size / sizeof(u32));

                i915_gem_object_flush_map(obj);
                i915_gem_object_unpin_map(obj);
out_unpin:
                i915_gem_object_unpin_pages(obj);
                __i915_gem_object_put_pages(obj);
out_put:
                i915_gem_object_unlock(obj);
                i915_gem_object_put(obj);

                if (err)
                        break;
                ++i;
        } while (!__igt_timeout(end_time, NULL));

        pr_info("%s completed (%u) iterations\n", __func__, i);

        return err;
}

static int igt_lmem_write_gpu(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        struct i915_gem_context *ctx;
        struct file *file;
        I915_RND_STATE(prng);
        u32 sz;
        int err;

        file = mock_file(i915);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ctx = live_context(i915, file);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out_file;
        }

        sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

        obj = i915_gem_object_create_lmem(i915, sz, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_file;
        }

        err = i915_gem_object_pin_pages_unlocked(obj);
        if (err)
                goto out_put;

        err = igt_gpu_write(ctx, obj);
        if (err)
                pr_err("igt_gpu_write failed(%d)\n", err);

        i915_gem_object_unpin_pages(obj);
out_put:
        i915_gem_object_put(obj);
out_file:
        fput(file);
        return err;
}

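/*
 * Pick a uniformly random engine among the user-visible engines of the
 * given class, or return NULL if the class is empty.
 */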
static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
                    unsigned int class,
                    struct rnd_state *prng)
{
        struct intel_engine_cs *engine;
        unsigned int count;

        count = 0;
        for (engine = intel_engine_lookup_user(i915, class, 0);
             engine && engine->uabi_class == class;
             engine = rb_entry_safe(rb_next(&engine->uabi_node),
                                    typeof(*engine), uabi_node))
                count++;

        count = i915_prandom_u32_max_state(count, prng);
        return intel_engine_lookup_user(i915, class, count);
}

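/*
 * Clear the object with the GPU (via the migrate context of a randomly
 * chosen copy engine), then issue CPU writes of assorted sizes and
 * alignments through a WC mapping, each time reading back one randomly
 * sampled dword to verify the write landed.
 */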
static int igt_lmem_write_cpu(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        I915_RND_STATE(prng);
        IGT_TIMEOUT(end_time);
        u32 bytes[] = {
                0, /* rng placeholder */
                sizeof(u32),
                sizeof(u64),
                64, /* cl */
                PAGE_SIZE,
                PAGE_SIZE - sizeof(u32),
                PAGE_SIZE - sizeof(u64),
                PAGE_SIZE - 64,
        };
        struct intel_engine_cs *engine;
        struct i915_request *rq;
        u32 *vaddr;
        u32 sz;
        u32 i;
        int *order;
        int count;
        int err;

        engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
        if (!engine)
                return 0;

        pr_info("%s: using %s\n", __func__, engine->name);

        sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
        sz = max_t(u32, 2 * PAGE_SIZE, sz);

        obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        i915_gem_object_lock(obj, NULL);
        /* Put the pages into a known state -- from the gpu for added fun */
        intel_engine_pm_get(engine);
        err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
                                          obj->mm.pages->sgl, I915_CACHE_NONE,
                                          true, 0xdeadbeaf, &rq);
        if (rq) {
                dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
                i915_request_put(rq);
        }

        intel_engine_pm_put(engine);
        if (!err)
                err = i915_gem_object_set_to_wc_domain(obj, true);
        i915_gem_object_unlock(obj);
        if (err)
                goto out_unpin;

        count = ARRAY_SIZE(bytes);
        order = i915_random_order(count * count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_unpin;
        }

        /* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
        bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
        GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

        i = 0;
        do {
                u32 offset;
                u32 align;
                u32 dword;
                u32 size;
                u32 val;

                size = bytes[order[i] % count];
                i = (i + 1) % (count * count);

                align = bytes[order[i] % count];
                i = (i + 1) % (count * count);

                align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

                offset = igt_random_offset(&prng, 0, obj->base.size,
                                           size, align);

                val = prandom_u32_state(&prng);
                memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
                         size / sizeof(u32));

                /*
                 * Sample a random dw -- don't waste precious time reading
                 * every single dw.
                 */
                dword = igt_random_offset(&prng, offset,
                                          offset + size,
                                          sizeof(u32), sizeof(u32));
                dword /= sizeof(u32);
                if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
                        pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
                               __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
                               size, align, offset);
                        err = -EINVAL;
                        break;
                }
        } while (!__igt_timeout(end_time, NULL));

        kfree(order);

out_unpin:
        i915_gem_object_unpin_map(obj);
out_put:
        i915_gem_object_put(obj);

        return err;
}

static const char *repr_type(u32 type)
{
        switch (type) {
        case I915_MAP_WB:
                return "WB";
        case I915_MAP_WC:
                return "WC";
        }

        return "";
}

static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
                          void **out_addr)
{
        struct drm_i915_gem_object *obj;
        void *addr;

        obj = i915_gem_object_create_region(mr, size, 0, 0);
        if (IS_ERR(obj)) {
                if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
                        return ERR_PTR(-ENODEV);
                return obj;
        }

        addr = i915_gem_object_pin_map_unlocked(obj, type);
        if (IS_ERR(addr)) {
                i915_gem_object_put(obj);
                if (PTR_ERR(addr) == -ENXIO)
                        return ERR_PTR(-ENODEV);
                return addr;
        }

        *out_addr = addr;
        return obj;
}

static int wrap_ktime_compare(const void *A, const void *B)
{
        const ktime_t *a = A, *b = B;

        return ktime_compare(*a, *b);
}

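/*
 * Plain word-at-a-time copy loop, measured alongside the stock memcpy and
 * the WC-optimized i915_memcpy_from_wc as a simple baseline.
 */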
static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
        unsigned long *tmp = dst;
        const unsigned long *s = src;

        size = size / sizeof(unsigned long);
        while (size--)
                *tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
        memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
        i915_memcpy_from_wc(dst, src, size);
}

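/*
 * Time each copy routine over five passes, sort the samples, drop the
 * fastest and slowest, and report a weighted average of the middle three:
 * 4 * size bytes over t[1] + 2 * t[2] + t[3] nanoseconds, scaled to MiB/s.
 */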
static int _perf_memcpy(struct intel_memory_region *src_mr,
                        struct intel_memory_region *dst_mr,
                        u64 size, u32 src_type, u32 dst_type)
{
        struct drm_i915_private *i915 = src_mr->i915;
        const struct {
                const char *name;
                void (*copy)(void *dst, const void *src, size_t size);
                bool skip;
        } tests[] = {
                {
                        "memcpy",
                        igt_memcpy,
                },
                {
                        "memcpy_long",
                        igt_memcpy_long,
                },
                {
                        "memcpy_from_wc",
                        igt_memcpy_from_wc,
                        !i915_has_memcpy_from_wc(),
                },
        };
        struct drm_i915_gem_object *src, *dst;
        void *src_addr, *dst_addr;
        int ret = 0;
        int i;

        src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
        if (IS_ERR(src)) {
                ret = PTR_ERR(src);
                goto out;
        }

        dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
        if (IS_ERR(dst)) {
                ret = PTR_ERR(dst);
                goto out_unpin_src;
        }

        for (i = 0; i < ARRAY_SIZE(tests); ++i) {
                ktime_t t[5];
                int pass;

                if (tests[i].skip)
                        continue;

                for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
                        ktime_t t0, t1;

                        t0 = ktime_get();

                        tests[i].copy(dst_addr, src_addr, size);

                        t1 = ktime_get();
                        t[pass] = ktime_sub(t1, t0);
                }

                sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
                if (t[0] <= 0) {
                        /* ignore the impossible to protect our sanity */
                        pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy, unstable measurement [%lld, %lld]\n",
                                 __func__,
                                 src_mr->name, repr_type(src_type),
                                 dst_mr->name, repr_type(dst_type),
                                 tests[i].name, size >> 10,
                                 t[0], t[4]);
                        continue;
                }

                pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
                        __func__,
                        src_mr->name, repr_type(src_type),
                        dst_mr->name, repr_type(dst_type),
                        tests[i].name, size >> 10,
                        div64_u64(mul_u32_u32(4 * size,
                                              1000 * 1000 * 1000),
                                  t[1] + 2 * t[2] + t[3]) >> 20);

                cond_resched();
        }

        i915_gem_object_unpin_map(dst);
        i915_gem_object_put(dst);
out_unpin_src:
        i915_gem_object_unpin_map(src);
        i915_gem_object_put(src);

        i915_gem_drain_freed_objects(i915);
out:
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int perf_memcpy(void *arg)
{
        struct drm_i915_private *i915 = arg;
        static const u32 types[] = {
                I915_MAP_WB,
                I915_MAP_WC,
        };
        static const u32 sizes[] = {
                SZ_4K,
                SZ_64K,
                SZ_4M,
        };
        struct intel_memory_region *src_mr, *dst_mr;
        int src_id, dst_id;
        int i, j, k;
        int ret;

        for_each_memory_region(src_mr, i915, src_id) {
                for_each_memory_region(dst_mr, i915, dst_id) {
                        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                                for (j = 0; j < ARRAY_SIZE(types); ++j) {
                                        for (k = 0; k < ARRAY_SIZE(types); ++k) {
                                                ret = _perf_memcpy(src_mr,
                                                                   dst_mr,
                                                                   sizes[i],
                                                                   types[j],
                                                                   types[k]);
                                                if (ret)
                                                        return ret;
                                        }
                                }
                        }
                }
        }

        return 0;
}

int intel_memory_region_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_mock_reserve),
                SUBTEST(igt_mock_fill),
                SUBTEST(igt_mock_contiguous),
                SUBTEST(igt_mock_splintered_region),
                SUBTEST(igt_mock_max_segment),
        };
        struct intel_memory_region *mem;
        struct drm_i915_private *i915;
        int err;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
        if (IS_ERR(mem)) {
                pr_err("failed to create memory region\n");
                err = PTR_ERR(mem);
                goto out_unref;
        }

        err = i915_subtests(tests, mem);

        intel_memory_region_destroy(mem);
out_unref:
        mock_destroy_device(i915);
        return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_lmem_create),
                SUBTEST(igt_lmem_create_with_ps),
                SUBTEST(igt_lmem_create_cleared_cpu),
                SUBTEST(igt_lmem_write_cpu),
                SUBTEST(igt_lmem_write_gpu),
        };

        if (!HAS_LMEM(i915)) {
                pr_info("device lacks LMEM support, skipping\n");
                return 0;
        }

        if (intel_gt_is_wedged(to_gt(i915)))
                return 0;

        return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(perf_memcpy),
        };

        if (intel_gt_is_wedged(to_gt(i915)))
                return 0;

        return i915_live_subtests(tests, i915);
}