/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_gem_device.h"

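/*
 * The helpers below provide "fake" GEM objects: their backing store is a
 * scatterlist that simply repeats a dummy page, so the selftests can create
 * objects spanning enormous GTT ranges without allocating a matching amount
 * of real memory.
 */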
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)

static int fake_get_pages(struct drm_i915_gem_object *obj)
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {

	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

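/*
 * fake_dma_object() wraps the fake backing store in a CPU-domain object of
 * the requested size; pinning (and immediately unpinning) its pages
 * preallocates the dummy backing storage up front so later binds do not
 * fail on allocation.
 */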
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))

	i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);

	return ERR_PTR(-ENOMEM);

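/*
 * igt_ppgtt_alloc: construct a bare ppGTT and ask it to allocate page-table
 * backing for progressively larger chunks of its virtual range, first from
 * the start each time and then incrementally, to check the va_range
 * allocator copes with the whole address space.
 */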
static int igt_ppgtt_alloc(void *arg)
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = __hw_ppgtt_init(ppgtt, dev_priv);

	if (!ppgtt->base.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	     size <= ppgtt->base.total;
		err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
		if (err == -ENOMEM) {
			pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
			err = 0; /* virtual space too large! */
		goto err_ppgtt_cleanup;

		ppgtt->base.clear_range(&ppgtt->base, 0, size);

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->base.total;
	     last = size, size <<= 2) {
		err = ppgtt->base.allocate_va_range(&ppgtt->base,
		if (err == -ENOMEM) {
			pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
				last, size - last, ilog2(size));
			err = 0; /* virtual space too large! */
		goto err_ppgtt_cleanup;

	ppgtt->base.cleanup(&ppgtt->base);

	mutex_unlock(&dev_priv->drm.struct_mutex);

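/*
 * lowlevel_hole: drive the address space directly (allocate_va_range,
 * insert_entries, clear_range) through a stack-local mock vma, bypassing
 * the normal VMA machinery, with object sizes doubling until one no longer
 * fits in the hole and offsets visited in random order.
 */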
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
	I915_RND_STATE(seed_prng);

	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);

			order = i915_random_order(count, &prng);
		} while (!order && count);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);

		ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))

		list_del(&obj->st_link);
		i915_gem_object_put(obj);

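/*
 * fill_hole: build up lists of objects whose sizes grow by a prime factor
 * and bind them back-to-back from either end of the hole ("top-down" and
 * "bottom-up" phases), checking each vma lands exactly where requested
 * before unbinding everything again.
 */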
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },

			obj = fake_dma_object(i915, full_size);

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 */
			for (p = phases; p->name; p++) {

				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

						if (offset < hole_start + obj->base.size)
						offset -= obj->base.size;

					err = i915_vma_pin(vma, 0, 0, offset | flags);
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

						if (offset + obj->base.size > hole_end)
						offset += obj->base.size;

				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

						if (offset < hole_start + obj->base.size)
						offset -= obj->base.size;

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,

					err = i915_vma_unbind(vma);
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,

						if (offset + obj->base.size > hole_end)
						offset += obj->base.size;

				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

						if (offset < hole_start + obj->base.size)
						offset -= obj->base.size;

					err = i915_vma_pin(vma, 0, 0, offset | flags);
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

						if (offset + obj->base.size > hole_end)
						offset += obj->base.size;

				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

						if (offset < hole_start + obj->base.size)
						offset -= obj->base.size;

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

					err = i915_vma_unbind(vma);
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,

						if (offset + obj->base.size > hole_end)
						offset += obj->base.size;

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {

			close_object_list(&objects, vm);

	close_object_list(&objects, vm);

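/*
 * walk_hole: bind a single object at every successive offset inside the
 * hole, for a series of prime object sizes, verifying placement and a clean
 * unbind at each step.
 */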
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);

		vma = i915_vma_instance(obj, vm, NULL);

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);

			err = i915_vma_unbind(vma);
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",

		if (!i915_vma_is_ggtt(vma))

		i915_gem_object_put(obj);

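/*
 * pot_hole: pin a two-page object so that it straddles every power-of-two
 * boundary inside the hole. For example (illustrative numbers only), with a
 * 4KiB page and step = 1MiB the object is pinned at 0xff000, 0x1ff000, ...,
 * i.e. one page just below and one page just above each 1MiB boundary.
 */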
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);

	vma = i915_vma_instance(obj, vm, NULL);

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
		u64 step = BIT_ULL(pot);

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       hole_start, hole_end,

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = i915_vma_unbind(vma);

			err = i915_vma_unbind(vma);

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {

	if (!i915_vma_is_ggtt(vma))

	i915_gem_object_put(obj);

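/*
 * drunk_hole: like lowlevel_hole, but going through the full vma pin/unbind
 * path, placing a power-of-two sized object at pseudo-randomly ordered
 * offsets throughout the hole.
 */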
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
	I915_RND_STATE(prng);

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);

			order = i915_random_order(count, &prng);
		} while (!order && count);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));

		vma = i915_vma_instance(obj, vm, NULL);

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       hole_start, hole_end,

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				err = i915_vma_unbind(vma);

			err = i915_vma_unbind(vma);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {

		if (!i915_vma_is_ggtt(vma))

		i915_gem_object_put(obj);

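/*
 * __shrink_hole: pack the hole tightly with objects of doubling size at
 * monotonically increasing fixed offsets, keeping them all bound at once so
 * the page-table allocator is pushed towards exhaustion.
 */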
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			err = i915_vma_unbind(vma);

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {

	close_object_list(&objects, vm);

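/*
 * shrink_hole: rerun __shrink_hole while injecting allocation faults into
 * the address space (fault_attr with the failure probability effectively
 * maxed out and the failure interval swept over small primes) to exercise
 * the error and shrinker paths of the page-table allocators.
 */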
static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

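/*
 * exercise_ppgtt: harness for the live ppGTT tests. Creates a full ppGTT on
 * behalf of a mock file, runs the given hole exerciser over its entire
 * range, then closes and releases it again.
 */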
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);

	if (!USES_FULL_PPGTT(dev_priv))

	file = mock_file(dev_priv);
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
		err = PTR_ERR(ppgtt);

	GEM_BUG_ON(offset_in_page(ppgtt->base.total));
	GEM_BUG_ON(ppgtt->base.closed);

	err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

	i915_ppgtt_close(&ppgtt->base);
	i915_ppgtt_put(ppgtt);

	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)

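/*
 * exercise_ggtt: run the given exerciser over every remaining hole in the
 * live GGTT. The holes are sorted by address first and re-sorted after each
 * pass, since running a test may have reshaped the underlying drm_mm.
 */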
static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);

	mutex_lock(&i915->drm.struct_mutex);

	list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
		if (hole_start < last)

		if (ggtt->base.mm.color_adjust)
			ggtt->base.mm.color_adjust(node, 0,
						   &hole_start, &hole_end);
		if (hole_start >= hole_end)

		err = func(i915, &ggtt->base, hole_start, hole_end, end_time);

		/* As we have manipulated the drm_mm, the list may be corrupt */

	mutex_unlock(&i915->drm.struct_mutex);

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

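/*
 * igt_ggtt_page: reserve a scratch block in the mappable aperture, then use
 * insert_page() to map a single page at randomised offsets within it,
 * writing through the GTT and reading the values back to check each PTE
 * insertion in isolation.
 */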
static int igt_ggtt_page(void *arg)
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

	err = i915_gem_object_pin_pages(obj);

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,

	order = i915_random_order(count, &prng);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

			pr_err("insert page failed: found %d, expected %d\n",

	drm_mm_remove_node(&tmp);

	i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);

	mutex_unlock(&i915->drm.struct_mutex);

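/*
 * track_vma_bind: the mock tests insert nodes without going through the
 * real binding path, so mimic the bookkeeping a bind would have done
 * (bind_count, pinned pages, placement on the inactive list) to keep
 * eviction and teardown consistent.
 */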
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);

	ctx = mock_context(i915, "mock");

	err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

	mock_context_close(ctx);

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_vma_unbind(vma);
			pr_err("i915_vma_unbind failed with err=%d!\n", err);

		offset = random_offset(0, i915->ggtt.base.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);

	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

static int igt_gtt_insert(void *arg)
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
	} invalid_insert[] = {
		i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
		0, i915->ggtt.base.total,
		2*I915_GTT_PAGE_SIZE, 0,
		0, I915_GTT_PAGE_SIZE,
		-(u64)I915_GTT_PAGE_SIZE, 0,
		0, 4*I915_GTT_PAGE_SIZE,
		-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
		0, 4*I915_GTT_PAGE_SIZE,
		I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
		I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,

	/* Start by filling the GGTT */
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);

			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");

		__i915_vma_unpin(vma);

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
			pr_err("i915_vma_unbind failed with err=%d!\n", err);

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);

	/* And then force evictions */
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);

		err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.base.total,
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.base.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

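/*
 * The tables below register the subtests with the i915 selftest harness.
 * When the kernel is built with CONFIG_DRM_I915_SELFTEST, they are typically
 * run by loading the module with i915.mock_selftests=-1 (for the mock device
 * tests) or i915.live_selftests=-1 (for the tests against real hardware);
 * -1 is the conventional "run all" value.
 */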
int i915_gem_gtt_mock_selftests(void)
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;

	i915 = mock_gem_device();

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_unref(&i915->drm);

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));

	return i915_subtests(tests, i915);
}