/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
        /*
         * As we may hold onto the struct_mutex for inordinate lengths of
         * time, the NMI khungtaskd detector may fire for the free objects
         * worker.
         */
        mutex_unlock(&i915->drm.struct_mutex);

        i915_gem_drain_freed_objects(i915);

        mutex_lock(&i915->drm.struct_mutex);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

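/*
 * Build a scatterlist for the object without allocating any real backing
 * storage: every entry aliases the single page at PFN_BIAS and carries a
 * fake dma address, so we can mock arbitrarily large objects at almost no
 * memory cost. The object is marked I915_MADV_DONTNEED as its "contents"
 * are never read back.
 */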
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return -ENOMEM;

        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return -ENOMEM;
        }

        sg_page_sizes = 0;
        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;
                sg_page_sizes |= len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        obj->mm.madv = I915_MADV_DONTNEED;

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
        obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

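/*
 * Create a GEM object whose backing store is supplied by fake_get_pages(),
 * i.e. one that can be made very large without consuming real memory. The
 * pin/unpin pair below instantiates the fake "backing storage" once so any
 * allocation failure is reported here rather than midway through a test.
 */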
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc(i915);
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_hw_ppgtt *ppgtt;
        u64 size, last, limit;
        int err = 0;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!USES_PPGTT(dev_priv))
                return 0;

        ppgtt = __hw_ppgtt_create(dev_priv);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;

        /*
         * While we only allocate the page tables here and so could address
         * a much larger GTT than we could actually fit into RAM, a practical
         * limit is the number of physical pages in the system. This should
         * ensure that we do not run into the oomkiller during the test and
         * take down the machine wilfully.
         */
        limit = totalram_pages << PAGE_SHIFT;
        limit = min(ppgtt->vm.total, limit);

        /* Check we can allocate the entire range */
        for (size = 4096; size <= limit; size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
                                        size, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();

                ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
                err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
                                                  last, size - last);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
                                        last, size - last, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                cond_resched();
        }

err_ppgtt_cleanup:
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_ppgtt_put(ppgtt);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return err;
}

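/*
 * Exercise the low-level vm hooks (allocate_va_range, insert_entries and
 * clear_range) directly, using a zeroed mock vma on the stack instead of
 * going through the i915_vma machinery. Objects of every power-of-two size
 * that fits are bound at random offsets throughout the hole and then torn
 * down again in a different random order.
 */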
static int lowlevel_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        unsigned int size;
        struct i915_vma mock_vma;

        memset(&mock_vma, 0, sizeof(struct i915_vma));

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range &&
                            vm->allocate_va_range(vm, addr, BIT_ULL(size)))
                                break;

                        mock_vma.pages = obj->mm.pages;
                        mock_vma.node.size = BIT_ULL(size);
                        mock_vma.node.start = addr;

                        intel_runtime_pm_get(i915);
                        vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
                        intel_runtime_pm_put(i915);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);

                cleanup_freed_objects(i915);
        }

        return 0;
}

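/*
 * Unbind and close every vma instantiated for the objects on the list, then
 * drop the object references. Unbind errors are deliberately ignored as the
 * objects are being discarded anyway.
 */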
static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);
                /* Only ppgtt vma may be closed before the object is freed */
                if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

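/*
 * Pack objects of prime-scaled sizes into the hole from both ends. For each
 * phase (top-down from hole_end, bottom-up from hole_start) the object list
 * is walked four times: pin forwards, verify and unbind forwards, then the
 * same again in reverse, checking at every step that each vma lands exactly
 * where requested and that nothing walks off the ends of the hole.
 */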
static int fill_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMAs, working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
                cleanup_freed_objects(i915);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

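/*
 * Walk a single object of each prime page count through every position in
 * the hole, binding and unbinding at each step and verifying placement.
 */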
static int walk_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_close;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_close;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_close;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_close;
                        }
                }

err_close:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;

                cleanup_freed_objects(i915);
        }

        return 0;
}

static int pot_hole(struct drm_i915_private *i915,
                    struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

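                /*
                 * Place the object one page below each step boundary, so
                 * that its two pages straddle the boundary: the first page
                 * ends exactly at a step-aligned address and the second
                 * begins there.
                 */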
                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err;
                }
        }

err:
        if (!i915_vma_is_ggtt(vma))
                i915_vma_close(vma);
err_obj:
        i915_gem_object_put(obj);
        return err;
}

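/*
 * Like lowlevel_hole, but exercised through the full vma pin/unbind API:
 * bind an object of each power-of-two size at pseudo-random offsets
 * throughout the hole, verifying placement before each unbind.
 */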
static int drunk_hole(struct drm_i915_private *i915,
                      struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err = -ENODEV;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size >> 1;
                if (!count) {
                        pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
                                 __func__, hole_start, hole_end, size, hole_size);
                        break;
                }

                do {
                        order = i915_random_order(count, &prng);
                        if (order)
                                break;
                } while (count >>= 1);
                if (!count)
                        return -ENOMEM;
                GEM_BUG_ON(!order);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err;
                        }
                }

err:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;

                cleanup_freed_objects(i915);
        }

        return 0;
}

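/*
 * Pack the hole from bottom to top with objects of doubling size, leaving
 * each one bound (merely unpinned) so that every subsequent allocation has
 * to grow the page-table tree alongside all its predecessors.
 */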
static int __shrink_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        cleanup_freed_objects(i915);
        return err;
}

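/*
 * Replay __shrink_hole with fault injection enabled on the vm, stepping the
 * failure interval through the primes to probe the allocation error paths
 * from as many different positions as possible.
 */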
static int shrink_hole(struct drm_i915_private *i915,
                       struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

static int shrink_boom(struct drm_i915_private *i915,
                       struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned int sizes[] = { SZ_2M, SZ_1G };
        struct drm_i915_gem_object *purge;
        struct drm_i915_gem_object *explode;
        int err;
        int i;

        /*
         * Catch the case which shrink_hole seems to miss. The setup here
         * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
         * ensuring that all vma associated with the respective pd/pdp are
         * unpinned at the time.
         */

        for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
                unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
                unsigned int size = sizes[i];
                struct i915_vma *vma;

                purge = fake_dma_object(i915, size);
                if (IS_ERR(purge))
                        return PTR_ERR(purge);

                vma = i915_vma_instance(purge, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_purge;
                }

                err = i915_vma_pin(vma, 0, 0, flags);
                if (err)
                        goto err_purge;

                /* Should now be ripe for purging */
                i915_vma_unpin(vma);

                explode = fake_dma_object(i915, size);
                if (IS_ERR(explode)) {
                        err = PTR_ERR(explode);
                        goto err_purge;
                }

                vm->fault_attr.probability = 100;
                vm->fault_attr.interval = 1;
                atomic_set(&vm->fault_attr.times, -1);

                vma = i915_vma_instance(explode, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_explode;
                }

                err = i915_vma_pin(vma, 0, 0, flags | size);
                if (err)
                        goto err_explode;

                i915_vma_unpin(vma);

                i915_gem_object_put(purge);
                i915_gem_object_put(explode);

                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
                cleanup_freed_objects(i915);
        }

        return 0;

err_explode:
        i915_gem_object_put(explode);
err_purge:
        i915_gem_object_put(purge);
        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
        return err;
}

static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct drm_i915_private *i915,
                                      struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct drm_file *file;
        struct i915_hw_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        int err;

        if (!USES_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        mutex_lock(&dev_priv->drm.struct_mutex);
        ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_unlock;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
        GEM_BUG_ON(ppgtt->vm.closed);

        err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

        i915_ppgtt_close(&ppgtt->vm);
        i915_ppgtt_put(ppgtt);
out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);

        mock_file_free(dev_priv, file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
        return exercise_ppgtt(arg, shrink_boom);
}

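/* list_sort() comparator: order the GGTT holes by ascending start address */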
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct drm_i915_private *i915,
                                     struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err = 0;

        mutex_lock(&i915->drm.struct_mutex);
restart:
        list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->vm.mm.color_adjust)
                        ggtt->vm.mm.color_adjust(node, 0,
                                                 &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

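/*
 * Check ggtt->vm.insert_page(): the object's single page is mapped at many
 * GGTT offsets, a distinct dword is written through each mapping in random
 * order, and then read back through the mappings in a different random
 * order; every mapping must alias the same backing page for this to match.
 */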
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        mutex_lock(&i915->drm.struct_mutex);

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_unlock;
        }

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        if (err)
                goto out_unpin;

        intel_runtime_pm_get(i915);

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;

                ggtt->vm.insert_page(&ggtt->vm,
                                     i915_gem_object_get_dma_address(obj, 0),
                                     offset, I915_CACHE_NONE, 0);
        }

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
        }
        i915_gem_flush_ggtt_writes(i915);

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(i915);
        drm_mm_remove_node(&tmp);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

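/*
 * The mock vm has no real bind hooks, so fake the bookkeeping a binding
 * would perform: bump the object's bind count, pin its pages for the
 * "GPU", adopt them as the vma's backing and move the vma onto the vm's
 * inactive list, where eviction expects to find it.
 */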
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        obj->bind_count++; /* track for eviction later */
        __i915_gem_object_pin_pages(obj);

        vma->pages = obj->mm.pages;
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

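/*
 * Run a hole exerciser over the full range of a mock context's ppgtt,
 * capped at totalram so the fake objects' page tables always fit in memory.
 */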
static int exercise_mock(struct drm_i915_private *i915,
                         int (*func)(struct drm_i915_private *i915,
                                     struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        const u64 limit = totalram_pages << PAGE_SHIFT;
        struct i915_gem_context *ctx;
        struct i915_hw_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        int err;

        ctx = mock_context(i915, "mock");
        if (!ctx)
                return -ENOMEM;

        ppgtt = ctx->ppgtt;
        GEM_BUG_ON(!ppgtt);

        err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);

        mock_context_close(ctx);
        return err;
}

static int igt_mock_fill(void *arg)
{
        return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
        return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
        return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
        return exercise_mock(arg, drunk_hole);
}

1286 static int igt_gtt_reserve(void *arg)
1287 {
1288         struct drm_i915_private *i915 = arg;
1289         struct drm_i915_gem_object *obj, *on;
1290         LIST_HEAD(objects);
1291         u64 total;
1292         int err = -ENODEV;
1293
1294         /* i915_gem_gtt_reserve() tries to reserve the precise range
1295          * for the node, and evicts if it has to. So our test checks that
1296          * it can give us the requsted space and prevent overlaps.
1297          */
1298
1299         /* Start by filling the GGTT */
1300         for (total = 0;
1301              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1302              total += 2*I915_GTT_PAGE_SIZE) {
1303                 struct i915_vma *vma;
1304
1305                 obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1306                 if (IS_ERR(obj)) {
1307                         err = PTR_ERR(obj);
1308                         goto out;
1309                 }
1310
1311                 err = i915_gem_object_pin_pages(obj);
1312                 if (err) {
1313                         i915_gem_object_put(obj);
1314                         goto out;
1315                 }
1316
1317                 list_add(&obj->st_link, &objects);
1318
1319                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1320                 if (IS_ERR(vma)) {
1321                         err = PTR_ERR(vma);
1322                         goto out;
1323                 }
1324
1325                 err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1326                                            obj->base.size,
1327                                            total,
1328                                            obj->cache_level,
1329                                            0);
1330                 if (err) {
1331                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1332                                total, i915->ggtt.vm.total, err);
1333                         goto out;
1334                 }
1335                 track_vma_bind(vma);
1336
1337                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1338                 if (vma->node.start != total ||
1339                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1340                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1341                                vma->node.start, vma->node.size,
1342                                total, 2*I915_GTT_PAGE_SIZE);
1343                         err = -EINVAL;
1344                         goto out;
1345                 }
1346         }
1347
1348         /* Now we start forcing evictions */
1349         for (total = I915_GTT_PAGE_SIZE;
1350              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1351              total += 2*I915_GTT_PAGE_SIZE) {
1352                 struct i915_vma *vma;
1353
1354                 obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1355                 if (IS_ERR(obj)) {
1356                         err = PTR_ERR(obj);
1357                         goto out;
1358                 }
1359
1360                 err = i915_gem_object_pin_pages(obj);
1361                 if (err) {
1362                         i915_gem_object_put(obj);
1363                         goto out;
1364                 }
1365
1366                 list_add(&obj->st_link, &objects);
1367
1368                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1369                 if (IS_ERR(vma)) {
1370                         err = PTR_ERR(vma);
1371                         goto out;
1372                 }
1373
1374                 err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1375                                            obj->base.size,
1376                                            total,
1377                                            obj->cache_level,
1378                                            0);
1379                 if (err) {
1380                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1381                                total, i915->ggtt.vm.total, err);
1382                         goto out;
1383                 }
1384                 track_vma_bind(vma);
1385
1386                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1387                 if (vma->node.start != total ||
1388                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1389                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1390                                vma->node.start, vma->node.size,
1391                                total, 2*I915_GTT_PAGE_SIZE);
1392                         err = -EINVAL;
1393                         goto out;
1394                 }
1395         }
1396
1397         /* And then try at random */
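        /*
         * Unbind each node in turn and re-reserve it at a randomly chosen
         * offset. random_offset() (a helper defined earlier in this file)
         * is taken here to return a min-alignment start within
         * [0, vm.total) with room for a 2-page node; any overlap with the
         * remaining nodes is again resolved by eviction.
         */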
1398         list_for_each_entry_safe(obj, on, &objects, st_link) {
1399                 struct i915_vma *vma;
1400                 u64 offset;
1401
1402                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1403                 if (IS_ERR(vma)) {
1404                         err = PTR_ERR(vma);
1405                         goto out;
1406                 }
1407
1408                 err = i915_vma_unbind(vma);
1409                 if (err) {
1410                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1411                         goto out;
1412                 }
1413
1414                 offset = random_offset(0, i915->ggtt.vm.total,
1415                                        2*I915_GTT_PAGE_SIZE,
1416                                        I915_GTT_MIN_ALIGNMENT);
1417
1418                 err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1419                                            obj->base.size,
1420                                            offset,
1421                                            obj->cache_level,
1422                                            0);
1423                 if (err) {
1424                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1425                                offset, i915->ggtt.vm.total, err);
1426                         goto out;
1427                 }
1428                 track_vma_bind(vma);
1429
1430                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1431                 if (vma->node.start != offset ||
1432                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1433                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1434                                vma->node.start, vma->node.size,
1435                                offset, 2*I915_GTT_PAGE_SIZE);
1436                         err = -EINVAL;
1437                         goto out;
1438                 }
1439         }
1440
1441 out:
1442         list_for_each_entry_safe(obj, on, &objects, st_link) {
1443                 i915_gem_object_unpin_pages(obj);
1444                 i915_gem_object_put(obj);
1445         }
1446         return err;
1447 }
1448
1449 static int igt_gtt_insert(void *arg)
1450 {
1451         struct drm_i915_private *i915 = arg;
1452         struct drm_i915_gem_object *obj, *on;
1453         struct drm_mm_node tmp = {};
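        /*
         * A table of requests that can never fit: larger than the GTT,
         * larger than the [start, end) window, sizes so large they wrap
         * around zero, and an alignment that steps over every offset
         * inside the window. Each must fail with -ENOSPC.
         */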
1454         const struct invalid_insert {
1455                 u64 size;
1456                 u64 alignment;
1457                 u64 start, end;
1458         } invalid_insert[] = {
1459                 {
1460                         i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
1461                         0, i915->ggtt.vm.total,
1462                 },
1463                 {
1464                         2*I915_GTT_PAGE_SIZE, 0,
1465                         0, I915_GTT_PAGE_SIZE,
1466                 },
1467                 {
1468                         -(u64)I915_GTT_PAGE_SIZE, 0,
1469                         0, 4*I915_GTT_PAGE_SIZE,
1470                 },
1471                 {
1472                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1473                         0, 4*I915_GTT_PAGE_SIZE,
1474                 },
1475                 {
1476                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1477                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1478                 },
1479                 {}
1480         }, *ii;
1481         LIST_HEAD(objects);
1482         u64 total;
1483         int err = -ENODEV;
1484
1485         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1486          * for the node, evicting other nodes if required.
1487          */
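        /*
         * Unlike i915_gem_gtt_reserve(), which pins a node at an exact
         * offset, i915_gem_gtt_insert() searches [start, end) for a hole
         * of the right size, alignment and colour. A minimal sketch of the
         * call, matching its use in this test (node being any caller-owned
         * struct drm_mm_node):
         *
         *	err = i915_gem_gtt_insert(&i915->ggtt.vm, &node,
         *				  size, alignment, colour,
         *				  start, end, flags);
         *	if (err == -ENOSPC)
         *		; // no hole found, even after trying eviction
         */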
1488
1489         /* Check a couple of obviously invalid requests */
1490         for (ii = invalid_insert; ii->size; ii++) {
1491                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
1492                                           ii->size, ii->alignment,
1493                                           I915_COLOR_UNEVICTABLE,
1494                                           ii->start, ii->end,
1495                                           0);
1496                 if (err != -ENOSPC) {
1497                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) did not report -ENOSPC (err=%d)\n",
1498                                ii->size, ii->alignment, ii->start, ii->end,
1499                                err);
1500                         return -EINVAL;
1501                 }
1502         }
1503
1504         /* Start by filling the GGTT */
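        /*
         * Insert single-page objects until i915_gem_gtt_insert() reports
         * -ENOSPC. Each vma is left pinned (__i915_vma_pin) so that the
         * later insertions cannot evict it behind our back.
         */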
1505         for (total = 0;
1506              total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1507              total += I915_GTT_PAGE_SIZE) {
1508                 struct i915_vma *vma;
1509
1510                 obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
1511                 if (IS_ERR(obj)) {
1512                         err = PTR_ERR(obj);
1513                         goto out;
1514                 }
1515
1516                 err = i915_gem_object_pin_pages(obj);
1517                 if (err) {
1518                         i915_gem_object_put(obj);
1519                         goto out;
1520                 }
1521
1522                 list_add(&obj->st_link, &objects);
1523
1524                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1525                 if (IS_ERR(vma)) {
1526                         err = PTR_ERR(vma);
1527                         goto out;
1528                 }
1529
1530                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1531                                           obj->base.size, 0, obj->cache_level,
1532                                           0, i915->ggtt.vm.total,
1533                                           0);
1534                 if (err == -ENOSPC) {
1535                         /* maxed out the GGTT space */
1536                         i915_gem_object_put(obj);
1537                         break;
1538                 }
1539                 if (err) {
1540                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1541                                total, i915->ggtt.vm.total, err);
1542                         goto out;
1543                 }
1544                 track_vma_bind(vma);
1545                 __i915_vma_pin(vma);
1546
1547                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1548         }
1549
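        /*
         * Everything we inserted was pinned, so filling the GGTT must not
         * have evicted any of it: check that every node is still allocated
         * before dropping the pins.
         */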
1550         list_for_each_entry(obj, &objects, st_link) {
1551                 struct i915_vma *vma;
1552
1553                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1554                 if (IS_ERR(vma)) {
1555                         err = PTR_ERR(vma);
1556                         goto out;
1557                 }
1558
1559                 if (!drm_mm_node_allocated(&vma->node)) {
1560                         pr_err("VMA was unexpectedly evicted!\n");
1561                         err = -EINVAL;
1562                         goto out;
1563                 }
1564
1565                 __i915_vma_unpin(vma);
1566         }
1567
1568         /* If we then reinsert, we should find the same hole */
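        /*
         * Unbinding one node at a time leaves exactly one suitably sized
         * hole in an otherwise full GGTT, so the search performed by
         * i915_gem_gtt_insert() has no choice but to return the offset we
         * just vacated.
         */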
1569         list_for_each_entry_safe(obj, on, &objects, st_link) {
1570                 struct i915_vma *vma;
1571                 u64 offset;
1572
1573                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1574                 if (IS_ERR(vma)) {
1575                         err = PTR_ERR(vma);
1576                         goto out;
1577                 }
1578
1579                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1580                 offset = vma->node.start;
1581
1582                 err = i915_vma_unbind(vma);
1583                 if (err) {
1584                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1585                         goto out;
1586                 }
1587
1588                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1589                                           obj->base.size, 0, obj->cache_level,
1590                                           0, i915->ggtt.vm.total,
1591                                           0);
1592                 if (err) {
1593                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1594                                offset, i915->ggtt.vm.total, err);
1595                         goto out;
1596                 }
1597                 track_vma_bind(vma);
1598
1599                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1600                 if (vma->node.start != offset) {
1601                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1602                                offset, vma->node.start);
1603                         err = -EINVAL;
1604                         goto out;
1605                 }
1606         }
1607
1608         /* And then force evictions */
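        /*
         * The GGTT is still full of single-page nodes, so inserting 2-page
         * objects can only succeed if i915_gem_gtt_insert() evicts the
         * (now unpinned) nodes to make room.
         */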
1609         for (total = 0;
1610              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1611              total += 2*I915_GTT_PAGE_SIZE) {
1612                 struct i915_vma *vma;
1613
1614                 obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
1615                 if (IS_ERR(obj)) {
1616                         err = PTR_ERR(obj);
1617                         goto out;
1618                 }
1619
1620                 err = i915_gem_object_pin_pages(obj);
1621                 if (err) {
1622                         i915_gem_object_put(obj);
1623                         goto out;
1624                 }
1625
1626                 list_add(&obj->st_link, &objects);
1627
1628                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1629                 if (IS_ERR(vma)) {
1630                         err = PTR_ERR(vma);
1631                         goto out;
1632                 }
1633
1634                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1635                                           obj->base.size, 0, obj->cache_level,
1636                                           0, i915->ggtt.vm.total,
1637                                           0);
1638                 if (err) {
1639                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1640                                total, i915->ggtt.vm.total, err);
1641                         goto out;
1642                 }
1643                 track_vma_bind(vma);
1644
1645                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1646         }
1647
1648 out:
1649         list_for_each_entry_safe(obj, on, &objects, st_link) {
1650                 i915_gem_object_unpin_pages(obj);
1651                 i915_gem_object_put(obj);
1652         }
1653         return err;
1654 }
1655
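/*
 * The mock subtests run against a mock_gem_device(), i.e. without real
 * hardware. They are run under struct_mutex here, which appears to be the
 * locking the subtests themselves assume.
 */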
1656 int i915_gem_gtt_mock_selftests(void)
1657 {
1658         static const struct i915_subtest tests[] = {
1659                 SUBTEST(igt_mock_drunk),
1660                 SUBTEST(igt_mock_walk),
1661                 SUBTEST(igt_mock_pot),
1662                 SUBTEST(igt_mock_fill),
1663                 SUBTEST(igt_gtt_reserve),
1664                 SUBTEST(igt_gtt_insert),
1665         };
1666         struct drm_i915_private *i915;
1667         int err;
1668
1669         i915 = mock_gem_device();
1670         if (!i915)
1671                 return -ENOMEM;
1672
1673         mutex_lock(&i915->drm.struct_mutex);
1674         err = i915_subtests(tests, i915);
1675         mutex_unlock(&i915->drm.struct_mutex);
1676
1677         drm_dev_put(&i915->drm);
1678         return err;
1679 }
1680
1681 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1682 {
1683         static const struct i915_subtest tests[] = {
1684                 SUBTEST(igt_ppgtt_alloc),
1685                 SUBTEST(igt_ppgtt_lowlevel),
1686                 SUBTEST(igt_ppgtt_drunk),
1687                 SUBTEST(igt_ppgtt_walk),
1688                 SUBTEST(igt_ppgtt_pot),
1689                 SUBTEST(igt_ppgtt_fill),
1690                 SUBTEST(igt_ppgtt_shrink),
1691                 SUBTEST(igt_ppgtt_shrink_boom),
1692                 SUBTEST(igt_ggtt_lowlevel),
1693                 SUBTEST(igt_ggtt_drunk),
1694                 SUBTEST(igt_ggtt_walk),
1695                 SUBTEST(igt_ggtt_pot),
1696                 SUBTEST(igt_ggtt_fill),
1697                 SUBTEST(igt_ggtt_page),
1698         };
1699
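        /* The live tests assume a page-aligned GGTT size. */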
1700         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
1701
1702         return i915_subtests(tests, i915);
1703 }