/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

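/*
 * Fake backing store: every scatterlist entry built below points at the
 * same bogus pfn and carries no real memory, so arbitrarily large "DMA"
 * objects can be created and bound without exhausting the system.
 */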
static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

static struct sg_table *
fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
        struct sg_table *pages;
        struct scatterlist *sg;
        typeof(obj->base.size) rem;

        pages = kmalloc(sizeof(*pages), GFP);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        rem = round_up(obj->base.size, BIT(31)) >> 31;
        if (sg_alloc_table(pages, rem, GFP)) {
                kfree(pages);
                return ERR_PTR(-ENOMEM);
        }

        rem = obj->base.size;
        for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                unsigned long len = min_t(typeof(rem), rem, BIT(31));

                GEM_BUG_ON(!len);
                sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                sg_dma_address(sg) = page_to_phys(sg_page(sg));
                sg_dma_len(sg) = len;

                rem -= len;
        }
        GEM_BUG_ON(rem);

        obj->mm.madv = I915_MADV_DONTNEED;
        return pages;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        fake_free_pages(obj, pages);
        obj->mm.dirty = false;
        obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = fake_get_pages,
        .put_pages = fake_put_pages,
};

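/* Create a GEM object of the requested size backed by the fake sg table above. */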
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
        struct drm_i915_gem_object *obj;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc(i915);
        if (!obj)
                goto err;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &fake_ops);

        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->cache_level = I915_CACHE_NONE;

        /* Preallocate the "backing storage" */
        if (i915_gem_object_pin_pages(obj))
                goto err_obj;

        i915_gem_object_unpin_pages(obj);
        return obj;

err_obj:
        i915_gem_object_put(obj);
err:
        return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        struct i915_hw_ppgtt *ppgtt;
        u64 size, last;
        int err;

        /* Allocate a ppgtt and try to fill the entire range */

        if (!USES_PPGTT(dev_priv))
                return 0;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return -ENOMEM;

        mutex_lock(&dev_priv->drm.struct_mutex);
        err = __hw_ppgtt_init(ppgtt, dev_priv);
        if (err)
                goto err_ppgtt;

        if (!ppgtt->base.allocate_va_range)
                goto err_ppgtt_cleanup;

        /* Check we can allocate the entire range */
        for (size = 4096;
             size <= ppgtt->base.total;
             size <<= 2) {
                err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
                                        size, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }

                ppgtt->base.clear_range(&ppgtt->base, 0, size);
        }

        /* Check we can incrementally allocate the entire range */
        for (last = 0, size = 4096;
             size <= ppgtt->base.total;
             last = size, size <<= 2) {
                err = ppgtt->base.allocate_va_range(&ppgtt->base,
                                                    last, size - last);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
                                        last, size - last, ilog2(size));
                                err = 0; /* virtual space too large! */
                        }
                        goto err_ppgtt_cleanup;
                }
        }

err_ppgtt_cleanup:
        ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(ppgtt);
        return err;
}

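/*
 * Exercise the low-level vm hooks (allocate_va_range, insert_entries,
 * clear_range) directly, using a zeroed mock vma so that no real binding
 * state is required.
 */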
static int lowlevel_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        I915_RND_STATE(seed_prng);
        unsigned int size;
        struct i915_vma mock_vma;

        memset(&mock_vma, 0, sizeof(struct i915_vma));

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                I915_RND_SUBSTATE(prng, seed_prng);
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                u64 hole_size;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size;
                do {
                        count >>= 1;
                        order = i915_random_order(count, &prng);
                } while (!order && count);
                if (!order)
                        break;

                GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
                GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                GEM_BUG_ON(obj->base.size != BIT_ULL(size));

                if (i915_gem_object_pin_pages(obj)) {
                        i915_gem_object_put(obj);
                        kfree(order);
                        break;
                }

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

                        if (igt_timeout(end_time,
                                        "%s timed out before %d/%d\n",
                                        __func__, n, count)) {
                                hole_end = hole_start; /* quit */
                                break;
                        }

                        if (vm->allocate_va_range &&
                            vm->allocate_va_range(vm, addr, BIT_ULL(size)))
                                break;

                        mock_vma.pages = obj->mm.pages;
                        mock_vma.node.size = BIT_ULL(size);
                        mock_vma.node.start = addr;

                        vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
                }
                count = n;

                i915_random_reorder(order, count, &prng);
                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
                        vm->clear_range(vm, addr, BIT_ULL(size));
                }

                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);

                kfree(order);
        }

        return 0;
}

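/* Unbind each object's vma, close it unless it is a GGTT vma, and free the object. */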
static void close_object_list(struct list_head *objects,
                              struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj, *on;
        int ignored;

        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;

                vma = i915_vma_instance(obj, vm, NULL);
                if (!IS_ERR(vma))
                        ignored = i915_vma_unbind(vma);
                /* Only ppgtt vma may be closed before the object is freed */
                if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);

                list_del(&obj->st_link);
                i915_gem_object_put(obj);
        }
}

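/*
 * Fill the hole with differently sized objects, packing them top-down and
 * bottom-up from either edge, and check that nothing is misplaced or lost
 * across repeated bind/unbind passes.
 */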
static int fill_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        struct drm_i915_gem_object *obj;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
        const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
        unsigned long npages, prime, flags;
        struct i915_vma *vma;
        LIST_HEAD(objects);
        int err;

        /* Try binding many VMA working inwards from either edge */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(prime, 2, max_step) {
                for (npages = 1; npages <= max_pages; npages *= prime) {
                        const u64 full_size = npages << PAGE_SHIFT;
                        const struct {
                                const char *name;
                                u64 offset;
                                int step;
                        } phases[] = {
                                { "top-down", hole_end, -1, },
                                { "bottom-up", hole_start, 1, },
                                { }
                        }, *p;

                        obj = fake_dma_object(i915, full_size);
                        if (IS_ERR(obj))
                                break;

                        list_add(&obj->st_link, &objects);

                        /* Align differing sized objects against the edges, and
                         * check we don't walk off into the void when binding
                         * them into the GTT.
                         */
                        for (p = phases; p->name; p++) {
                                u64 offset;

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        err = i915_vma_pin(vma, 0, 0, offset | flags);
                                        if (err) {
                                                pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
                                                       __func__, p->name, err, npages, prime, offset);
                                                goto err;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        i915_vma_unpin(vma);

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }

                                offset = p->offset;
                                list_for_each_entry_reverse(obj, &objects, st_link) {
                                        vma = i915_vma_instance(obj, vm, NULL);
                                        if (IS_ERR(vma))
                                                continue;

                                        if (p->step < 0) {
                                                if (offset < hole_start + obj->base.size)
                                                        break;
                                                offset -= obj->base.size;
                                        }

                                        if (!drm_mm_node_allocated(&vma->node) ||
                                            i915_vma_misplaced(vma, 0, 0, offset | flags)) {
                                                pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
                                                       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
                                                       offset);
                                                err = -EINVAL;
                                                goto err;
                                        }

                                        err = i915_vma_unbind(vma);
                                        if (err) {
                                                pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
                                                       __func__, p->name, vma->node.start, vma->node.size,
                                                       err);
                                                goto err;
                                        }

                                        if (p->step > 0) {
                                                if (offset + obj->base.size > hole_end)
                                                        break;
                                                offset += obj->base.size;
                                        }
                                }
                        }

                        if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
                                        __func__, npages, prime)) {
                                err = -EINTR;
                                goto err;
                        }
                }

                close_object_list(&objects, vm);
        }

        return 0;

err:
        close_object_list(&objects, vm);
        return err;
}

static int walk_hole(struct drm_i915_private *i915,
                     struct i915_address_space *vm,
                     u64 hole_start, u64 hole_end,
                     unsigned long end_time)
{
        const u64 hole_size = hole_end - hole_start;
        const unsigned long max_pages =
                min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
        unsigned long flags;
        u64 size;

        /* Try binding a single VMA in different positions within the hole */

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        for_each_prime_number_from(size, 1, max_pages) {
                struct drm_i915_gem_object *obj;
                struct i915_vma *vma;
                u64 addr;
                int err = 0;

                obj = fake_dma_object(i915, size << PAGE_SHIFT);
                if (IS_ERR(obj))
                        break;

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_put;
                }

                for (addr = hole_start;
                     addr + obj->base.size < hole_end;
                     addr += obj->base.size) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
                                       __func__, addr, vma->size,
                                       hole_start, hole_end, err);
                                goto err_close;
                        }
                        i915_vma_unpin(vma);

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                err = -EINVAL;
                                goto err_close;
                        }

                        err = i915_vma_unbind(vma);
                        if (err) {
                                pr_err("%s unbind failed at %llx + %llx with err=%d\n",
                                       __func__, addr, vma->size, err);
                                goto err_close;
                        }

                        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

                        if (igt_timeout(end_time,
                                        "%s timed out at %llx\n",
                                        __func__, addr)) {
                                err = -EINTR;
                                goto err_close;
                        }
                }

err_close:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;
        }

        return 0;
}

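/*
 * Bind a two-page object across every power-of-two boundary within the
 * hole and verify it lands exactly where requested.
 */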
static int pot_hole(struct drm_i915_private *i915,
                    struct i915_address_space *vm,
                    u64 hole_start, u64 hole_end,
                    unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned long flags;
        unsigned int pot;
        int err = 0;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        /* Insert a pair of pages across every pot boundary within the hole */
        for (pot = fls64(hole_end - 1) - 1;
             pot > ilog2(2 * I915_GTT_PAGE_SIZE);
             pot--) {
                u64 step = BIT_ULL(pot);
                u64 addr;

                for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
                     addr += step) {
                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr,
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, vma->size);
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);
                }

                if (igt_timeout(end_time,
                                "%s timed out after %d/%d\n",
                                __func__, pot, fls64(hole_end - 1) - 1)) {
                        err = -EINTR;
                        goto err;
                }
        }

err:
        if (!i915_vma_is_ggtt(vma))
                i915_vma_close(vma);
err_obj:
        i915_gem_object_put(obj);
        return err;
}

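/*
 * Bind and unbind a single vma at random offsets throughout the hole,
 * increasing the object size until it no longer fits.
 */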
static int drunk_hole(struct drm_i915_private *i915,
                      struct i915_address_space *vm,
                      u64 hole_start, u64 hole_end,
                      unsigned long end_time)
{
        I915_RND_STATE(prng);
        unsigned int size;
        unsigned long flags;

        flags = PIN_OFFSET_FIXED | PIN_USER;
        if (i915_is_ggtt(vm))
                flags |= PIN_GLOBAL;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (size = 12; (hole_end - hole_start) >> size; size++) {
                struct drm_i915_gem_object *obj;
                unsigned int *order, count, n;
                struct i915_vma *vma;
                u64 hole_size;
                int err;

                hole_size = (hole_end - hole_start) >> size;
                if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
                        hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
                count = hole_size;
                do {
                        count >>= 1;
                        order = i915_random_order(count, &prng);
                } while (!order && count);
                if (!order)
                        break;

                /* Ignore allocation failures (i.e. don't report them as
                 * a test failure) as we are purposefully allocating very
                 * large objects without checking that we have sufficient
                 * memory. We expect to hit -ENOMEM.
                 */

                obj = fake_dma_object(i915, BIT_ULL(size));
                if (IS_ERR(obj)) {
                        kfree(order);
                        break;
                }

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }

                GEM_BUG_ON(vma->size != BIT_ULL(size));

                for (n = 0; n < count; n++) {
                        u64 addr = hole_start + order[n] * BIT_ULL(size);

                        err = i915_vma_pin(vma, 0, 0, addr | flags);
                        if (err) {
                                pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                                       __func__,
                                       addr, BIT_ULL(size),
                                       hole_start, hole_end,
                                       err);
                                goto err;
                        }

                        if (!drm_mm_node_allocated(&vma->node) ||
                            i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                                pr_err("%s incorrect at %llx + %llx\n",
                                       __func__, addr, BIT_ULL(size));
                                i915_vma_unpin(vma);
                                err = i915_vma_unbind(vma);
                                err = -EINVAL;
                                goto err;
                        }

                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        GEM_BUG_ON(err);

                        if (igt_timeout(end_time,
                                        "%s timed out after %d/%d\n",
                                        __func__, n, count)) {
                                err = -EINTR;
                                goto err;
                        }
                }

err:
                if (!i915_vma_is_ggtt(vma))
                        i915_vma_close(vma);
err_obj:
                i915_gem_object_put(obj);
                kfree(order);
                if (err)
                        return err;
        }

        return 0;
}

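/*
 * Walk the hole with objects of doubling size, pinning each in turn, until
 * either the hole is exhausted or an allocation fails.
 */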
static int __shrink_hole(struct drm_i915_private *i915,
                         struct i915_address_space *vm,
                         u64 hole_start, u64 hole_end,
                         unsigned long end_time)
{
        struct drm_i915_gem_object *obj;
        unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
        unsigned int order = 12;
        LIST_HEAD(objects);
        int err = 0;
        u64 addr;

        /* Keep creating larger objects until one cannot fit into the hole */
        for (addr = hole_start; addr < hole_end; ) {
                struct i915_vma *vma;
                u64 size = BIT_ULL(order++);

                size = min(size, hole_end - addr);
                obj = fake_dma_object(i915, size);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
                }

                GEM_BUG_ON(vma->size != size);

                err = i915_vma_pin(vma, 0, 0, addr | flags);
                if (err) {
                        pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
                               __func__, addr, size, hole_start, hole_end, err);
                        break;
                }

                if (!drm_mm_node_allocated(&vma->node) ||
                    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
                        pr_err("%s incorrect at %llx + %llx\n",
                               __func__, addr, size);
                        i915_vma_unpin(vma);
                        err = i915_vma_unbind(vma);
                        err = -EINVAL;
                        break;
                }

                i915_vma_unpin(vma);
                addr += size;

                if (igt_timeout(end_time,
                                "%s timed out at offset %llx [%llx - %llx]\n",
                                __func__, addr, hole_start, hole_end)) {
                        err = -EINTR;
                        break;
                }
        }

        close_object_list(&objects, vm);
        return err;
}

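/*
 * Rerun __shrink_hole with fault injection enabled on the vm allocator,
 * varying the injection interval over a series of primes.
 */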
static int shrink_hole(struct drm_i915_private *i915,
                       struct i915_address_space *vm,
                       u64 hole_start, u64 hole_end,
                       unsigned long end_time)
{
        unsigned long prime;
        int err;

        vm->fault_attr.probability = 999;
        atomic_set(&vm->fault_attr.times, -1);

        for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
                vm->fault_attr.interval = prime;
                err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
                if (err)
                        break;
        }

        memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

        return err;
}

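/*
 * Create a full ppgtt and run the given hole exerciser over its entire
 * virtual address range.
 */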
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                          int (*func)(struct drm_i915_private *i915,
                                      struct i915_address_space *vm,
                                      u64 hole_start, u64 hole_end,
                                      unsigned long end_time))
{
        struct drm_file *file;
        struct i915_hw_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        int err;

        if (!USES_FULL_PPGTT(dev_priv))
                return 0;

        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);

        mutex_lock(&dev_priv->drm.struct_mutex);
        ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_unlock;
        }
        GEM_BUG_ON(offset_in_page(ppgtt->base.total));
        GEM_BUG_ON(ppgtt->base.closed);

        err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);

        i915_ppgtt_close(&ppgtt->base);
        i915_ppgtt_put(ppgtt);
out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);

        mock_file_free(dev_priv, file);
        return err;
}

static int igt_ppgtt_fill(void *arg)
{
        return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
        return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
        return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
        return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
        return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
        return exercise_ppgtt(arg, shrink_hole);
}

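/* list_sort() comparator: order drm_mm holes by ascending start address. */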
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
        struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
        struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

        if (a->start < b->start)
                return -1;
        else
                return 1;
}

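/*
 * Run the given hole exerciser over every hole currently present in the
 * live GGTT, restarting the walk after each pass as the hole list may have
 * been modified.
 */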
static int exercise_ggtt(struct drm_i915_private *i915,
                         int (*func)(struct drm_i915_private *i915,
                                     struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_ggtt *ggtt = &i915->ggtt;
        u64 hole_start, hole_end, last = 0;
        struct drm_mm_node *node;
        IGT_TIMEOUT(end_time);
        int err;

        mutex_lock(&i915->drm.struct_mutex);
restart:
        list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
        drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;

                if (ggtt->base.mm.color_adjust)
                        ggtt->base.mm.color_adjust(node, 0,
                                                   &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;

                err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
                if (err)
                        break;

                /* As we have manipulated the drm_mm, the list may be corrupt */
                last = hole_end;
                goto restart;
        }
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}

static int igt_ggtt_fill(void *arg)
{
        return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
        return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
        return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
        return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
        return exercise_ggtt(arg, lowlevel_hole);
}

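/*
 * Insert the same backing page at scattered GGTT offsets, write a distinct
 * value through each mapping and read it back, checking that insert_page()
 * really placed the PTE where we asked.
 */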
static int igt_ggtt_page(void *arg)
{
        const unsigned int count = PAGE_SIZE/sizeof(u32);
        I915_RND_STATE(prng);
        struct drm_i915_private *i915 = arg;
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node tmp;
        unsigned int *order, n;
        int err;

        mutex_lock(&i915->drm.struct_mutex);

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_unlock;
        }

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto out_free;

        memset(&tmp, 0, sizeof(tmp));
        err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
                                          1024 * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        if (err)
                goto out_unpin;

        order = i915_random_order(count, &prng);
        if (!order) {
                err = -ENOMEM;
                goto out_remove;
        }

        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;

                ggtt->base.insert_page(&ggtt->base,
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);

                vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                wmb();
                ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
        }

        i915_random_reorder(order, count, &prng);
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + order[n] * PAGE_SIZE;
                u32 __iomem *vaddr;
                u32 val;

                ggtt->base.insert_page(&ggtt->base,
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);

                vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);

                ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

                if (val != n) {
                        pr_err("insert page failed: found %d, expected %d\n",
                               val, n);
                        err = -EINVAL;
                        break;
                }
        }

        kfree(order);
out_remove:
        drm_mm_remove_node(&tmp);
out_unpin:
        i915_gem_object_unpin_pages(obj);
out_free:
        i915_gem_object_put(obj);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

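/*
 * Mimic the bookkeeping normally done by a real bind so that the eviction
 * code later treats these mock insertions as bound, inactive vma.
 */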
static void track_vma_bind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        obj->bind_count++; /* track for eviction later */
        __i915_gem_object_pin_pages(obj);

        vma->pages = obj->mm.pages;
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

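/* Run the given hole exerciser over the full ppgtt of a mock context. */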
static int exercise_mock(struct drm_i915_private *i915,
                         int (*func)(struct drm_i915_private *i915,
                                     struct i915_address_space *vm,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
{
        struct i915_gem_context *ctx;
        struct i915_hw_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
        int err;

        ctx = mock_context(i915, "mock");
        if (!ctx)
                return -ENOMEM;

        ppgtt = ctx->ppgtt;
        GEM_BUG_ON(!ppgtt);

        err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);

        mock_context_close(ctx);
        return err;
}

static int igt_mock_fill(void *arg)
{
        return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
        return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
        return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
        return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj, *on;
        LIST_HEAD(objects);
        u64 total;
        int err;

        /* i915_gem_gtt_reserve() tries to reserve the precise range
         * for the node, and evicts if it has to. So our test checks that
         * it can give us the requested space and prevent overlaps.
         */

        /* Start by filling the GGTT */
        for (total = 0;
             total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;

                obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
                }

                err = i915_gem_object_pin_pages(obj);
                if (err) {
                        i915_gem_object_put(obj);
                        goto out;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }

                err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
                               total, i915->ggtt.base.total, err);
                        goto out;
                }
                track_vma_bind(vma);

                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
                        pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
                        goto out;
                }
        }

        /* Now we start forcing evictions */
        for (total = I915_GTT_PAGE_SIZE;
             total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;

                obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
                }

                err = i915_gem_object_pin_pages(obj);
                if (err) {
                        i915_gem_object_put(obj);
                        goto out;
                }

                list_add(&obj->st_link, &objects);

                vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }

                err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
                               total, i915->ggtt.base.total, err);
                        goto out;
                }
                track_vma_bind(vma);

                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
                        pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
                        goto out;
                }
        }

        /* And then try at random */
        list_for_each_entry_safe(obj, on, &objects, st_link) {
                struct i915_vma *vma;
                u64 offset;

                vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }

                err = i915_vma_unbind(vma);
                if (err) {
                        pr_err("i915_vma_unbind failed with err=%d!\n", err);
                        goto out;
                }

                offset = random_offset(0, i915->ggtt.base.total,
                                       2*I915_GTT_PAGE_SIZE,
                                       I915_GTT_MIN_ALIGNMENT);

                err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
                                           obj->base.size,
                                           offset,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
                               total, i915->ggtt.base.total, err);
                        goto out;
                }
                track_vma_bind(vma);

                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != offset ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
                        pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
                               vma->node.start, vma->node.size,
                               offset, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
                        goto out;
                }
        }

out:
        list_for_each_entry_safe(obj, on, &objects, st_link) {
                i915_gem_object_unpin_pages(obj);
                i915_gem_object_put(obj);
        }
        return err;
}

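/*
 * Check that i915_gem_gtt_insert() rejects impossible requests, then fill
 * the GGTT and verify that an unbound vma can be reinserted into the free
 * space it left behind.
 */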
1315 static int igt_gtt_insert(void *arg)
1316 {
1317         struct drm_i915_private *i915 = arg;
1318         struct drm_i915_gem_object *obj, *on;
1319         struct drm_mm_node tmp = {};
1320         const struct invalid_insert {
1321                 u64 size;
1322                 u64 alignment;
1323                 u64 start, end;
1324         } invalid_insert[] = {
1325                 {
1326                         i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
1327                         0, i915->ggtt.base.total,
1328                 },
1329                 {
1330                         2*I915_GTT_PAGE_SIZE, 0,
1331                         0, I915_GTT_PAGE_SIZE,
1332                 },
1333                 {
1334                         -(u64)I915_GTT_PAGE_SIZE, 0,
1335                         0, 4*I915_GTT_PAGE_SIZE,
1336                 },
1337                 {
1338                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1339                         0, 4*I915_GTT_PAGE_SIZE,
1340                 },
1341                 {
1342                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1343                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1344                 },
1345                 {}
1346         }, *ii;
1347         LIST_HEAD(objects);
1348         u64 total;
1349         int err;
1350
1351         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1352          * to the node, evicting if required.
1353          */
1354
1355         /* Check a couple of obviously invalid requests */
1356         for (ii = invalid_insert; ii->size; ii++) {
1357                 err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
1358                                           ii->size, ii->alignment,
1359                                           I915_COLOR_UNEVICTABLE,
1360                                           ii->start, ii->end,
1361                                           0);
1362                 if (err != -ENOSPC) {
1363                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1364                                ii->size, ii->alignment, ii->start, ii->end,
1365                                err);
1366                         return -EINVAL;
1367                 }
1368         }
1369
1370         /* Start by filling the GGTT */
1371         for (total = 0;
1372              total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1373              total += I915_GTT_PAGE_SIZE) {
1374                 struct i915_vma *vma;
1375
1376                 obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
1377                 if (IS_ERR(obj)) {
1378                         err = PTR_ERR(obj);
1379                         goto out;
1380                 }
1381
1382                 err = i915_gem_object_pin_pages(obj);
1383                 if (err) {
1384                         i915_gem_object_put(obj);
1385                         goto out;
1386                 }
1387
1388                 list_add(&obj->st_link, &objects);
1389
1390                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1391                 if (IS_ERR(vma)) {
1392                         err = PTR_ERR(vma);
1393                         goto out;
1394                 }
1395
1396                 err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1397                                           obj->base.size, 0, obj->cache_level,
1398                                           0, i915->ggtt.base.total,
1399                                           0);
1400                 if (err == -ENOSPC) {
1401                         /* maxed out the GGTT space; the object is already
                              * on the objects list with its pages pinned, so
                              * undo that here instead of letting the cleanup
                              * at out: unpin and put it a second time.
                              */
                             list_del(&obj->st_link);
                             i915_gem_object_unpin_pages(obj);
1402                         i915_gem_object_put(obj);
1403                         break;
1404                 }
1405                 if (err) {
1406                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1407                                total, i915->ggtt.base.total, err);
1408                         goto out;
1409                 }
1410                 track_vma_bind(vma);
1411                 __i915_vma_pin(vma);
1412
1413                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1414         }
1415
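             /* Everything inserted above is still pinned, so nothing may have
              * been evicted; check that each node is still allocated, then
              * drop the temporary pin.
              */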
1416         list_for_each_entry(obj, &objects, st_link) {
1417                 struct i915_vma *vma;
1418
1419                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1420                 if (IS_ERR(vma)) {
1421                         err = PTR_ERR(vma);
1422                         goto out;
1423                 }
1424
1425                 if (!drm_mm_node_allocated(&vma->node)) {
1426                         pr_err("VMA was unexpectedly evicted!\n");
1427                         err = -EINVAL;
1428                         goto out;
1429                 }
1430
1431                 __i915_vma_unpin(vma);
1432         }
1433
1434         /* If we then reinsert, we should find the same hole */
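             /* Unbinding a vma leaves its old range as the only free hole in
              * an otherwise full GGTT, so i915_gem_gtt_insert() must place the
              * node back at exactly the same offset.
              */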
1435         list_for_each_entry_safe(obj, on, &objects, st_link) {
1436                 struct i915_vma *vma;
1437                 u64 offset;
1438
1439                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1440                 if (IS_ERR(vma)) {
1441                         err = PTR_ERR(vma);
1442                         goto out;
1443                 }
1444
1445                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1446                 offset = vma->node.start;
1447
1448                 err = i915_vma_unbind(vma);
1449                 if (err) {
1450                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1451                         goto out;
1452                 }
1453
1454                 err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1455                                           obj->base.size, 0, obj->cache_level,
1456                                           0, i915->ggtt.base.total,
1457                                           0);
1458                 if (err) {
1459                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1460                                total, i915->ggtt.base.total, err);
1461                         goto out;
1462                 }
1463                 track_vma_bind(vma);
1464
1465                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1466                 if (vma->node.start != offset) {
1467                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1468                                offset, vma->node.start);
1469                         err = -EINVAL;
1470                         goto out;
1471                 }
1472         }
1473
1474         /* And then force evictions */
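             /* The GGTT is once again packed with single-page nodes, so each
              * two-page insertion below can only succeed by evicting at least
              * one of the existing, now unpinned, objects.
              */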
1475         for (total = 0;
1476              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
1477              total += 2*I915_GTT_PAGE_SIZE) {
1478                 struct i915_vma *vma;
1479
1480                 obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
1481                 if (IS_ERR(obj)) {
1482                         err = PTR_ERR(obj);
1483                         goto out;
1484                 }
1485
1486                 err = i915_gem_object_pin_pages(obj);
1487                 if (err) {
1488                         i915_gem_object_put(obj);
1489                         goto out;
1490                 }
1491
1492                 list_add(&obj->st_link, &objects);
1493
1494                 vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1495                 if (IS_ERR(vma)) {
1496                         err = PTR_ERR(vma);
1497                         goto out;
1498                 }
1499
1500                 err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
1501                                           obj->base.size, 0, obj->cache_level,
1502                                           0, i915->ggtt.base.total,
1503                                           0);
1504                 if (err) {
1505                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1506                                total, i915->ggtt.base.total, err);
1507                         goto out;
1508                 }
1509                 track_vma_bind(vma);
1510
1511                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1512         }
1513
1514 out:
1515         list_for_each_entry_safe(obj, on, &objects, st_link) {
1516                 i915_gem_object_unpin_pages(obj);
1517                 i915_gem_object_put(obj);
1518         }
1519         return err;
1520 }
1521
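     /*
      * The mock selftests exercise the GTT range management against a fake
      * GGTT on a mock device, so they can run without real hardware; they are
      * normally invoked via the i915.mock_selftests module parameter when
      * CONFIG_DRM_I915_SELFTEST is enabled.
      */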
1522 int i915_gem_gtt_mock_selftests(void)
1523 {
1524         static const struct i915_subtest tests[] = {
1525                 SUBTEST(igt_mock_drunk),
1526                 SUBTEST(igt_mock_walk),
1527                 SUBTEST(igt_mock_pot),
1528                 SUBTEST(igt_mock_fill),
1529                 SUBTEST(igt_gtt_reserve),
1530                 SUBTEST(igt_gtt_insert),
1531         };
1532         struct drm_i915_private *i915;
1533         int err;
1534
1535         i915 = mock_gem_device();
1536         if (!i915)
1537                 return -ENOMEM;
1538
1539         mutex_lock(&i915->drm.struct_mutex);
1540         err = i915_subtests(tests, i915);
1541         mutex_unlock(&i915->drm.struct_mutex);
1542
1543         drm_dev_unref(&i915->drm);
1544         return err;
1545 }
1546
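     /*
      * The live selftests repeat the GTT exercises against the real GGTT and
      * full ppGTTs of an actual device, complementing the mock coverage above.
      */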
1547 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1548 {
1549         static const struct i915_subtest tests[] = {
1550                 SUBTEST(igt_ppgtt_alloc),
1551                 SUBTEST(igt_ppgtt_lowlevel),
1552                 SUBTEST(igt_ppgtt_drunk),
1553                 SUBTEST(igt_ppgtt_walk),
1554                 SUBTEST(igt_ppgtt_pot),
1555                 SUBTEST(igt_ppgtt_fill),
1556                 SUBTEST(igt_ppgtt_shrink),
1557                 SUBTEST(igt_ggtt_lowlevel),
1558                 SUBTEST(igt_ggtt_drunk),
1559                 SUBTEST(igt_ggtt_walk),
1560                 SUBTEST(igt_ggtt_pot),
1561                 SUBTEST(igt_ggtt_fill),
1562                 SUBTEST(igt_ggtt_page),
1563         };
1564
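             /* All of the exercises assume a page-aligned GGTT size; catch a
              * misconfigured device before running any subtest.
              */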
1565         GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
1566
1567         return i915_subtests(tests, i915);
1568 }