/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

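/*
 * Build a batch buffer in @vma's address space that stores @value into
 * the dword at @offset within each of @count consecutive pages of @vma,
 * then return the batch pinned and ready for execution.
 */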
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	GEM_BUG_ON(!igt_can_mi_store_dword_imm(vma->vm->i915));

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

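	/*
	 * Per-gen MI_STORE_DWORD_IMM encoding: gen8+ takes a 64bit
	 * address, gen4-7 a 32bit address plus a padding dword, and
	 * older gens the legacy opcode with bit 22 (global GTT) set.
	 */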
	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? 1 << 22 : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

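/*
 * A huge_gem_object advertises more pages than it actually backs:
 * real_page_count() is the number of physical pages allocated,
 * fake_page_count() the (larger) number exposed through the GTT.
 */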
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

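/*
 * Write @dw into the dw'th dword of each real page of @obj from the
 * GPU, going through the dw'th group of fake GTT pages so that every
 * pass exercises a different alias of the same backing store.
 */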
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		goto err_request;

	err = i915_switch_context(rq);
	if (err)
		goto err_request;

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

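	/*
	 * The request now owns the batch: mark it active and hand over
	 * our reference so it is released on retirement, and install an
	 * exclusive fence on @obj so that later CPU access waits for
	 * these GPU writes to land.
	 */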
	i915_vma_move_to_active(batch, rq, 0);
	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_add_request(rq, true);

	return 0;

err_request:
	__i915_add_request(rq, false);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

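/* Fill every dword of every real page of @obj with @value using the CPU. */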
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = 0;
	return 0;
}

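/*
 * Read @obj back on the CPU: the first @max dwords of each real page
 * must contain their own index (the value gpu_fill() stored), and the
 * rest must still hold the 0xdeadbeef pattern from cpu_fill().
 */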
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != 0xdeadbeef) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], 0xdeadbeef);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}

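/*
 * Create a huge object filling up to half of @ctx's address space,
 * give it a handle on @file so it is reaped when the file is closed,
 * and paint it with the 0xdeadbeef background pattern.
 */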
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	u64 size;
	u32 handle;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	/* tie the handle to the drm_file for easy reaping */
	err = drm_gem_handle_create(file, &obj->base, &handle);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, 0xdeadbeef);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

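/* How many gpu_fill() passes fit in @obj: one dword (and one GTT alias) each. */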
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

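/*
 * igt_ctx_exec: create many contexts and fill shared objects through
 * each of them from every engine until the timeout expires, then read
 * everything back on the CPU to verify that each context saw its own
 * view of the objects.
 */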
static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	unsigned long ncontexts, ndwords, dw;
	bool first_shared_gtt = true;
	int err;

	/* Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		unsigned int id;

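		/*
		 * Create the first context with __create_hw_context() so
		 * that it shares the GTT (no private ppgtt); every later
		 * context goes through the normal constructor.
		 */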
		if (first_shared_gtt) {
			ctx = __create_hw_context(i915, file->driver_priv);
			first_shared_gtt = false;
		} else {
			ctx = i915_gem_create_context(i915, file->driver_priv);
		}
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

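/*
 * Install an aliasing ppgtt on a device that was initialised without
 * one, clearing the local-bind flag on any vma already in the GGTT so
 * that it behaves as if the aliasing ppgtt had existed from the start.
 */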
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	err = i915_gem_init_aliasing_ppgtt(i915);
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
		if (IS_ERR(vma))
			continue;

		vma->flags &= ~I915_VMA_LOCAL_BIND;
	}

	return 0;
}

static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
	i915_gem_fini_aliasing_ppgtt(i915);
}

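/*
 * Live selftest entry point. If the device supports ppgtt but is not
 * currently using an aliasing ppgtt, temporarily install a fake one so
 * that path is exercised as well.
 */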
int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ctx_exec),
	};
	bool fake_alias = false;
	int err;

	/* Install a fake aliasing gtt for exercise */
	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		err = fake_aliasing_ppgtt_enable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (err)
			return err;

		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
		fake_alias = true;
	}

	err = i915_subtests(tests, dev_priv);

	if (fake_alias) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		fake_aliasing_ppgtt_disable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}