/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"

#include "igt_flush_test.h"
#include "igt_reset.h"
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"

#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
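/*
 * Reference copies of the GT and per-engine workaround lists, captured at
 * the start of a subtest so that the live hardware state can be verified
 * against them after a reset.
 */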
struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                char name[REF_NAME_MAX];
                struct i915_wa_list wa_list;
        } engine[I915_NUM_ENGINES];
};

static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF");
        gt_init_workarounds(i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, i915, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;
                char *name = lists->engine[id].name;

                snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

                wa_init_start(wal, name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);
        }
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

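/*
 * Build a request on @engine that uses MI_STORE_REGISTER_MEM to copy each
 * RING_FORCE_TO_NONPRIV (whitelist) slot into a scratch page, and return
 * that scratch object for the caller to inspect.
 */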
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        const u32 base = engine->mmio_base;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_level(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
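        /* Poison the scratch page so any slot the CS fails to write stands out. */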
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        intel_runtime_pm_get(engine->i915);
        rq = i915_request_alloc(engine, ctx);
        intel_runtime_pm_put(engine->i915);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_req;

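        /*
         * MI_STORE_REGISTER_MEM takes a 64b address on gen8+, so the
         * command grows by one dword; bump the opcode's length field.
         */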
        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_gem_object_get(result);
        i915_gem_object_set_active_reference(result);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

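/*
 * Read back all RING_NONPRIV slots through the CS and compare each one
 * against the expected whitelist entry (unused slots are expected to
 * point at the innocuous RING_NOPID register).
 */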
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct igt_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        if (i915_terminally_wedged(&ctx->i915->gpu_error))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

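/*
 * The two reset flavours exercised by check_whitelist_across_reset():
 * a full device (GPU) reset and an individual engine reset.
 */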
static int do_device_reset(struct intel_engine_cs *engine)
{
        set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
        i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return i915_reset_engine(engine, "live_workarounds");
}

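/*
 * Switch the engine over to a throwaway kernel context, optionally kept
 * busy with a spinning batch, so that the reset is not taken while the
 * context under test is active on the hardware.
 */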
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct i915_gem_context *ctx;
        struct i915_request *rq;
        int err = 0;

        ctx = kernel_context(engine->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        intel_runtime_pm_get(engine->i915);

        if (spin)
                rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
        else
                rq = i915_request_alloc(engine, ctx);

        intel_runtime_pm_put(engine->i915);

        kernel_context_close(ctx);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        i915_request_add(rq);

        if (spin && !igt_wait_for_spinner(spin, rq)) {
                pr_err("Spinner failed to start\n");
                err = -ETIMEDOUT;
        }

err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

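/*
 * Verify the RING_NONPRIV whitelist before a reset, after the reset in
 * the same context, and finally in a freshly created context. An engine
 * reset is taken while a spinner keeps the engine busy; a full device
 * reset is taken with the engine idle.
 */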
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        bool want_spin = reset == do_engine_reset;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        int err;

        pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, name);

        if (want_spin) {
                err = igt_spinner_init(&spin, i915);
                if (err)
                        return err;
        }

        ctx = kernel_context(i915);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out_spin;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out_ctx;
        }

        err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
        if (err)
                goto out_ctx;

        intel_runtime_pm_get(i915);
        err = reset(engine);
        intel_runtime_pm_put(i915);

        if (want_spin) {
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);
                want_spin = false;
        }

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out_ctx;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out_ctx;
        }

        kernel_context_close(ctx);

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = check_whitelist(ctx, engine);
        if (err)
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);

out_ctx:
        kernel_context_close(ctx);
out_spin:
        if (want_spin)
                igt_spinner_fini(&spin);
        return err;
}

static int live_reset_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine = i915->engine[RCS];
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */

        if (!engine || engine->whitelist.count == 0)
                return 0;

        igt_global_reset_lock(i915);

        if (intel_has_reset_engine(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_engine_reset,
                                                   "engine");
                if (err)
                        goto out;
        }

        if (intel_has_gpu_reset(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_device_reset,
                                                   "device");
                if (err)
                        goto out;
        }

out:
        igt_global_reset_unlock(i915);
        return err;
}

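/*
 * Check the current hardware state of every GT and engine workaround
 * register against the reference lists captured at the start of the test.
 */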
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
                                struct wa_lists *lists, const char *str)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool ok = true;

        ok &= wa_list_verify(i915, &lists->gt_wa_list, str);

        for_each_engine(engine, i915, id)
                ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);

        return ok;
}

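/* Check that the GT and engine workarounds survive a full GPU reset. */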
static int
live_gpu_reset_gt_engine_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gpu_error *error = &i915->gpu_error;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(i915))
                return 0;

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(i915);
        intel_runtime_pm_get(i915);
        reference_lists_init(i915, &lists);

        ok = verify_gt_engine_wa(i915, &lists, "before reset");
        if (!ok)
                goto out;

        set_bit(I915_RESET_HANDOFF, &error->flags);
        i915_reset(i915, ALL_ENGINES, "live_workarounds");

        ok = verify_gt_engine_wa(i915, &lists, "after reset");

out:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915);
        igt_global_reset_unlock(i915);

        return ok ? 0 : -ESRCH;
}

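/*
 * Check that the GT and engine workarounds survive per-engine resets,
 * both while the engine is idle and while it is busy with a spinner.
 */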
static int
live_engine_reset_gt_engine_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(i915);
        intel_runtime_pm_get(i915);
        reference_lists_init(i915, &lists);

        for_each_engine(engine, i915, id) {
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_gt_engine_wa(i915, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, i915);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                i915_request_add(rq);

                if (!igt_wait_for_spinner(&spin, rq)) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        ret = -ETIMEDOUT;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }

err:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);

        igt_flush_test(i915, I915_WAIT_LOCKED);

        return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_gpu_reset_gt_engine_workarounds),
                SUBTEST(live_engine_reset_gt_engine_workarounds),
        };
        int err;

        if (i915_terminally_wedged(&i915->gpu_error))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}