/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_wedge_me.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

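/*
 * Whitelisted registers known to be write-only on a given platform;
 * check_dirty_whitelist() skips read-back verification for these.
 */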
static const struct wo_register {
        enum intel_platform platform;
        u32 reg;
} wo_registers[] = {
        { INTEL_GEMINILAKE, 0x731c }
};

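/*
 * Reference copies of the GT, per-engine and per-context workaround lists,
 * rebuilt from scratch so that the values currently applied by the driver
 * can be re-verified before and after a reset.
 */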
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                char name[REF_NAME_MAX];
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
};

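/* Build the "*_REF" reference lists for the GT and every enabled engine. */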
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF");
        gt_init_workarounds(i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, i915, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;
                char *name = lists->engine[id].name;

                snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

                wa_init_start(wal, name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);

                snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);

                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           name);
        }
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

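/*
 * Submit a request on @engine that uses MI_STORE_REGISTER_MEM to copy every
 * RING_FORCE_TO_NONPRIV slot into a freshly allocated scratch object, and
 * return that object so the caller can inspect the programmed whitelist.
 */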
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_flush_map(result);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        i915_vma_lock(vma);
        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

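/*
 * Read back the RING_FORCE_TO_NONPRIV slots and verify that every slot still
 * contains the register we whitelisted (unused slots are expected to hold
 * RING_NOPID).
 */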
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct igt_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        i915_gem_object_lock(results);
        igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        i915_gem_object_unlock(results);
        if (i915_terminally_wedged(ctx->i915))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
        i915_reset(engine->i915, engine->mask, "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return i915_reset_engine(engine, "live_workarounds");
}

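/*
 * Switch the engine away from the test context by submitting a spinning
 * batch from a throwaway kernel context, so that the subsequent reset is
 * taken while some other context is active.
 */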
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct i915_gem_context *ctx;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        int err = 0;

        ctx = kernel_context(engine->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

        rq = ERR_PTR(-ENODEV);
        with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
                rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);

        kernel_context_close(ctx);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        i915_request_add(rq);

        if (spin && !igt_wait_for_spinner(spin, rq)) {
                pr_err("Spinner failed to start\n");
                err = -ETIMEDOUT;
        }

err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

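/*
 * Verify the whitelist survives a reset: check it in the original context,
 * reset while a scratch context is active, check it again in the same
 * context, and finally check that a freshly created context also sees it.
 */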
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_gem_context *ctx, *tmp;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, name);

        err = igt_spinner_init(&spin, i915);
        if (err)
                return err;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out_spin;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out_ctx;
        }

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out_ctx;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out_ctx;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out_ctx;
        }

        tmp = kernel_context(i915);
        if (IS_ERR(tmp)) {
                err = PTR_ERR(tmp);
                goto out_ctx;
        }
        kernel_context_close(ctx);
        ctx = tmp;

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out_ctx;
        }

out_ctx:
        kernel_context_close(ctx);
out_spin:
        igt_spinner_fini(&spin);
        return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int err;

        obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, ctx->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

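/*
 * Compute the value we expect to read back after writing @new over @old.
 * A read-back of 0x0000ffff after writing 0xffffffff is taken to mean the
 * register uses a 16-bit write mask in the upper half of the dword;
 * otherwise @rsvd is treated as a plain mask of writable bits.
 */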
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {
                old &= ~rsvd;
                old |= new & rsvd;
        }

        return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
        enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
        int i;

        for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
                if (wo_registers[i].platform == platform &&
                    wo_registers[i].reg == reg)
                        return true;
        }

        return false;
}

static bool ro_register(u32 reg)
{
        if (reg & RING_FORCE_TO_NONPRIV_RD)
                return true;

        return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}

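/*
 * For every writable register in the engine's whitelist, write a series of
 * test patterns from an unprivileged batch, read each result back with SRM,
 * and compare against the value predicted by reg_write(). The original
 * register value is saved first and restored afterwards with LRM.
 */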
static int check_dirty_whitelist(struct i915_gem_context *ctx,
                                 struct intel_engine_cs *engine)
{
        const u32 values[] = {
                0x00000000,
                0x01010101,
                0x10100101,
                0x03030303,
                0x30300303,
                0x05050505,
                0x50500505,
                0x0f0f0f0f,
                0xf00ff00f,
                0x10101010,
                0xf0f01010,
                0x30303030,
                0xa0a03030,
                0x50505050,
                0xc0c05050,
                0xf0f0f0f0,
                0x11111111,
                0x33333333,
                0x55555555,
                0x0000ffff,
                0x00ff00ff,
                0xff0000ff,
                0xffff00ff,
                0xffffffff,
        };
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v;
        u32 *cs, *results;

        scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        batch = create_batch(ctx);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                int idx;

                if (wo_register(engine, reg))
                        continue;

                if (ro_register(reg))
                        continue;

                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
                if (INTEL_GEN(ctx->i915) >= 8)
                        lrm++, srm++;

                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);

                cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        goto out_batch;
                }

                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = ~values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

                /* LRM original -- don't leave garbage in the context! */
                *cs++ = lrm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                *cs++ = MI_BATCH_BUFFER_END;

                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                i915_gem_chipset_flush(ctx->i915);

                rq = igt_request_alloc(ctx, engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_batch;
                }

                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                        err = engine->emit_init_breadcrumb(rq);
                        if (err)
                                goto err_request;
                }

                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;

err_request:
                i915_request_add(rq);
                if (err)
                        goto out_batch;

                if (i915_request_wait(rq, 0, HZ / 5) < 0) {
                        pr_err("%s: Futzing %x timed out; cancelling test\n",
                               engine->name, reg);
                        i915_gem_set_wedged(ctx->i915);
                        err = -EIO;
                        goto out_batch;
                }

                results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
                if (IS_ERR(results)) {
                        err = PTR_ERR(results);
                        goto out_batch;
                }

                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
                rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
                if (!rsvd) {
                        pr_err("%s: Unable to write to whitelisted register %x\n",
                               engine->name, reg);
                        err = -EINVAL;
                        goto out_unpin;
                }

                expect = results[0];
                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        expect = reg_write(expect, values[v], rsvd);
                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        expect = reg_write(expect, ~values[v], rsvd);
                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                if (err) {
                        pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
                               engine->name, err, reg);

                        pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
                                engine->name, reg, results[0], rsvd);

                        expect = results[0];
                        idx = 1;
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = values[v];

                                expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = ~values[v];

                                expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }

                        err = -EINVAL;
                }
out_unpin:
                i915_gem_object_unpin_map(scratch->obj);
                if (err)
                        break;
        }

        if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
                err = -EIO;
out_batch:
        i915_vma_unpin_and_release(&batch, 0);
out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
        return err;
}

static int live_dirty_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        struct drm_file *file;
        int err = 0;

        /* Can the user write to the whitelisted registers? */

        if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        mutex_unlock(&i915->drm.struct_mutex);
        file = mock_file(i915);
        mutex_lock(&i915->drm.struct_mutex);
        if (IS_ERR(file)) {
                err = PTR_ERR(file);
                goto out_rpm;
        }

        ctx = live_context(i915, file);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out_file;
        }

        for_each_engine(engine, i915, id) {
                if (engine->whitelist.count == 0)
                        continue;

                err = check_dirty_whitelist(ctx, engine);
                if (err)
                        goto out_file;
        }

out_file:
        mutex_unlock(&i915->drm.struct_mutex);
        mock_file_free(i915, file);
        mutex_lock(&i915->drm.struct_mutex);
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return err;
}

static int live_reset_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine = i915->engine[RCS0];
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */

        if (!engine || engine->whitelist.count == 0)
                return 0;

        igt_global_reset_lock(i915);

        if (intel_has_reset_engine(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_engine_reset,
                                                   "engine");
                if (err)
                        goto out;
        }

        if (intel_has_gpu_reset(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_device_reset,
                                                   "device");
                if (err)
                        goto out;
        }

out:
        igt_global_reset_unlock(i915);
        return err;
}

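/*
 * Copy the current value of every whitelisted register into @results using
 * SRM, stripping the RD/WR access flags from the register offsets first.
 */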
static int read_whitelisted_registers(struct i915_gem_context *ctx,
                                      struct intel_engine_cs *engine,
                                      struct i915_vma *results)
{
        struct i915_request *rq;
        int i, err = 0;
        u32 srm, *cs;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        srm = MI_STORE_REGISTER_MEM;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u64 offset = results->node.start + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                /* Clear RD only and WR only flags */
                reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);

                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(offset);
                *cs++ = upper_32_bits(offset);
        }
        intel_ring_advance(rq, cs);

err_req:
        i915_request_add(rq);

        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;

        return err;
}

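/*
 * From an unprivileged batch, write 0xffffffff to every writable register in
 * the whitelist with a single LRI, dirtying the context image.
 */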
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
                                       struct intel_engine_cs *engine)
{
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;

        batch = create_batch(ctx);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }

        *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        continue;

                *cs++ = reg;
                *cs++ = 0xffffffff;
        }
        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);
        i915_gem_chipset_flush(ctx->i915);

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto err_request;
        }

        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;

err_unpin:
        i915_gem_object_unpin_map(batch->obj);
err_batch:
        i915_vma_unpin_and_release(&batch, 0);
        return err;
}

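/* Match @reg against a table of (register, gen mask) exceptions. */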
struct regmask {
        i915_reg_t reg;
        unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
                     i915_reg_t reg,
                     const struct regmask *tbl,
                     unsigned long count)
{
        u32 offset = i915_mmio_reg_offset(reg);

        while (count--) {
                if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
                    i915_mmio_reg_offset(tbl->reg) == offset)
                        return true;
                tbl++;
        }

        return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Alas, we must pardon some whitelists. Mistakes already made */
        static const struct regmask pardon[] = {
                { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
                { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
                      u32 a, u32 b, i915_reg_t reg)
{
        if (a != b && !pardon_reg(engine->i915, reg)) {
                pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
                       i915_mmio_reg_offset(reg), a, b);
                return false;
        }

        return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Some registers misbehave, and our writes cannot be read back */
        static const struct regmask wo[] = {
                { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
                       u32 a, u32 b, i915_reg_t reg)
{
        if (a == b && !writeonly_reg(engine->i915, reg)) {
                pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
                       i915_mmio_reg_offset(reg), a);
                return false;
        }

        return true;
}

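/*
 * Compare two sets of register values captured by read_whitelisted_registers()
 * using @fn, skipping any read-only whitelist entries.
 */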
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
                            struct i915_vma *A,
                            struct i915_vma *B,
                            bool (*fn)(struct intel_engine_cs *engine,
                                       u32 a, u32 b,
                                       i915_reg_t reg))
{
        u32 *a, *b;
        int i, err;

        a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
        }

        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
                const struct i915_wa *wa = &engine->whitelist.list[i];

                if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
                        continue;

                if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }

        i915_gem_object_unpin_map(B->obj);
err_a:
        i915_gem_object_unpin_map(A->obj);
        return err;
}

static int live_isolated_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct {
                struct i915_gem_context *ctx;
                struct i915_vma *scratch[2];
        } client[2] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, err = 0;

        /*
         * Check that a write into a whitelisted register works, but is
         * invisible to a second context.
         */

        if (!intel_engines_has_context_isolation(i915))
                return 0;

        if (!i915->kernel_context->vm)
                return 0;

        for (i = 0; i < ARRAY_SIZE(client); i++) {
                struct i915_gem_context *c;

                c = kernel_context(i915);
                if (IS_ERR(c)) {
                        err = PTR_ERR(c);
                        goto err;
                }

                client[i].scratch[0] = create_scratch(c->vm, 1024);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].scratch[1] = create_scratch(c->vm, 1024);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].ctx = c;
        }

        for_each_engine(engine, i915, id) {
                if (!whitelist_writable_count(engine))
                        continue;

                /* Read default values */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[0]);
                if (err)
                        goto err;

                /* Try to overwrite registers (should only affect ctx0) */
                err = scrub_whitelisted_registers(client[0].ctx, engine);
                if (err)
                        goto err;

                /* Read values from ctx1, we expect these to be defaults */
                err = read_whitelisted_registers(client[1].ctx, engine,
                                                 client[1].scratch[0]);
                if (err)
                        goto err;

                /* Verify that both reads return the same default values */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[1].scratch[0],
                                                  result_eq);
                if (err)
                        goto err;

                /* Read back the updated values in ctx0 */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[1]);
                if (err)
                        goto err;

                /* User should be granted privilege to overwrite regs */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[0].scratch[1],
                                                  result_neq);
                if (err)
                        goto err;
        }

err:
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                if (!client[i].ctx)
                        break;

                i915_vma_unpin_and_release(&client[i].scratch[1], 0);
                i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                kernel_context_close(client[i].ctx);
        }

        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;

        return err;
}

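/*
 * Check that all of the reference workaround lists (GT, engine and context)
 * are still applied on every engine bound to @ctx.
 */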
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
                const char *str)
{
        struct drm_i915_private *i915 = ctx->i915;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        bool ok = true;

        ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                enum intel_engine_id id = ce->engine->id;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].ctx_wa_list,
                                            str) == 0;
        }
        i915_gem_context_unlock_engines(ctx);

        return ok;
}

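/*
 * Verify that the workarounds are still applied after a full GPU reset.
 */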
static int
live_gpu_reset_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(i915))
                return 0;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        reference_lists_init(i915, &lists);

        ok = verify_wa_lists(ctx, &lists, "before reset");
        if (!ok)
                goto out;

        i915_reset(i915, ALL_ENGINES, "live_workarounds");

        ok = verify_wa_lists(ctx, &lists, "after reset");

out:
        kernel_context_close(ctx);
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        igt_global_reset_unlock(i915);

        return ok ? 0 : -ESRCH;
}

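/*
 * Verify that the workarounds are still applied after per-engine resets,
 * both while the engine is idle and while it is busy running a spinner.
 */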
static int
live_engine_reset_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        reference_lists_init(i915, &lists);

        for_each_engine(engine, i915, id) {
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_wa_lists(ctx, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                ok = verify_wa_lists(ctx, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, i915);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                i915_request_add(rq);

                if (!igt_wait_for_spinner(&spin, rq)) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        ret = -ETIMEDOUT;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_wa_lists(ctx, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }

err:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);

        igt_flush_test(i915, I915_WAIT_LOCKED);

        return ret;
}

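/*
 * Top-level entry point: takes struct_mutex and runs each live subtest in
 * turn, unless the GPU is already terminally wedged.
 */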
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
                SUBTEST(live_gpu_reset_workarounds),
                SUBTEST(live_engine_reset_workarounds),
        };
        int err;

        if (i915_terminally_wedged(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}