/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
        enum intel_platform platform;
        u32 reg;
} wo_registers[] = {
        { INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
};

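/* Submit rq, then wait briefly for completion; flag -EIO on timeout. */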
static int request_add_sync(struct i915_request *rq, int err)
{
        i915_request_get(rq);
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;
        i915_request_put(rq);

        return err;
}

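/* Submit rq and wait for its spinner to start executing on the GPU. */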
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
        int err = 0;

        i915_request_get(rq);
        i915_request_add(rq);
        if (spin && !igt_wait_for_spinner(spin, rq))
                err = -ETIMEDOUT;
        i915_request_put(rq);

        return err;
}

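/*
 * Snapshot the GT, engine and context workaround lists so that they can
 * later be compared against the hardware state around a reset.
 */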
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
        gt_init_workarounds(gt->i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, gt, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;

                wa_init_start(wal, "REF", engine->name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);

                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           "CTX_REF");
        }
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

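/*
 * SRM every RING_FORCE_TO_NONPRIV slot into a freshly allocated page so the
 * whitelist programming can be inspected from the CPU.
 */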
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_flush_map(result);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++; /* +1 dword length for the 64b address on gen8+ */

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

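/*
 * Read back the RING_FORCE_TO_NONPRIV slots and check that they match the
 * expected whitelist for the engine.
 */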
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct intel_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        i915_gem_object_lock(results);
        intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        i915_gem_object_unlock(results);
        if (intel_gt_is_wedged(engine->gt))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
        intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return intel_engine_reset(engine, "live_workarounds");
}

static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct intel_context *ce;
        struct i915_request *rq;
        int err = 0;

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        rq = igt_spinner_create_request(spin, ce, MI_NOOP);
        intel_context_put(ce);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        err = request_add_spin(rq, spin);
err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

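/*
 * Verify that the whitelist survives the given reset: check it before the
 * reset, afterwards in the same context, and again in a fresh context.
 */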
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_gem_context *ctx, *tmp;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, engine->name, name);

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = igt_spinner_init(&spin, engine->gt);
        if (err)
                goto out_ctx;

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out_spin;
        }

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out_spin;

        with_intel_runtime_pm(engine->uncore->rpm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out_spin;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out_spin;
        }

        tmp = kernel_context(i915);
        if (IS_ERR(tmp)) {
                err = PTR_ERR(tmp);
                goto out_spin;
        }
        kernel_context_close(ctx);
        ctx = tmp;

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out_spin;
        }

out_spin:
        igt_spinner_fini(&spin);
out_ctx:
        kernel_context_close(ctx);
        return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int err;

        obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

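/*
 * Model the effect of writing @new over @old in a register with reserved
 * bits: rsvd == 0x0000ffff denotes a masked register whose upper 16 bits
 * select which of the lower 16 bits are updated; otherwise @rsvd is the
 * mask of writable bits itself.
 */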
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {
                old &= ~rsvd;
                old |= new & rsvd;
        }

        return old;
}

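/* Is this register write-only, either by access flag or known exception? */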
static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
        enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
        int i;

        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_WR)
                return true;

        for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
                if (wo_registers[i].platform == platform &&
                    wo_registers[i].reg == reg)
                        return true;
        }

        return false;
}

static bool ro_register(u32 reg)
{
        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_RD)
                return true;

        return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}

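/*
 * For every writable whitelisted register, write a series of garbage values
 * from an unprivileged batch (LRI), read each result back (SRM) and check
 * that the values stick, modulo any write-masked bits.
 */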
static int check_dirty_whitelist(struct intel_context *ce)
{
        const u32 values[] = {
                0x00000000,
                0x01010101,
                0x10100101,
                0x03030303,
                0x30300303,
                0x05050505,
                0x50500505,
                0x0f0f0f0f,
                0xf00ff00f,
                0x10101010,
                0xf0f01010,
                0x30303030,
                0xa0a03030,
                0x50505050,
                0xc0c05050,
                0xf0f0f0f0,
                0x11111111,
                0x33333333,
                0x55555555,
                0x0000ffff,
                0x00ff00ff,
                0xff0000ff,
                0xffff00ff,
                0xffffffff,
        };
        struct intel_engine_cs *engine = ce->engine;
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v;
        u32 *cs, *results;

        scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        batch = create_batch(ce->vm);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                int idx;
                bool ro_reg;

                if (wo_register(engine, reg))
                        continue;

                ro_reg = ro_register(reg);

                /* Clear non-priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
                if (INTEL_GEN(engine->i915) >= 8)
                        lrm++, srm++; /* +1 dword length for the 64b address */

                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);

                cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        goto out_batch;
                }

                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = ~values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

                /* LRM original -- don't leave garbage in the context! */
                *cs++ = lrm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                *cs++ = MI_BATCH_BUFFER_END;

                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_batch;
                }

                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                        err = engine->emit_init_breadcrumb(rq);
                        if (err)
                                goto err_request;
                }

                i915_vma_lock(batch);
                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(batch, rq, 0);
                i915_vma_unlock(batch);
                if (err)
                        goto err_request;

                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;
err_request:
                err = request_add_sync(rq, err);
                if (err) {
                        pr_err("%s: Futzing %x timed out; cancelling test\n",
                               engine->name, reg);
                        intel_gt_set_wedged(engine->gt);
                        goto out_batch;
                }

                results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
                if (IS_ERR(results)) {
                        err = PTR_ERR(results);
                        goto out_batch;
                }

                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
                if (!ro_reg) {
                        /* detect write masking */
                        rsvd = results[ARRAY_SIZE(values)];
                        if (!rsvd) {
                                pr_err("%s: Unable to write to whitelisted register %x\n",
                                       engine->name, reg);
                                err = -EINVAL;
                                goto out_unpin;
                        }
                }

                expect = results[0];
                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, ~values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                if (err) {
                        pr_err("%s: %d mismatches between values written to whitelisted register [%x] and values read back!\n",
                               engine->name, err, reg);

                        if (ro_reg)
                                pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
                                        engine->name, reg, results[0]);
                        else
                                pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
                                        engine->name, reg, results[0], rsvd);

                        expect = results[0];
                        idx = 1;
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = ~values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }

                        err = -EINVAL;
                }
out_unpin:
                i915_gem_object_unpin_map(scratch->obj);
                if (err)
                        break;
        }

        if (igt_flush_test(engine->i915))
                err = -EIO;
out_batch:
        i915_vma_unpin_and_release(&batch, 0);
out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
        return err;
}

static int live_dirty_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        /* Can the user write to the whitelisted registers? */

        if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
                int err;

                if (engine->whitelist.count == 0)
                        continue;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return PTR_ERR(ce);

                err = check_dirty_whitelist(ce);
                intel_context_put(ce);
                if (err)
                        return err;
        }

        return 0;
}

static int live_reset_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */
        igt_global_reset_lock(gt);

        for_each_engine(engine, gt, id) {
                if (engine->whitelist.count == 0)
                        continue;

                if (intel_has_reset_engine(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_engine_reset,
                                                           "engine");
                        if (err)
                                goto out;
                }

                if (intel_has_gpu_reset(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_device_reset,
                                                           "device");
                        if (err)
                                goto out;
                }
        }

out:
        igt_global_reset_unlock(gt);
        return err;
}

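/* SRM the current value of each whitelisted register into @results. */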
static int read_whitelisted_registers(struct i915_gem_context *ctx,
                                      struct intel_engine_cs *engine,
                                      struct i915_vma *results)
{
        struct i915_request *rq;
        int i, err = 0;
        u32 srm, *cs;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        i915_vma_lock(results);
        err = i915_request_await_object(rq, results->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(results);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u64 offset = results->node.start + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                /* Clear non-priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(offset);
                *cs++ = upper_32_bits(offset);
        }
        intel_ring_advance(rq, cs);

err_req:
        return request_add_sync(rq, err);
}

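/*
 * From an unprivileged batch in @ctx, write 0xffffffff to every writable
 * whitelisted register on @engine.
 */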
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
                                       struct intel_engine_cs *engine)
{
        struct i915_address_space *vm;
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;

        vm = i915_gem_context_get_vm_rcu(ctx);
        batch = create_batch(vm);
        i915_vm_put(vm);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }

        *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        continue;

                /* Clear non-priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = reg;
                *cs++ = 0xffffffff;
        }
        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);
        intel_gt_chipset_flush(engine->gt);

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto err_request;
        }

        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
                err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto err_request;

        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
        err = request_add_sync(rq, err);

err_unpin:
        i915_gem_object_unpin_map(batch->obj);
err_batch:
        i915_vma_unpin_and_release(&batch, 0);
        return err;
}

struct regmask {
        i915_reg_t reg;
        unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
                     i915_reg_t reg,
                     const struct regmask *tbl,
                     unsigned long count)
{
        u32 offset = i915_mmio_reg_offset(reg);

        while (count--) {
                if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
                    i915_mmio_reg_offset(tbl->reg) == offset)
                        return true;
                tbl++;
        }

        return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Alas, we must pardon some whitelists. Mistakes already made */
        static const struct regmask pardon[] = {
                { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
                { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
                      u32 a, u32 b, i915_reg_t reg)
{
        if (a != b && !pardon_reg(engine->i915, reg)) {
                pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
                       i915_mmio_reg_offset(reg), a, b);
                return false;
        }

        return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Some registers do not seem to behave, leaving our writes unreadable */
        static const struct regmask wo[] = {
                { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
                       u32 a, u32 b, i915_reg_t reg)
{
        if (a == b && !writeonly_reg(engine->i915, reg)) {
                pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
                       i915_mmio_reg_offset(reg), a);
                return false;
        }

        return true;
}

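/*
 * Compare two snapshots of the whitelisted registers with @fn, skipping
 * read-only entries.
 */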
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
                            struct i915_vma *A,
                            struct i915_vma *B,
                            bool (*fn)(struct intel_engine_cs *engine,
                                       u32 a, u32 b,
                                       i915_reg_t reg))
{
        u32 *a, *b;
        int i, err;

        a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
        }

        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
                const struct i915_wa *wa = &engine->whitelist.list[i];

                if (i915_mmio_reg_offset(wa->reg) &
                    RING_FORCE_TO_NONPRIV_ACCESS_RD)
                        continue;

                if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }

        i915_gem_object_unpin_map(B->obj);
err_a:
        i915_gem_object_unpin_map(A->obj);
        return err;
}

static int live_isolated_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct {
                struct i915_gem_context *ctx;
                struct i915_vma *scratch[2];
        } client[2] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, err = 0;

        /*
         * Check that a write into a whitelisted register works, but is
         * invisible to a second context.
         */

        if (!intel_engines_has_context_isolation(gt->i915))
                return 0;

        for (i = 0; i < ARRAY_SIZE(client); i++) {
                struct i915_address_space *vm;
                struct i915_gem_context *c;

                c = kernel_context(gt->i915);
                if (IS_ERR(c)) {
                        err = PTR_ERR(c);
                        goto err;
                }

                vm = i915_gem_context_get_vm_rcu(c);

                client[i].scratch[0] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].scratch[1] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                        i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].ctx = c;
                i915_vm_put(vm);
        }

        for_each_engine(engine, gt, id) {
                if (!engine->kernel_context->vm)
                        continue;

                if (!whitelist_writable_count(engine))
                        continue;

                /* Read default values */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[0]);
                if (err)
                        goto err;

                /* Try to overwrite registers (should only affect ctx0) */
                err = scrub_whitelisted_registers(client[0].ctx, engine);
                if (err)
                        goto err;

                /* Read values from ctx1, we expect these to be defaults */
                err = read_whitelisted_registers(client[1].ctx, engine,
                                                 client[1].scratch[0]);
                if (err)
                        goto err;

                /* Verify that both reads return the same default values */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[1].scratch[0],
                                                  result_eq);
                if (err)
                        goto err;

                /* Read back the updated values in ctx0 */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[1]);
                if (err)
                        goto err;

                /* User should be granted privilege to overwrite regs */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[0].scratch[1],
                                                  result_neq);
                if (err)
                        goto err;
        }

err:
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                if (!client[i].ctx)
                        break;

                i915_vma_unpin_and_release(&client[i].scratch[1], 0);
                i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                kernel_context_close(client[i].ctx);
        }

        if (igt_flush_test(gt->i915))
                err = -EIO;

        return err;
}

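/*
 * Verify the GT, engine and context workaround lists against the reference
 * snapshots taken by reference_lists_init().
 */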
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
                const char *str)
{
        struct drm_i915_private *i915 = ctx->i915;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        bool ok = true;

        ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

        for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
                enum intel_engine_id id = ce->engine->id;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].ctx_wa_list,
                                            str) == 0;
        }

        return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_gem_context *ctx;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(gt))
                return 0;

        ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        i915_gem_context_lock_engines(ctx);

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        ok = verify_wa_lists(ctx, &lists, "before reset");
        if (!ok)
                goto out;

        intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

        ok = verify_wa_lists(ctx, &lists, "after reset");

out:
        i915_gem_context_unlock_engines(ctx);
        kernel_context_close(ctx);
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);

        return ok ? 0 : -ESRCH;
}

static int
live_engine_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_gem_engines_iter it;
        struct i915_gem_context *ctx;
        struct intel_context *ce;
        struct igt_spinner spin;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(gt))
                return 0;

        ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct intel_engine_cs *engine = ce->engine;
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_wa_lists(ctx, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds");

                ok = verify_wa_lists(ctx, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, engine->gt);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                ret = request_add_spin(rq, &spin);
                if (ret) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_wa_lists(ctx, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }
err:
        i915_gem_context_unlock_engines(ctx);
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);
        kernel_context_close(ctx);

        igt_flush_test(gt->i915);

        return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
                SUBTEST(live_gpu_reset_workarounds),
                SUBTEST(live_engine_reset_workarounds),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return intel_gt_live_subtests(tests, &i915->gt);
}