1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_reset.h"
11
12 #include "i915_selftest.h"
13 #include "selftests/i915_random.h"
14 #include "selftests/igt_flush_test.h"
15 #include "selftests/igt_live_test.h"
16 #include "selftests/igt_spinner.h"
17 #include "selftests/lib_sw_fence.h"
18
19 #include "gem/selftests/igt_gem_utils.h"
20 #include "gem/selftests/mock_context.h"
21
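/*
 * Live selftests for the execlists (logical ring context) submission
 * backend: basic request execution, preemption between contexts of
 * differing priority, suppression of needless preempt-to-idle cycles,
 * preemption over long request chains, recovery from a hung preemption,
 * a priority smoketest, and the virtual engine interface.
 */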
22 static int live_sanitycheck(void *arg)
23 {
24         struct drm_i915_private *i915 = arg;
25         struct intel_engine_cs *engine;
26         struct i915_gem_context *ctx;
27         enum intel_engine_id id;
28         struct igt_spinner spin;
29         intel_wakeref_t wakeref;
30         int err = -ENOMEM;
31
32         if (!HAS_LOGICAL_RING_CONTEXTS(i915))
33                 return 0;
34
35         mutex_lock(&i915->drm.struct_mutex);
36         wakeref = intel_runtime_pm_get(i915);
37
38         if (igt_spinner_init(&spin, i915))
39                 goto err_unlock;
40
41         ctx = kernel_context(i915);
42         if (!ctx)
43                 goto err_spin;
44
45         for_each_engine(engine, i915, id) {
46                 struct i915_request *rq;
47
48                 rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
49                 if (IS_ERR(rq)) {
50                         err = PTR_ERR(rq);
51                         goto err_ctx;
52                 }
53
54                 i915_request_add(rq);
55                 if (!igt_wait_for_spinner(&spin, rq)) {
56                         GEM_TRACE("spinner failed to start\n");
57                         GEM_TRACE_DUMP();
58                         i915_gem_set_wedged(i915);
59                         err = -EIO;
60                         goto err_ctx;
61                 }
62
63                 igt_spinner_end(&spin);
64                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
65                         err = -EIO;
66                         goto err_ctx;
67                 }
68         }
69
70         err = 0;
71 err_ctx:
72         kernel_context_close(ctx);
73 err_spin:
74         igt_spinner_fini(&spin);
75 err_unlock:
76         igt_flush_test(i915, I915_WAIT_LOCKED);
77         intel_runtime_pm_put(i915, wakeref);
78         mutex_unlock(&i915->drm.struct_mutex);
79         return err;
80 }
81
82 static int live_busywait_preempt(void *arg)
83 {
84         struct drm_i915_private *i915 = arg;
85         struct i915_gem_context *ctx_hi, *ctx_lo;
86         struct intel_engine_cs *engine;
87         struct drm_i915_gem_object *obj;
88         struct i915_vma *vma;
89         enum intel_engine_id id;
90         intel_wakeref_t wakeref;
91         int err = -ENOMEM;
92         u32 *map;
93
94         /*
95          * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
96          * preempt the busywaits used to synchronise between rings.
97          */
98
99         mutex_lock(&i915->drm.struct_mutex);
100         wakeref = intel_runtime_pm_get(i915);
101
102         ctx_hi = kernel_context(i915);
103         if (!ctx_hi)
104                 goto err_unlock;
105         ctx_hi->sched.priority =
106                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
107
108         ctx_lo = kernel_context(i915);
109         if (!ctx_lo)
110                 goto err_ctx_hi;
111         ctx_lo->sched.priority =
112                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
113
114         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
115         if (IS_ERR(obj)) {
116                 err = PTR_ERR(obj);
117                 goto err_ctx_lo;
118         }
119
120         map = i915_gem_object_pin_map(obj, I915_MAP_WC);
121         if (IS_ERR(map)) {
122                 err = PTR_ERR(map);
123                 goto err_obj;
124         }
125
126         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
127         if (IS_ERR(vma)) {
128                 err = PTR_ERR(vma);
129                 goto err_map;
130         }
131
132         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
133         if (err)
134                 goto err_map;
135
136         for_each_engine(engine, i915, id) {
137                 struct i915_request *lo, *hi;
138                 struct igt_live_test t;
139                 u32 *cs;
140
141                 if (!intel_engine_can_store_dword(engine))
142                         continue;
143
144                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
145                         err = -EIO;
146                         goto err_vma;
147                 }
148
149                 /*
150                  * We create two requests. The low priority request
151                  * busywaits on a semaphore (inside the ringbuffer where
152                  * it should be preemptible) and the high priority request
153                  * uses an MI_STORE_DWORD_IMM to update the semaphore value
154                  * allowing the first request to complete. If preemption
155                  * fails, we hang instead.
156                  */
157
158                 lo = igt_request_alloc(ctx_lo, engine);
159                 if (IS_ERR(lo)) {
160                         err = PTR_ERR(lo);
161                         goto err_vma;
162                 }
163
164                 cs = intel_ring_begin(lo, 8);
165                 if (IS_ERR(cs)) {
166                         err = PTR_ERR(cs);
167                         i915_request_add(lo);
168                         goto err_vma;
169                 }
170
171                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
172                 *cs++ = i915_ggtt_offset(vma);
173                 *cs++ = 0;
174                 *cs++ = 1;
175
176                 /* XXX Do we need a flush + invalidate here? */
177
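                /*
                 * Busywait in the ring: poll the semaphore dword until it
                 * reads back as zero. The high priority request below will
                 * clear it, releasing this request.
                 */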
178                 *cs++ = MI_SEMAPHORE_WAIT |
179                         MI_SEMAPHORE_GLOBAL_GTT |
180                         MI_SEMAPHORE_POLL |
181                         MI_SEMAPHORE_SAD_EQ_SDD;
182                 *cs++ = 0;
183                 *cs++ = i915_ggtt_offset(vma);
184                 *cs++ = 0;
185
186                 intel_ring_advance(lo, cs);
187                 i915_request_add(lo);
188
189                 if (wait_for(READ_ONCE(*map), 10)) {
190                         err = -ETIMEDOUT;
191                         goto err_vma;
192                 }
193
194                 /* Low priority request should be busywaiting now */
195                 if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
196                         pr_err("%s: Busywaiting request did not busywait!\n",
197                                engine->name);
198                         err = -EIO;
199                         goto err_vma;
200                 }
201
202                 hi = igt_request_alloc(ctx_hi, engine);
203                 if (IS_ERR(hi)) {
204                         err = PTR_ERR(hi);
205                         goto err_vma;
206                 }
207
208                 cs = intel_ring_begin(hi, 4);
209                 if (IS_ERR(cs)) {
210                         err = PTR_ERR(cs);
211                         i915_request_add(hi);
212                         goto err_vma;
213                 }
214
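                /* Release the busywait by overwriting the semaphore dword with 0 */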
215                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
216                 *cs++ = i915_ggtt_offset(vma);
217                 *cs++ = 0;
218                 *cs++ = 0;
219
220                 intel_ring_advance(hi, cs);
221                 i915_request_add(hi);
222
223                 if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
224                         struct drm_printer p = drm_info_printer(i915->drm.dev);
225
226                         pr_err("%s: Failed to preempt semaphore busywait!\n",
227                                engine->name);
228
229                         intel_engine_dump(engine, &p, "%s\n", engine->name);
230                         GEM_TRACE_DUMP();
231
232                         i915_gem_set_wedged(i915);
233                         err = -EIO;
234                         goto err_vma;
235                 }
236                 GEM_BUG_ON(READ_ONCE(*map));
237
238                 if (igt_live_test_end(&t)) {
239                         err = -EIO;
240                         goto err_vma;
241                 }
242         }
243
244         err = 0;
245 err_vma:
246         i915_vma_unpin(vma);
247 err_map:
248         i915_gem_object_unpin_map(obj);
249 err_obj:
250         i915_gem_object_put(obj);
251 err_ctx_lo:
252         kernel_context_close(ctx_lo);
253 err_ctx_hi:
254         kernel_context_close(ctx_hi);
255 err_unlock:
256         if (igt_flush_test(i915, I915_WAIT_LOCKED))
257                 err = -EIO;
258         intel_runtime_pm_put(i915, wakeref);
259         mutex_unlock(&i915->drm.struct_mutex);
260         return err;
261 }
262
263 static int live_preempt(void *arg)
264 {
265         struct drm_i915_private *i915 = arg;
266         struct i915_gem_context *ctx_hi, *ctx_lo;
267         struct igt_spinner spin_hi, spin_lo;
268         struct intel_engine_cs *engine;
269         enum intel_engine_id id;
270         intel_wakeref_t wakeref;
271         int err = -ENOMEM;
272
273         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
274                 return 0;
275
276         if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
277                 pr_err("Logical preemption supported, but not exposed\n");
278
279         mutex_lock(&i915->drm.struct_mutex);
280         wakeref = intel_runtime_pm_get(i915);
281
282         if (igt_spinner_init(&spin_hi, i915))
283                 goto err_unlock;
284
285         if (igt_spinner_init(&spin_lo, i915))
286                 goto err_spin_hi;
287
288         ctx_hi = kernel_context(i915);
289         if (!ctx_hi)
290                 goto err_spin_lo;
291         ctx_hi->sched.priority =
292                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
293
294         ctx_lo = kernel_context(i915);
295         if (!ctx_lo)
296                 goto err_ctx_hi;
297         ctx_lo->sched.priority =
298                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
299
300         for_each_engine(engine, i915, id) {
301                 struct igt_live_test t;
302                 struct i915_request *rq;
303
304                 if (!intel_engine_has_preemption(engine))
305                         continue;
306
307                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
308                         err = -EIO;
309                         goto err_ctx_lo;
310                 }
311
312                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
313                                                 MI_ARB_CHECK);
314                 if (IS_ERR(rq)) {
315                         err = PTR_ERR(rq);
316                         goto err_ctx_lo;
317                 }
318
319                 i915_request_add(rq);
320                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
321                         GEM_TRACE("lo spinner failed to start\n");
322                         GEM_TRACE_DUMP();
323                         i915_gem_set_wedged(i915);
324                         err = -EIO;
325                         goto err_ctx_lo;
326                 }
327
328                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
329                                                 MI_ARB_CHECK);
330                 if (IS_ERR(rq)) {
331                         igt_spinner_end(&spin_lo);
332                         err = PTR_ERR(rq);
333                         goto err_ctx_lo;
334                 }
335
336                 i915_request_add(rq);
337                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
338                         GEM_TRACE("hi spinner failed to start\n");
339                         GEM_TRACE_DUMP();
340                         i915_gem_set_wedged(i915);
341                         err = -EIO;
342                         goto err_ctx_lo;
343                 }
344
345                 igt_spinner_end(&spin_hi);
346                 igt_spinner_end(&spin_lo);
347
348                 if (igt_live_test_end(&t)) {
349                         err = -EIO;
350                         goto err_ctx_lo;
351                 }
352         }
353
354         err = 0;
355 err_ctx_lo:
356         kernel_context_close(ctx_lo);
357 err_ctx_hi:
358         kernel_context_close(ctx_hi);
359 err_spin_lo:
360         igt_spinner_fini(&spin_lo);
361 err_spin_hi:
362         igt_spinner_fini(&spin_hi);
363 err_unlock:
364         igt_flush_test(i915, I915_WAIT_LOCKED);
365         intel_runtime_pm_put(i915, wakeref);
366         mutex_unlock(&i915->drm.struct_mutex);
367         return err;
368 }
369
370 static int live_late_preempt(void *arg)
371 {
372         struct drm_i915_private *i915 = arg;
373         struct i915_gem_context *ctx_hi, *ctx_lo;
374         struct igt_spinner spin_hi, spin_lo;
375         struct intel_engine_cs *engine;
376         struct i915_sched_attr attr = {};
377         enum intel_engine_id id;
378         intel_wakeref_t wakeref;
379         int err = -ENOMEM;
380
381         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
382                 return 0;
383
384         mutex_lock(&i915->drm.struct_mutex);
385         wakeref = intel_runtime_pm_get(i915);
386
387         if (igt_spinner_init(&spin_hi, i915))
388                 goto err_unlock;
389
390         if (igt_spinner_init(&spin_lo, i915))
391                 goto err_spin_hi;
392
393         ctx_hi = kernel_context(i915);
394         if (!ctx_hi)
395                 goto err_spin_lo;
396
397         ctx_lo = kernel_context(i915);
398         if (!ctx_lo)
399                 goto err_ctx_hi;
400
401         for_each_engine(engine, i915, id) {
402                 struct igt_live_test t;
403                 struct i915_request *rq;
404
405                 if (!intel_engine_has_preemption(engine))
406                         continue;
407
408                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
409                         err = -EIO;
410                         goto err_ctx_lo;
411                 }
412
413                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
414                                                 MI_ARB_CHECK);
415                 if (IS_ERR(rq)) {
416                         err = PTR_ERR(rq);
417                         goto err_ctx_lo;
418                 }
419
420                 i915_request_add(rq);
421                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
422                         pr_err("First context failed to start\n");
423                         goto err_wedged;
424                 }
425
426                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
427                                                 MI_NOOP);
428                 if (IS_ERR(rq)) {
429                         igt_spinner_end(&spin_lo);
430                         err = PTR_ERR(rq);
431                         goto err_ctx_lo;
432                 }
433
434                 i915_request_add(rq);
435                 if (igt_wait_for_spinner(&spin_hi, rq)) {
436                         pr_err("Second context overtook first?\n");
437                         goto err_wedged;
438                 }
439
440                 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
441                 engine->schedule(rq, &attr);
442
443                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
444                         pr_err("High priority context failed to preempt the low priority context\n");
445                         GEM_TRACE_DUMP();
446                         goto err_wedged;
447                 }
448
449                 igt_spinner_end(&spin_hi);
450                 igt_spinner_end(&spin_lo);
451
452                 if (igt_live_test_end(&t)) {
453                         err = -EIO;
454                         goto err_ctx_lo;
455                 }
456         }
457
458         err = 0;
459 err_ctx_lo:
460         kernel_context_close(ctx_lo);
461 err_ctx_hi:
462         kernel_context_close(ctx_hi);
463 err_spin_lo:
464         igt_spinner_fini(&spin_lo);
465 err_spin_hi:
466         igt_spinner_fini(&spin_hi);
467 err_unlock:
468         igt_flush_test(i915, I915_WAIT_LOCKED);
469         intel_runtime_pm_put(i915, wakeref);
470         mutex_unlock(&i915->drm.struct_mutex);
471         return err;
472
473 err_wedged:
474         igt_spinner_end(&spin_hi);
475         igt_spinner_end(&spin_lo);
476         i915_gem_set_wedged(i915);
477         err = -EIO;
478         goto err_ctx_lo;
479 }
480
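/*
 * A kernel context paired with a spinner; each acts as one client in the
 * preemption tests below.
 */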
481 struct preempt_client {
482         struct igt_spinner spin;
483         struct i915_gem_context *ctx;
484 };
485
486 static int preempt_client_init(struct drm_i915_private *i915,
487                                struct preempt_client *c)
488 {
489         c->ctx = kernel_context(i915);
490         if (!c->ctx)
491                 return -ENOMEM;
492
493         if (igt_spinner_init(&c->spin, i915))
494                 goto err_ctx;
495
496         return 0;
497
498 err_ctx:
499         kernel_context_close(c->ctx);
500         return -ENOMEM;
501 }
502
503 static void preempt_client_fini(struct preempt_client *c)
504 {
505         igt_spinner_fini(&c->spin);
506         kernel_context_close(c->ctx);
507 }
508
509 static int live_suppress_self_preempt(void *arg)
510 {
511         struct drm_i915_private *i915 = arg;
512         struct intel_engine_cs *engine;
513         struct i915_sched_attr attr = {
514                 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
515         };
516         struct preempt_client a, b;
517         enum intel_engine_id id;
518         intel_wakeref_t wakeref;
519         int err = -ENOMEM;
520
521         /*
522          * Verify that if a preemption request does not cause a change in
523          * the current execution order, the preempt-to-idle injection is
524          * skipped and that we do not accidentally apply it after the CS
525          * completion event.
526          */
527
528         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
529                 return 0;
530
531         if (USES_GUC_SUBMISSION(i915))
532                 return 0; /* presume black box */
533
534         mutex_lock(&i915->drm.struct_mutex);
535         wakeref = intel_runtime_pm_get(i915);
536
537         if (preempt_client_init(i915, &a))
538                 goto err_unlock;
539         if (preempt_client_init(i915, &b))
540                 goto err_client_a;
541
542         for_each_engine(engine, i915, id) {
543                 struct i915_request *rq_a, *rq_b;
544                 int depth;
545
546                 if (!intel_engine_has_preemption(engine))
547                         continue;
548
549                 engine->execlists.preempt_hang.count = 0;
550
551                 rq_a = igt_spinner_create_request(&a.spin,
552                                                   a.ctx, engine,
553                                                   MI_NOOP);
554                 if (IS_ERR(rq_a)) {
555                         err = PTR_ERR(rq_a);
556                         goto err_client_b;
557                 }
558
559                 i915_request_add(rq_a);
560                 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
561                         pr_err("First client failed to start\n");
562                         goto err_wedged;
563                 }
564
565                 for (depth = 0; depth < 8; depth++) {
566                         rq_b = igt_spinner_create_request(&b.spin,
567                                                           b.ctx, engine,
568                                                           MI_NOOP);
569                         if (IS_ERR(rq_b)) {
570                                 err = PTR_ERR(rq_b);
571                                 goto err_client_b;
572                         }
573                         i915_request_add(rq_b);
574
575                         GEM_BUG_ON(i915_request_completed(rq_a));
576                         engine->schedule(rq_a, &attr);
577                         igt_spinner_end(&a.spin);
578
579                         if (!igt_wait_for_spinner(&b.spin, rq_b)) {
580                                 pr_err("Second client failed to start\n");
581                                 goto err_wedged;
582                         }
583
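                        /*
                         * The spinner that just started running becomes
                         * client "a" for the next pass, so we repeatedly
                         * bump the already-executing request to maximum
                         * priority.
                         */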
584                         swap(a, b);
585                         rq_a = rq_b;
586                 }
587                 igt_spinner_end(&a.spin);
588
589                 if (engine->execlists.preempt_hang.count) {
590                         pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
591                                engine->execlists.preempt_hang.count,
592                                depth);
593                         err = -EINVAL;
594                         goto err_client_b;
595                 }
596
597                 if (igt_flush_test(i915, I915_WAIT_LOCKED))
598                         goto err_wedged;
599         }
600
601         err = 0;
602 err_client_b:
603         preempt_client_fini(&b);
604 err_client_a:
605         preempt_client_fini(&a);
606 err_unlock:
607         if (igt_flush_test(i915, I915_WAIT_LOCKED))
608                 err = -EIO;
609         intel_runtime_pm_put(i915, wakeref);
610         mutex_unlock(&i915->drm.struct_mutex);
611         return err;
612
613 err_wedged:
614         igt_spinner_end(&b.spin);
615         igt_spinner_end(&a.spin);
616         i915_gem_set_wedged(i915);
617         err = -EIO;
618         goto err_client_b;
619 }
620
621 static int __i915_sw_fence_call
622 dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
623 {
624         return NOTIFY_DONE;
625 }
626
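/*
 * Construct a bare-bones request that is never submitted to hardware and
 * that the scheduler sees as started but permanently incomplete, until
 * dummy_request_free() finally signals it. Used below as a stand-in last
 * request on a timeline to suppress NEWCLIENT priority promotion.
 */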
627 static struct i915_request *dummy_request(struct intel_engine_cs *engine)
628 {
629         struct i915_request *rq;
630
631         rq = kzalloc(sizeof(*rq), GFP_KERNEL);
632         if (!rq)
633                 return NULL;
634
635         INIT_LIST_HEAD(&rq->active_list);
636         rq->engine = engine;
637
638         i915_sched_node_init(&rq->sched);
639
640         /* mark this request as permanently incomplete */
641         rq->fence.seqno = 1;
642         BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
643         rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
644         GEM_BUG_ON(i915_request_completed(rq));
645
646         i915_sw_fence_init(&rq->submit, dummy_notify);
647         set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
648
649         return rq;
650 }
651
652 static void dummy_request_free(struct i915_request *dummy)
653 {
654         /* We have to fake the CS interrupt to kick the next request */
655         i915_sw_fence_commit(&dummy->submit);
656
657         i915_request_mark_complete(dummy);
658         dma_fence_signal(&dummy->fence);
659
660         i915_sched_node_fini(&dummy->sched);
661         i915_sw_fence_fini(&dummy->submit);
662
663         dma_fence_free(&dummy->fence);
664 }
665
666 static int live_suppress_wait_preempt(void *arg)
667 {
668         struct drm_i915_private *i915 = arg;
669         struct preempt_client client[4];
670         struct intel_engine_cs *engine;
671         enum intel_engine_id id;
672         intel_wakeref_t wakeref;
673         int err = -ENOMEM;
674         int i;
675
676         /*
677          * Waiters are given a little priority nudge, but not enough
678          * to actually cause any preemption. Double check that we do
679          * not needlessly generate preempt-to-idle cycles.
680          */
681
682         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
683                 return 0;
684
685         mutex_lock(&i915->drm.struct_mutex);
686         wakeref = intel_runtime_pm_get(i915);
687
688         if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
689                 goto err_unlock;
690         if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
691                 goto err_client_0;
692         if (preempt_client_init(i915, &client[2])) /* head of queue */
693                 goto err_client_1;
694         if (preempt_client_init(i915, &client[3])) /* bystander */
695                 goto err_client_2;
696
697         for_each_engine(engine, i915, id) {
698                 int depth;
699
700                 if (!intel_engine_has_preemption(engine))
701                         continue;
702
703                 if (!engine->emit_init_breadcrumb)
704                         continue;
705
706                 for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
707                         struct i915_request *rq[ARRAY_SIZE(client)];
708                         struct i915_request *dummy;
709
710                         engine->execlists.preempt_hang.count = 0;
711
712                         dummy = dummy_request(engine);
713                         if (!dummy)
714                                 goto err_client_3;
715
716                         for (i = 0; i < ARRAY_SIZE(client); i++) {
717                                 rq[i] = igt_spinner_create_request(&client[i].spin,
718                                                                    client[i].ctx, engine,
719                                                                    MI_NOOP);
720                                 if (IS_ERR(rq[i])) {
721                                         err = PTR_ERR(rq[i]);
722                                         goto err_wedged;
723                                 }
724
725                                 /* Disable NEWCLIENT promotion */
726                                 __i915_active_request_set(&rq[i]->timeline->last_request,
727                                                           dummy);
728                                 i915_request_add(rq[i]);
729                         }
730
731                         dummy_request_free(dummy);
732
733                         GEM_BUG_ON(i915_request_completed(rq[0]));
734                         if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
735                                 pr_err("%s: First client failed to start\n",
736                                        engine->name);
737                                 goto err_wedged;
738                         }
739                         GEM_BUG_ON(!i915_request_started(rq[0]));
740
741                         if (i915_request_wait(rq[depth],
742                                               I915_WAIT_LOCKED |
743                                               I915_WAIT_PRIORITY,
744                                               1) != -ETIME) {
745                                 pr_err("%s: Waiter depth:%d completed!\n",
746                                        engine->name, depth);
747                                 goto err_wedged;
748                         }
749
750                         for (i = 0; i < ARRAY_SIZE(client); i++)
751                                 igt_spinner_end(&client[i].spin);
752
753                         if (igt_flush_test(i915, I915_WAIT_LOCKED))
754                                 goto err_wedged;
755
756                         if (engine->execlists.preempt_hang.count) {
757                                 pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
758                                        engine->name,
759                                        engine->execlists.preempt_hang.count,
760                                        depth);
761                                 err = -EINVAL;
762                                 goto err_client_3;
763                         }
764                 }
765         }
766
767         err = 0;
768 err_client_3:
769         preempt_client_fini(&client[3]);
770 err_client_2:
771         preempt_client_fini(&client[2]);
772 err_client_1:
773         preempt_client_fini(&client[1]);
774 err_client_0:
775         preempt_client_fini(&client[0]);
776 err_unlock:
777         if (igt_flush_test(i915, I915_WAIT_LOCKED))
778                 err = -EIO;
779         intel_runtime_pm_put(i915, wakeref);
780         mutex_unlock(&i915->drm.struct_mutex);
781         return err;
782
783 err_wedged:
784         for (i = 0; i < ARRAY_SIZE(client); i++)
785                 igt_spinner_end(&client[i].spin);
786         i915_gem_set_wedged(i915);
787         err = -EIO;
788         goto err_client_3;
789 }
790
791 static int live_chain_preempt(void *arg)
792 {
793         struct drm_i915_private *i915 = arg;
794         struct intel_engine_cs *engine;
795         struct preempt_client hi, lo;
796         enum intel_engine_id id;
797         intel_wakeref_t wakeref;
798         int err = -ENOMEM;
799
800         /*
801          * Build a chain AB...BA between two contexts (A, B) and request
802          * preemption of the last request. It should then complete before
803          * the previously submitted spinner in B.
804          */
805
806         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
807                 return 0;
808
809         mutex_lock(&i915->drm.struct_mutex);
810         wakeref = intel_runtime_pm_get(i915);
811
812         if (preempt_client_init(i915, &hi))
813                 goto err_unlock;
814
815         if (preempt_client_init(i915, &lo))
816                 goto err_client_hi;
817
818         for_each_engine(engine, i915, id) {
819                 struct i915_sched_attr attr = {
820                         .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
821                 };
822                 struct igt_live_test t;
823                 struct i915_request *rq;
824                 int ring_size, count, i;
825
826                 if (!intel_engine_has_preemption(engine))
827                         continue;
828
829                 rq = igt_spinner_create_request(&lo.spin,
830                                                 lo.ctx, engine,
831                                                 MI_ARB_CHECK);
832                 if (IS_ERR(rq))
833                         goto err_wedged;
834                 i915_request_add(rq);
835
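                /*
                 * Estimate how many requests fit in the ring from the
                 * footprint of this first spinner request.
                 */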
836                 ring_size = rq->wa_tail - rq->head;
837                 if (ring_size < 0)
838                         ring_size += rq->ring->size;
839                 ring_size = rq->ring->size / ring_size;
840                 pr_debug("%s(%s): Using maximum of %d requests\n",
841                          __func__, engine->name, ring_size);
842
843                 igt_spinner_end(&lo.spin);
844                 if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
845                         pr_err("Timed out waiting to flush %s\n", engine->name);
846                         goto err_wedged;
847                 }
848
849                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
850                         err = -EIO;
851                         goto err_wedged;
852                 }
853
854                 for_each_prime_number_from(count, 1, ring_size) {
855                         rq = igt_spinner_create_request(&hi.spin,
856                                                         hi.ctx, engine,
857                                                         MI_ARB_CHECK);
858                         if (IS_ERR(rq))
859                                 goto err_wedged;
860                         i915_request_add(rq);
861                         if (!igt_wait_for_spinner(&hi.spin, rq))
862                                 goto err_wedged;
863
864                         rq = igt_spinner_create_request(&lo.spin,
865                                                         lo.ctx, engine,
866                                                         MI_ARB_CHECK);
867                         if (IS_ERR(rq))
868                                 goto err_wedged;
869                         i915_request_add(rq);
870
871                         for (i = 0; i < count; i++) {
872                                 rq = igt_request_alloc(lo.ctx, engine);
873                                 if (IS_ERR(rq))
874                                         goto err_wedged;
875                                 i915_request_add(rq);
876                         }
877
878                         rq = igt_request_alloc(hi.ctx, engine);
879                         if (IS_ERR(rq))
880                                 goto err_wedged;
881                         i915_request_add(rq);
882                         engine->schedule(rq, &attr);
883
884                         igt_spinner_end(&hi.spin);
885                         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
886                                 struct drm_printer p =
887                                         drm_info_printer(i915->drm.dev);
888
889                                 pr_err("Failed to preempt over chain of %d\n",
890                                        count);
891                                 intel_engine_dump(engine, &p,
892                                                   "%s\n", engine->name);
893                                 goto err_wedged;
894                         }
895                         igt_spinner_end(&lo.spin);
896
897                         rq = igt_request_alloc(lo.ctx, engine);
898                         if (IS_ERR(rq))
899                                 goto err_wedged;
900                         i915_request_add(rq);
901                         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
902                                 struct drm_printer p =
903                                         drm_info_printer(i915->drm.dev);
904
905                                 pr_err("Failed to flush low priority chain of %d requests\n",
906                                        count);
907                                 intel_engine_dump(engine, &p,
908                                                   "%s\n", engine->name);
909                                 goto err_wedged;
910                         }
911                 }
912
913                 if (igt_live_test_end(&t)) {
914                         err = -EIO;
915                         goto err_wedged;
916                 }
917         }
918
919         err = 0;
920 err_client_lo:
921         preempt_client_fini(&lo);
922 err_client_hi:
923         preempt_client_fini(&hi);
924 err_unlock:
925         if (igt_flush_test(i915, I915_WAIT_LOCKED))
926                 err = -EIO;
927         intel_runtime_pm_put(i915, wakeref);
928         mutex_unlock(&i915->drm.struct_mutex);
929         return err;
930
931 err_wedged:
932         igt_spinner_end(&hi.spin);
933         igt_spinner_end(&lo.spin);
934         i915_gem_set_wedged(i915);
935         err = -EIO;
936         goto err_client_lo;
937 }
938
939 static int live_preempt_hang(void *arg)
940 {
941         struct drm_i915_private *i915 = arg;
942         struct i915_gem_context *ctx_hi, *ctx_lo;
943         struct igt_spinner spin_hi, spin_lo;
944         struct intel_engine_cs *engine;
945         enum intel_engine_id id;
946         intel_wakeref_t wakeref;
947         int err = -ENOMEM;
948
949         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
950                 return 0;
951
952         if (!intel_has_reset_engine(i915))
953                 return 0;
954
955         mutex_lock(&i915->drm.struct_mutex);
956         wakeref = intel_runtime_pm_get(i915);
957
958         if (igt_spinner_init(&spin_hi, i915))
959                 goto err_unlock;
960
961         if (igt_spinner_init(&spin_lo, i915))
962                 goto err_spin_hi;
963
964         ctx_hi = kernel_context(i915);
965         if (!ctx_hi)
966                 goto err_spin_lo;
967         ctx_hi->sched.priority =
968                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
969
970         ctx_lo = kernel_context(i915);
971         if (!ctx_lo)
972                 goto err_ctx_hi;
973         ctx_lo->sched.priority =
974                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
975
976         for_each_engine(engine, i915, id) {
977                 struct i915_request *rq;
978
979                 if (!intel_engine_has_preemption(engine))
980                         continue;
981
982                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
983                                                 MI_ARB_CHECK);
984                 if (IS_ERR(rq)) {
985                         err = PTR_ERR(rq);
986                         goto err_ctx_lo;
987                 }
988
989                 i915_request_add(rq);
990                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
991                         GEM_TRACE("lo spinner failed to start\n");
992                         GEM_TRACE_DUMP();
993                         i915_gem_set_wedged(i915);
994                         err = -EIO;
995                         goto err_ctx_lo;
996                 }
997
998                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
999                                                 MI_ARB_CHECK);
1000                 if (IS_ERR(rq)) {
1001                         igt_spinner_end(&spin_lo);
1002                         err = PTR_ERR(rq);
1003                         goto err_ctx_lo;
1004                 }
1005
1006                 init_completion(&engine->execlists.preempt_hang.completion);
1007                 engine->execlists.preempt_hang.inject_hang = true;
1008
1009                 i915_request_add(rq);
1010
1011                 if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
1012                                                  HZ / 10)) {
1013                         pr_err("Preemption did not occur within timeout!\n");
1014                         GEM_TRACE_DUMP();
1015                         i915_gem_set_wedged(i915);
1016                         err = -EIO;
1017                         goto err_ctx_lo;
1018                 }
1019
1020                 set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1021                 i915_reset_engine(engine, NULL);
1022                 clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1023
1024                 engine->execlists.preempt_hang.inject_hang = false;
1025
1026                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1027                         GEM_TRACE("hi spinner failed to start\n");
1028                         GEM_TRACE_DUMP();
1029                         i915_gem_set_wedged(i915);
1030                         err = -EIO;
1031                         goto err_ctx_lo;
1032                 }
1033
1034                 igt_spinner_end(&spin_hi);
1035                 igt_spinner_end(&spin_lo);
1036                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
1037                         err = -EIO;
1038                         goto err_ctx_lo;
1039                 }
1040         }
1041
1042         err = 0;
1043 err_ctx_lo:
1044         kernel_context_close(ctx_lo);
1045 err_ctx_hi:
1046         kernel_context_close(ctx_hi);
1047 err_spin_lo:
1048         igt_spinner_fini(&spin_lo);
1049 err_spin_hi:
1050         igt_spinner_fini(&spin_hi);
1051 err_unlock:
1052         igt_flush_test(i915, I915_WAIT_LOCKED);
1053         intel_runtime_pm_put(i915, wakeref);
1054         mutex_unlock(&i915->drm.struct_mutex);
1055         return err;
1056 }
1057
1058 static int random_range(struct rnd_state *rnd, int min, int max)
1059 {
1060         return i915_prandom_u32_max_state(max - min, rnd) + min;
1061 }
1062
1063 static int random_priority(struct rnd_state *rnd)
1064 {
1065         return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
1066 }
1067
1068 struct preempt_smoke {
1069         struct drm_i915_private *i915;
1070         struct i915_gem_context **contexts;
1071         struct intel_engine_cs *engine;
1072         struct drm_i915_gem_object *batch;
1073         unsigned int ncontext;
1074         struct rnd_state prng;
1075         unsigned long count;
1076 };
1077
1078 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
1079 {
1080         return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
1081                                                           &smoke->prng)];
1082 }
1083
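/*
 * Submit a single request from ctx at the given priority, optionally
 * executing the page of MI_ARB_CHECK arbitration points in @batch.
 */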
1084 static int smoke_submit(struct preempt_smoke *smoke,
1085                         struct i915_gem_context *ctx, int prio,
1086                         struct drm_i915_gem_object *batch)
1087 {
1088         struct i915_request *rq;
1089         struct i915_vma *vma = NULL;
1090         int err = 0;
1091
1092         if (batch) {
1093                 vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
1094                 if (IS_ERR(vma))
1095                         return PTR_ERR(vma);
1096
1097                 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1098                 if (err)
1099                         return err;
1100         }
1101
1102         ctx->sched.priority = prio;
1103
1104         rq = igt_request_alloc(ctx, smoke->engine);
1105         if (IS_ERR(rq)) {
1106                 err = PTR_ERR(rq);
1107                 goto unpin;
1108         }
1109
1110         if (vma) {
1111                 err = rq->engine->emit_bb_start(rq,
1112                                                 vma->node.start,
1113                                                 PAGE_SIZE, 0);
1114                 if (!err)
1115                         err = i915_vma_move_to_active(vma, rq, 0);
1116         }
1117
1118         i915_request_add(rq);
1119
1120 unpin:
1121         if (vma)
1122                 i915_vma_unpin(vma);
1123
1124         return err;
1125 }
1126
1127 static int smoke_crescendo_thread(void *arg)
1128 {
1129         struct preempt_smoke *smoke = arg;
1130         IGT_TIMEOUT(end_time);
1131         unsigned long count;
1132
1133         count = 0;
1134         do {
1135                 struct i915_gem_context *ctx = smoke_context(smoke);
1136                 int err;
1137
1138                 mutex_lock(&smoke->i915->drm.struct_mutex);
1139                 err = smoke_submit(smoke,
1140                                    ctx, count % I915_PRIORITY_MAX,
1141                                    smoke->batch);
1142                 mutex_unlock(&smoke->i915->drm.struct_mutex);
1143                 if (err)
1144                         return err;
1145
1146                 count++;
1147         } while (!__igt_timeout(end_time, NULL));
1148
1149         smoke->count = count;
1150         return 0;
1151 }
1152
1153 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
1154 #define BATCH BIT(0)
1155 {
1156         struct task_struct *tsk[I915_NUM_ENGINES] = {};
1157         struct preempt_smoke arg[I915_NUM_ENGINES];
1158         struct intel_engine_cs *engine;
1159         enum intel_engine_id id;
1160         unsigned long count;
1161         int err = 0;
1162
1163         mutex_unlock(&smoke->i915->drm.struct_mutex);
1164
1165         for_each_engine(engine, smoke->i915, id) {
1166                 arg[id] = *smoke;
1167                 arg[id].engine = engine;
1168                 if (!(flags & BATCH))
1169                         arg[id].batch = NULL;
1170                 arg[id].count = 0;
1171
1172                 tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
1173                                       "igt/smoke:%d", id);
1174                 if (IS_ERR(tsk[id])) {
1175                         err = PTR_ERR(tsk[id]);
1176                         break;
1177                 }
1178                 get_task_struct(tsk[id]);
1179         }
1180
1181         count = 0;
1182         for_each_engine(engine, smoke->i915, id) {
1183                 int status;
1184
1185                 if (IS_ERR_OR_NULL(tsk[id]))
1186                         continue;
1187
1188                 status = kthread_stop(tsk[id]);
1189                 if (status && !err)
1190                         err = status;
1191
1192                 count += arg[id].count;
1193
1194                 put_task_struct(tsk[id]);
1195         }
1196
1197         mutex_lock(&smoke->i915->drm.struct_mutex);
1198
1199         pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
1200                 count, flags,
1201                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1202         return err;
1203 }
1204
1205 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
1206 {
1207         enum intel_engine_id id;
1208         IGT_TIMEOUT(end_time);
1209         unsigned long count;
1210
1211         count = 0;
1212         do {
1213                 for_each_engine(smoke->engine, smoke->i915, id) {
1214                         struct i915_gem_context *ctx = smoke_context(smoke);
1215                         int err;
1216
1217                         err = smoke_submit(smoke,
1218                                            ctx, random_priority(&smoke->prng),
1219                                            flags & BATCH ? smoke->batch : NULL);
1220                         if (err)
1221                                 return err;
1222
1223                         count++;
1224                 }
1225         } while (!__igt_timeout(end_time, NULL));
1226
1227         pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
1228                 count, flags,
1229                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1230         return 0;
1231 }
1232
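/*
 * Preemption smoketest: flood the scheduler with requests spread across
 * many contexts, first from per-engine threads cycling through increasing
 * priorities (crescendo) and then with random priorities, each phase run
 * both with and without the MI_ARB_CHECK batch.
 */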
1233 static int live_preempt_smoke(void *arg)
1234 {
1235         struct preempt_smoke smoke = {
1236                 .i915 = arg,
1237                 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
1238                 .ncontext = 1024,
1239         };
1240         const unsigned int phase[] = { 0, BATCH };
1241         intel_wakeref_t wakeref;
1242         struct igt_live_test t;
1243         int err = -ENOMEM;
1244         u32 *cs;
1245         int n;
1246
1247         if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
1248                 return 0;
1249
1250         smoke.contexts = kmalloc_array(smoke.ncontext,
1251                                        sizeof(*smoke.contexts),
1252                                        GFP_KERNEL);
1253         if (!smoke.contexts)
1254                 return -ENOMEM;
1255
1256         mutex_lock(&smoke.i915->drm.struct_mutex);
1257         wakeref = intel_runtime_pm_get(smoke.i915);
1258
1259         smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
1260         if (IS_ERR(smoke.batch)) {
1261                 err = PTR_ERR(smoke.batch);
1262                 goto err_unlock;
1263         }
1264
1265         cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
1266         if (IS_ERR(cs)) {
1267                 err = PTR_ERR(cs);
1268                 goto err_batch;
1269         }
1270         for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
1271                 cs[n] = MI_ARB_CHECK;
1272         cs[n] = MI_BATCH_BUFFER_END;
1273         i915_gem_object_flush_map(smoke.batch);
1274         i915_gem_object_unpin_map(smoke.batch);
1275
1276         if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
1277                 err = -EIO;
1278                 goto err_batch;
1279         }
1280
1281         for (n = 0; n < smoke.ncontext; n++) {
1282                 smoke.contexts[n] = kernel_context(smoke.i915);
1283                 if (!smoke.contexts[n])
1284                         goto err_ctx;
1285         }
1286
1287         for (n = 0; n < ARRAY_SIZE(phase); n++) {
1288                 err = smoke_crescendo(&smoke, phase[n]);
1289                 if (err)
1290                         goto err_ctx;
1291
1292                 err = smoke_random(&smoke, phase[n]);
1293                 if (err)
1294                         goto err_ctx;
1295         }
1296
1297 err_ctx:
1298         if (igt_live_test_end(&t))
1299                 err = -EIO;
1300
1301         for (n = 0; n < smoke.ncontext; n++) {
1302                 if (!smoke.contexts[n])
1303                         break;
1304                 kernel_context_close(smoke.contexts[n]);
1305         }
1306
1307 err_batch:
1308         i915_gem_object_put(smoke.batch);
1309 err_unlock:
1310         intel_runtime_pm_put(smoke.i915, wakeref);
1311         mutex_unlock(&smoke.i915->drm.struct_mutex);
1312         kfree(smoke.contexts);
1313
1314         return err;
1315 }
1316
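/*
 * Measure the latency of nop requests submitted through a virtual engine
 * built from @siblings, using up to @nctx contexts. With CHAIN set, all
 * requests for one context are queued before moving on to the next;
 * otherwise the contexts are interleaved request by request.
 */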
1317 static int nop_virtual_engine(struct drm_i915_private *i915,
1318                               struct intel_engine_cs **siblings,
1319                               unsigned int nsibling,
1320                               unsigned int nctx,
1321                               unsigned int flags)
1322 #define CHAIN BIT(0)
1323 {
1324         IGT_TIMEOUT(end_time);
1325         struct i915_request *request[16];
1326         struct i915_gem_context *ctx[16];
1327         struct intel_context *ve[16];
1328         unsigned long n, prime, nc;
1329         struct igt_live_test t;
1330         ktime_t times[2] = {};
1331         int err;
1332
1333         GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
1334
1335         for (n = 0; n < nctx; n++) {
1336                 ctx[n] = kernel_context(i915);
1337                 if (!ctx[n]) {
1338                         err = -ENOMEM;
1339                         nctx = n;
1340                         goto out;
1341                 }
1342
1343                 ve[n] = intel_execlists_create_virtual(ctx[n],
1344                                                        siblings, nsibling);
1345                 if (IS_ERR(ve[n])) {
1346                         kernel_context_close(ctx[n]);
1347                         err = PTR_ERR(ve[n]);
1348                         nctx = n;
1349                         goto out;
1350                 }
1351
1352                 err = intel_context_pin(ve[n]);
1353                 if (err) {
1354                         intel_context_put(ve[n]);
1355                         kernel_context_close(ctx[n]);
1356                         nctx = n;
1357                         goto out;
1358                 }
1359         }
1360
1361         err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
1362         if (err)
1363                 goto out;
1364
1365         for_each_prime_number_from(prime, 1, 8192) {
1366                 times[1] = ktime_get_raw();
1367
1368                 if (flags & CHAIN) {
1369                         for (nc = 0; nc < nctx; nc++) {
1370                                 for (n = 0; n < prime; n++) {
1371                                         request[nc] =
1372                                                 i915_request_create(ve[nc]);
1373                                         if (IS_ERR(request[nc])) {
1374                                                 err = PTR_ERR(request[nc]);
1375                                                 goto out;
1376                                         }
1377
1378                                         i915_request_add(request[nc]);
1379                                 }
1380                         }
1381                 } else {
1382                         for (n = 0; n < prime; n++) {
1383                                 for (nc = 0; nc < nctx; nc++) {
1384                                         request[nc] =
1385                                                 i915_request_create(ve[nc]);
1386                                         if (IS_ERR(request[nc])) {
1387                                                 err = PTR_ERR(request[nc]);
1388                                                 goto out;
1389                                         }
1390
1391                                         i915_request_add(request[nc]);
1392                                 }
1393                         }
1394                 }
1395
1396                 for (nc = 0; nc < nctx; nc++) {
1397                         if (i915_request_wait(request[nc],
1398                                               I915_WAIT_LOCKED,
1399                                               HZ / 10) < 0) {
1400                                 pr_err("%s(%s): wait for %llx:%lld timed out\n",
1401                                        __func__, ve[0]->engine->name,
1402                                        request[nc]->fence.context,
1403                                        request[nc]->fence.seqno);
1404
1405                                 GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1406                                           __func__, ve[0]->engine->name,
1407                                           request[nc]->fence.context,
1408                                           request[nc]->fence.seqno);
1409                                 GEM_TRACE_DUMP();
1410                                 i915_gem_set_wedged(i915);
1411                                 break;
1412                         }
1413                 }
1414
1415                 times[1] = ktime_sub(ktime_get_raw(), times[1]);
1416                 if (prime == 1)
1417                         times[0] = times[1];
1418
1419                 if (__igt_timeout(end_time, NULL))
1420                         break;
1421         }
1422
1423         err = igt_live_test_end(&t);
1424         if (err)
1425                 goto out;
1426
1427         pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
1428                 nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
1429                 prime, div64_u64(ktime_to_ns(times[1]), prime));
1430
1431 out:
1432         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1433                 err = -EIO;
1434
1435         for (nc = 0; nc < nctx; nc++) {
1436                 intel_context_unpin(ve[nc]);
1437                 intel_context_put(ve[nc]);
1438                 kernel_context_close(ctx[nc]);
1439         }
1440         return err;
1441 }
1442
1443 static int live_virtual_engine(void *arg)
1444 {
1445         struct drm_i915_private *i915 = arg;
1446         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1447         struct intel_engine_cs *engine;
1448         enum intel_engine_id id;
1449         unsigned int class, inst;
1450         int err = -ENODEV;
1451
1452         if (USES_GUC_SUBMISSION(i915))
1453                 return 0;
1454
1455         mutex_lock(&i915->drm.struct_mutex);
1456
1457         for_each_engine(engine, i915, id) {
1458                 err = nop_virtual_engine(i915, &engine, 1, 1, 0);
1459                 if (err) {
1460                         pr_err("Failed to wrap engine %s: err=%d\n",
1461                                engine->name, err);
1462                         goto out_unlock;
1463                 }
1464         }
1465
1466         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1467                 int nsibling, n;
1468
1469                 nsibling = 0;
1470                 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1471                         if (!i915->engine_class[class][inst])
1472                                 continue;
1473
1474                         siblings[nsibling++] = i915->engine_class[class][inst];
1475                 }
1476                 if (nsibling < 2)
1477                         continue;
1478
1479                 for (n = 1; n <= nsibling + 1; n++) {
1480                         err = nop_virtual_engine(i915, siblings, nsibling,
1481                                                  n, 0);
1482                         if (err)
1483                                 goto out_unlock;
1484                 }
1485
1486                 err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
1487                 if (err)
1488                         goto out_unlock;
1489         }
1490
1491 out_unlock:
1492         mutex_unlock(&i915->drm.struct_mutex);
1493         return err;
1494 }
1495
1496 static int mask_virtual_engine(struct drm_i915_private *i915,
1497                                struct intel_engine_cs **siblings,
1498                                unsigned int nsibling)
1499 {
1500         struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
1501         struct i915_gem_context *ctx;
1502         struct intel_context *ve;
1503         struct igt_live_test t;
1504         unsigned int n;
1505         int err;
1506
1507         /*
1508          * Check that by setting the execution mask on a request, we can
1509          * restrict it to our desired engine within the virtual engine.
1510          */
1511
1512         ctx = kernel_context(i915);
1513         if (!ctx)
1514                 return -ENOMEM;
1515
1516         ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
1517         if (IS_ERR(ve)) {
1518                 err = PTR_ERR(ve);
1519                 goto out_close;
1520         }
1521
1522         err = intel_context_pin(ve);
1523         if (err)
1524                 goto out_put;
1525
1526         err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
1527         if (err)
1528                 goto out_unpin;
1529
1530         for (n = 0; n < nsibling; n++) {
1531                 request[n] = i915_request_create(ve);
1532                 if (IS_ERR(request[n])) {
1533                         err = PTR_ERR(request[n]);
1534                         nsibling = n;
1535                         goto out;
1536                 }
1537
1538                 /* Reverse order as it's more likely to be unnatural */
1539                 request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
1540
1541                 i915_request_get(request[n]);
1542                 i915_request_add(request[n]);
1543         }
1544
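             /*
              * Wait for each request and verify that it ran on the single
              * sibling selected by its execution_mask.
              */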
1545         for (n = 0; n < nsibling; n++) {
1546                 if (i915_request_wait(request[n], I915_WAIT_LOCKED, HZ / 10) < 0) {
1547                         pr_err("%s(%s): wait for %llx:%lld timed out\n",
1548                                __func__, ve->engine->name,
1549                                request[n]->fence.context,
1550                                request[n]->fence.seqno);
1551
1552                         GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
1553                                   __func__, ve->engine->name,
1554                                   request[n]->fence.context,
1555                                   request[n]->fence.seqno);
1556                         GEM_TRACE_DUMP();
1557                         i915_gem_set_wedged(i915);
1558                         err = -EIO;
1559                         goto out;
1560                 }
1561
1562                 if (request[n]->engine != siblings[nsibling - n - 1]) {
1563                         pr_err("Executed on wrong sibling '%s', expected '%s'\n",
1564                                request[n]->engine->name,
1565                                siblings[nsibling - n - 1]->name);
1566                         err = -EINVAL;
1567                         goto out;
1568                 }
1569         }
1570
1571         err = igt_live_test_end(&t);
1572         if (err)
1573                 goto out;
1574
1575 out:
1576         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1577                 err = -EIO;
1578
1579         for (n = 0; n < nsibling; n++)
1580                 i915_request_put(request[n]);
1581
1582 out_unpin:
1583         intel_context_unpin(ve);
1584 out_put:
1585         intel_context_put(ve);
1586 out_close:
1587         kernel_context_close(ctx);
1588         return err;
1589 }
1590
1591 static int live_virtual_mask(void *arg)
1592 {
1593         struct drm_i915_private *i915 = arg;
1594         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1595         unsigned int class, inst;
1596         int err = 0;
1597
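             /*
              * For every engine class with at least two instances, check that
              * the execution mask can steer requests to a chosen sibling
              * (see mask_virtual_engine() above).
              */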
1598         if (USES_GUC_SUBMISSION(i915))
1599                 return 0;
1600
1601         mutex_lock(&i915->drm.struct_mutex);
1602
1603         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1604                 unsigned int nsibling;
1605
1606                 nsibling = 0;
1607                 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1608                         if (!i915->engine_class[class][inst])
1609                                 break;
1610
1611                         siblings[nsibling++] = i915->engine_class[class][inst];
1612                 }
1613                 if (nsibling < 2)
1614                         continue;
1615
1616                 err = mask_virtual_engine(i915, siblings, nsibling);
1617                 if (err)
1618                         goto out_unlock;
1619         }
1620
1621 out_unlock:
1622         mutex_unlock(&i915->drm.struct_mutex);
1623         return err;
1624 }
1625
1626 static int bond_virtual_engine(struct drm_i915_private *i915,
1627                                unsigned int class,
1628                                struct intel_engine_cs **siblings,
1629                                unsigned int nsibling,
1630                                unsigned int flags)
1631 #define BOND_SCHEDULE BIT(0)
1632 {
1633         struct intel_engine_cs *master;
1634         struct i915_gem_context *ctx;
1635         struct i915_request *rq[16];
1636         enum intel_engine_id id;
1637         unsigned long n;
1638         int err;
1639
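             /*
              * For each engine outside the target class, submit a master
              * request followed by one bonded request per sibling; once the
              * master runs, each bonded request must execute on its
              * designated sibling.
              */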
1640         GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
1641
1642         ctx = kernel_context(i915);
1643         if (!ctx)
1644                 return -ENOMEM;
1645
1646         err = 0;
1647         rq[0] = ERR_PTR(-ENOMEM);
1648         for_each_engine(master, i915, id) {
1649                 struct i915_sw_fence fence = {};
1650
1651                 if (master->class == class)
1652                         continue;
1653
1654                 memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
1655
1656                 rq[0] = igt_request_alloc(ctx, master);
1657                 if (IS_ERR(rq[0])) {
1658                         err = PTR_ERR(rq[0]);
1659                         goto out;
1660                 }
1661                 i915_request_get(rq[0]);
1662
1663                 if (flags & BOND_SCHEDULE) {
1664                         onstack_fence_init(&fence);
1665                         err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
1666                                                                &fence,
1667                                                                GFP_KERNEL);
1668                 }
1669                 i915_request_add(rq[0]);
1670                 if (err < 0)
1671                         goto out;
1672
1673                 for (n = 0; n < nsibling; n++) {
1674                         struct intel_context *ve;
1675
1676                         ve = intel_execlists_create_virtual(ctx,
1677                                                             siblings,
1678                                                             nsibling);
1679                         if (IS_ERR(ve)) {
1680                                 err = PTR_ERR(ve);
1681                                 onstack_fence_fini(&fence);
1682                                 goto out;
1683                         }
1684
1685                         err = intel_virtual_engine_attach_bond(ve->engine,
1686                                                                master,
1687                                                                siblings[n]);
1688                         if (err) {
1689                                 intel_context_put(ve);
1690                                 onstack_fence_fini(&fence);
1691                                 goto out;
1692                         }
1693
1694                         err = intel_context_pin(ve);
1695                         intel_context_put(ve);
1696                         if (err) {
1697                                 onstack_fence_fini(&fence);
1698                                 goto out;
1699                         }
1700
1701                         rq[n + 1] = i915_request_create(ve);
1702                         intel_context_unpin(ve);
1703                         if (IS_ERR(rq[n + 1])) {
1704                                 err = PTR_ERR(rq[n + 1]);
1705                                 onstack_fence_fini(&fence);
1706                                 goto out;
1707                         }
1708                         i915_request_get(rq[n + 1]);
1709
1710                         err = i915_request_await_execution(rq[n + 1],
1711                                                            &rq[0]->fence,
1712                                                            ve->engine->bond_execute);
1713                         i915_request_add(rq[n + 1]);
1714                         if (err < 0) {
1715                                 onstack_fence_fini(&fence);
1716                                 goto out;
1717                         }
1718                 }
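                     /*
                      * Release the on-stack fence (if BOND_SCHEDULE held back
                      * the master) so that the master and its bonded requests
                      * may now be submitted.
                      */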
1719                 onstack_fence_fini(&fence);
1720
1721                 if (i915_request_wait(rq[0],
1722                                       I915_WAIT_LOCKED,
1723                                       HZ / 10) < 0) {
1724                         pr_err("Master request did not execute (on %s)!\n",
1725                                rq[0]->engine->name);
1726                         err = -EIO;
1727                         goto out;
1728                 }
1729
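                     /*
                      * All bonded pairs should now execute, each on the
                      * sibling it was bonded to.
                      */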
1730                 for (n = 0; n < nsibling; n++) {
1731                         if (i915_request_wait(rq[n + 1],
1732                                               I915_WAIT_LOCKED,
1733                                               MAX_SCHEDULE_TIMEOUT) < 0) {
1734                                 err = -EIO;
1735                                 goto out;
1736                         }
1737
1738                         if (rq[n + 1]->engine != siblings[n]) {
1739                                 pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
1740                                        siblings[n]->name,
1741                                        rq[n + 1]->engine->name,
1742                                        rq[0]->engine->name);
1743                                 err = -EINVAL;
1744                                 goto out;
1745                         }
1746                 }
1747
1748                 for (n = 0; !IS_ERR(rq[n]); n++)
1749                         i915_request_put(rq[n]);
1750                 rq[0] = ERR_PTR(-ENOMEM);
1751         }
1752
1753 out:
1754         for (n = 0; !IS_ERR(rq[n]); n++)
1755                 i915_request_put(rq[n]);
1756         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1757                 err = -EIO;
1758
1759         kernel_context_close(ctx);
1760         return err;
1761 }
1762
1763 static int live_virtual_bond(void *arg)
1764 {
1765         static const struct phase {
1766                 const char *name;
1767                 unsigned int flags;
1768         } phases[] = {
1769                 { "", 0 },
1770                 { "schedule", BOND_SCHEDULE },
1771                 { },
1772         };
1773         struct drm_i915_private *i915 = arg;
1774         struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
1775         unsigned int class, inst;
1776         int err = 0;
1777
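             /*
              * For every class with at least two engines, check bonded
              * submission both with the master submitted immediately and with
              * its submission held back behind a fence (the "schedule" phase).
              */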
1778         if (USES_GUC_SUBMISSION(i915))
1779                 return 0;
1780
1781         mutex_lock(&i915->drm.struct_mutex);
1782
1783         for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
1784                 const struct phase *p;
1785                 int nsibling;
1786
1787                 nsibling = 0;
1788                 for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
1789                         if (!i915->engine_class[class][inst])
1790                                 break;
1791
1792                         GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
1793                         siblings[nsibling++] = i915->engine_class[class][inst];
1794                 }
1795                 if (nsibling < 2)
1796                         continue;
1797
1798                 for (p = phases; p->name; p++) {
1799                         err = bond_virtual_engine(i915,
1800                                                   class, siblings, nsibling,
1801                                                   p->flags);
1802                         if (err) {
1803                                 pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
1804                                        __func__, p->name, class, nsibling, err);
1805                                 goto out_unlock;
1806                         }
1807                 }
1808         }
1809
1810 out_unlock:
1811         mutex_unlock(&i915->drm.struct_mutex);
1812         return err;
1813 }
1814
1815 int intel_execlists_live_selftests(struct drm_i915_private *i915)
1816 {
1817         static const struct i915_subtest tests[] = {
1818                 SUBTEST(live_sanitycheck),
1819                 SUBTEST(live_busywait_preempt),
1820                 SUBTEST(live_preempt),
1821                 SUBTEST(live_late_preempt),
1822                 SUBTEST(live_suppress_self_preempt),
1823                 SUBTEST(live_suppress_wait_preempt),
1824                 SUBTEST(live_chain_preempt),
1825                 SUBTEST(live_preempt_hang),
1826                 SUBTEST(live_preempt_smoke),
1827                 SUBTEST(live_virtual_engine),
1828                 SUBTEST(live_virtual_mask),
1829                 SUBTEST(live_virtual_bond),
1830         };
1831
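             /* These tests require execlists submission and an unwedged GPU. */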
1832         if (!HAS_EXECLISTS(i915))
1833                 return 0;
1834
1835         if (i915_terminally_wedged(i915))
1836                 return 0;
1837
1838         return i915_subtests(tests, i915);
1839 }