/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static int igt_add_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request;
        int err = -ENOMEM;

        /* Basic preliminary test to create a request and let it loose! */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS],
                               i915->kernel_context,
                               HZ / 10);
        if (!request)
                goto out_unlock;

        i915_add_request(request);

        err = 0;
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_wait_request(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request;
        int err = -EINVAL;

        /* Submit a request, then wait upon it */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                err = -ENOMEM;
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
                pr_err("request wait succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_gem_request_completed(request)) {
                pr_err("request completed before submit!!\n");
                goto out_unlock;
        }

        i915_add_request(request);

        if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
                goto out_unlock;
        }

        if (i915_gem_request_completed(request)) {
                pr_err("request completed immediately!\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
                pr_err("request wait succeeded (expected timeout!)\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out!\n");
                goto out_unlock;
        }

        if (!i915_gem_request_completed(request)) {
                pr_err("request not complete after waiting!\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out when already complete!\n");
                goto out_unlock;
        }

        err = 0;
out_unlock:
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_fence_wait(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request;
        int err = -EINVAL;

        /* Submit a request, treat it as a fence and wait upon it */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                err = -ENOMEM;
                goto out_locked;
        }
        mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

        if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
                pr_err("fence wait success before submit (expected timeout)!\n");
                goto out_device;
        }

        mutex_lock(&i915->drm.struct_mutex);
        i915_add_request(request);
        mutex_unlock(&i915->drm.struct_mutex);

        if (dma_fence_is_signaled(&request->fence)) {
                pr_err("fence signaled immediately!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
                pr_err("fence wait success after submit (expected timeout)!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out (expected success)!\n");
                goto out_device;
        }

        if (!dma_fence_is_signaled(&request->fence)) {
                pr_err("fence unsignaled after waiting!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out when complete (expected success)!\n");
                goto out_device;
        }

        err = 0;
out_device:
        mutex_lock(&i915->drm.struct_mutex);
out_locked:
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_request_rewind(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request, *vip;
        struct i915_gem_context *ctx[2];
        int err = -EINVAL;

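        /* Check that we can reorder requests that have not yet reached the
         * hardware, simulating preemption of a low priority request by a
         * high priority "vip" request.
         */
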
        mutex_lock(&i915->drm.struct_mutex);
        ctx[0] = mock_context(i915, "A");
        request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
        if (!request) {
                err = -ENOMEM;
                goto err_context_0;
        }

        i915_gem_request_get(request);
        i915_add_request(request);

        ctx[1] = mock_context(i915, "B");
        vip = mock_request(i915->engine[RCS], ctx[1], 0);
        if (!vip) {
                err = -ENOMEM;
                goto err_context_1;
        }

        /* Simulate preemption by manual reordering */
        if (!mock_cancel_request(request)) {
                pr_err("failed to cancel request (already executed)!\n");
                i915_add_request(vip);
                goto err_context_1;
        }
        i915_gem_request_get(vip);
        i915_add_request(vip);
        request->engine->submit_request(request);

        mutex_unlock(&i915->drm.struct_mutex);

        if (i915_wait_request(vip, 0, HZ) == -ETIME) {
                pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
                       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
                goto err;
        }

        if (i915_gem_request_completed(request)) {
                pr_err("low priority request already completed\n");
                goto err;
        }

        err = 0;
err:
        i915_gem_request_put(vip);
        mutex_lock(&i915->drm.struct_mutex);
err_context_1:
        mock_context_close(ctx[1]);
        i915_gem_request_put(request);
err_context_0:
        mock_context_close(ctx[0]);
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

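/*
 * Entry point for the request selftests run against a mock (software only)
 * GEM device, so they can be exercised without any hardware.
 */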
int i915_gem_request_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_add_request),
                SUBTEST(igt_wait_request),
                SUBTEST(igt_fence_wait),
                SUBTEST(igt_request_rewind),
        };
        struct drm_i915_private *i915;
        int err;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        err = i915_subtests(tests, i915);
        drm_dev_unref(&i915->drm);

        return err;
}

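/*
 * live_test captures the GPU state (reset count, missed interrupts) around
 * a subtest, so that end_live_test() can verify the test left the GPU idle
 * and did not provoke a reset or miss any interrupts.
 */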
struct live_test {
        struct drm_i915_private *i915;
        const char *func;
        const char *name;

        unsigned int reset_count;
};

static int begin_live_test(struct live_test *t,
                           struct drm_i915_private *i915,
                           const char *func,
                           const char *name)
{
        int err;

        t->i915 = i915;
        t->func = func;
        t->name = name;

        err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
        if (err) {
                pr_err("%s(%s): failed to idle before, with err=%d!\n",
                       func, name, err);
                return err;
        }

        i915->gpu_error.missed_irq_rings = 0;
        t->reset_count = i915_reset_count(&i915->gpu_error);

        return 0;
}

static int end_live_test(struct live_test *t)
{
        struct drm_i915_private *i915 = t->i915;

        i915_gem_retire_requests(i915);

        if (wait_for(intel_engines_are_idle(i915), 10)) {
                pr_err("%s(%s): GPU not idle\n", t->func, t->name);
                return -EIO;
        }

        if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
                pr_err("%s(%s): GPU was reset %d times!\n",
                       t->func, t->name,
                       i915_reset_count(&i915->gpu_error) - t->reset_count);
                return -EIO;
        }

        if (i915->gpu_error.missed_irq_rings) {
                pr_err("%s(%s): Missed interrupts on engines %lx\n",
                       t->func, t->name, i915->gpu_error.missed_irq_rings);
                return -EIO;
        }

        return 0;
}

static int live_nop_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_test t;
        unsigned int id;
        int err = 0;

        /* Submit various sized batches of nop requests, to each engine
         * (individually), and wait for them to complete. We can check
         * the overhead of submitting requests to the hardware.
         */

        mutex_lock(&i915->drm.struct_mutex);

        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
                struct drm_i915_gem_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};

                err = begin_live_test(&t, i915, __func__, engine->name);
                if (err)
                        goto out_unlock;

                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = i915_gem_request_alloc(engine,
                                                                 i915->kernel_context);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_unlock;
                                }

                                /* This space is left intentionally blank.
                                 *
                                 * We do not actually want to perform any
                                 * action with this request, we just want
                                 * to measure the latency in allocation
                                 * and submission of our breadcrumbs -
                                 * ensuring that the bare request is sufficient
                                 * for the system to work (i.e. proper HEAD
                                 * tracking of the rings, interrupt handling,
                                 * etc). It also gives us the lowest bounds
                                 * for latency.
                                 */

                                i915_add_request(request);
                        }
                        i915_wait_request(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = end_live_test(&t);
                if (err)
                        goto out_unlock;

                pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

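/*
 * Build a trivial batch buffer: a single page containing only
 * MI_BATCH_BUFFER_END, pinned into the global GTT so that it can be
 * executed by empty_request() below.
 */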
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 *cmd;
        int err;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }
        *cmd = MI_BATCH_BUFFER_END;
        i915_gem_object_unpin_map(obj);

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        if (err)
                goto err;

        vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
        if (err)
                goto err;

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

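/*
 * Submit a request that executes the empty batch on the given engine,
 * leaving the request on the timeline for the caller to wait upon.
 */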
static struct drm_i915_gem_request *
empty_request(struct intel_engine_cs *engine,
              struct i915_vma *batch)
{
        struct drm_i915_gem_request *request;
        int err;

        request = i915_gem_request_alloc(engine,
                                         engine->i915->kernel_context);
        if (IS_ERR(request))
                return request;

        err = engine->emit_flush(request, EMIT_INVALIDATE);
        if (err)
                goto out_request;

        err = i915_switch_context(request);
        if (err)
                goto out_request;

        err = engine->emit_bb_start(request,
                                    batch->node.start,
                                    batch->node.size,
                                    I915_DISPATCH_SECURE);
        if (err)
                goto out_request;

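        /* A request, once allocated, must always be submitted; if
         * construction failed, submit it as-is and report the error to
         * the caller instead.
         */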
out_request:
        __i915_add_request(request, err == 0);
        return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_test t;
        struct i915_vma *batch;
        unsigned int id;
        int err = 0;

        /* Submit various sized batches of a single, empty batchbuffer, to
         * each engine (individually), and wait for them to complete. We
         * can check the overhead of executing batches on the hardware.
         */

        mutex_lock(&i915->drm.struct_mutex);

        batch = empty_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
                struct drm_i915_gem_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};

                err = begin_live_test(&t, i915, __func__, engine->name);
                if (err)
                        goto out_batch;

                /* Warmup / preload */
                request = empty_request(engine, batch);
                if (IS_ERR(request)) {
                        err = PTR_ERR(request);
                        goto out_batch;
                }
                i915_wait_request(request,
                                  I915_WAIT_LOCKED,
                                  MAX_SCHEDULE_TIMEOUT);

                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = empty_request(engine, batch);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_batch;
                                }
                        }
                        i915_wait_request(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = end_live_test(&t);
                if (err)
                        goto out_batch;

                pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_batch:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

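/*
 * Build a self-referencing batch: its sole instruction is a
 * MI_BATCH_BUFFER_START pointing back at itself, so the batch spins until
 * recursive_batch_resolve() overwrites the jump with MI_BATCH_BUFFER_END.
 * This lets us keep a request busy for as long as the test requires.
 */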
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx = i915->kernel_context;
        struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
        struct i915_vma *vma;
        u32 *cmd;
        int err;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err;

        err = i915_gem_object_set_to_wc_domain(obj, true);
        if (err)
                goto err;

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

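        /* The MI_BATCH_BUFFER_START encoding (length and addressing mode)
         * varies with the hardware generation.
         */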
        if (gen >= 8) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
                *cmd++ = lower_32_bits(vma->node.start);
                *cmd++ = upper_32_bits(vma->node.start);
        } else if (gen >= 6) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
                *cmd++ = lower_32_bits(vma->node.start);
        } else if (gen >= 4) {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
                *cmd++ = lower_32_bits(vma->node.start);
        } else {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
                *cmd++ = lower_32_bits(vma->node.start);
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */

        wmb();
        i915_gem_object_unpin_map(obj);

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

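/*
 * Terminate a spinning recursive batch by overwriting its first
 * instruction with MI_BATCH_BUFFER_END, allowing the request to complete.
 */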
static int recursive_batch_resolve(struct i915_vma *batch)
{
        u32 *cmd;

        cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        *cmd = MI_BATCH_BUFFER_END;
        wmb();

        i915_gem_object_unpin_map(batch->obj);

        return 0;
}

static int live_all_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
        struct i915_vma *batch;
        struct live_test t;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines simultaneously. We
         * send a recursive batch to each engine - checking that we don't
         * block doing so, and that they don't complete too soon.
         */

        mutex_lock(&i915->drm.struct_mutex);

        err = begin_live_test(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        batch = recursive_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                request[id] = i915_gem_request_alloc(engine,
                                                     i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL; /* don't put an ERR_PTR on cleanup */
                        pr_err("%s: Request allocation failed with err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                err = engine->emit_flush(request[id], EMIT_INVALIDATE);
                GEM_BUG_ON(err);

                err = i915_switch_context(request[id]);
                GEM_BUG_ON(err);

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

                if (!i915_gem_object_has_active_reference(batch->obj)) {
                        i915_gem_object_get(batch->obj);
                        i915_gem_object_set_active_reference(batch->obj);
                }

                i915_vma_move_to_active(batch, request[id], 0);
                i915_gem_request_get(request[id]);
                i915_add_request(request[id]);
        }

        for_each_engine(engine, i915, id) {
                if (i915_gem_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }
        }

        err = recursive_batch_resolve(batch);
        if (err) {
                pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
                goto out_request;
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                timeout = i915_wait_request(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_gem_request_completed(request[id]));
                i915_gem_request_put(request[id]);
                request[id] = NULL;
        }

        err = end_live_test(&t);

out_request:
        for_each_engine(engine, i915, id)
                if (request[id])
                        i915_gem_request_put(request[id]);
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int live_sequential_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
        struct drm_i915_gem_request *prev = NULL;
        struct intel_engine_cs *engine;
        struct live_test t;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines sequentially, such
         * that each successive request waits for the earlier ones. This
         * tests that we don't execute requests out of order, even though
         * they are running on independent engines.
         */

        mutex_lock(&i915->drm.struct_mutex);

        err = begin_live_test(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        for_each_engine(engine, i915, id) {
                struct i915_vma *batch;

                batch = recursive_batch(i915);
                if (IS_ERR(batch)) {
                        err = PTR_ERR(batch);
                        pr_err("%s: Unable to create batch for %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_unlock;
                }

                request[id] = i915_gem_request_alloc(engine,
                                                     i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL; /* don't deref an ERR_PTR on cleanup */
                        pr_err("%s: Request allocation failed for %s with err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                if (prev) {
                        err = i915_gem_request_await_dma_fence(request[id],
                                                               &prev->fence);
                        if (err) {
                                i915_add_request(request[id]);
                                /* We hold no reference of our own yet, so
                                 * the request is not ours to put on cleanup.
                                 */
                                request[id] = NULL;
                                pr_err("%s: Request await failed for %s with err=%d\n",
                                       __func__, engine->name, err);
                                goto out_request;
                        }
                }

                err = engine->emit_flush(request[id], EMIT_INVALIDATE);
                GEM_BUG_ON(err);

                err = i915_switch_context(request[id]);
                GEM_BUG_ON(err);

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

                i915_vma_move_to_active(batch, request[id], 0);
                i915_gem_object_set_active_reference(batch->obj);
                i915_vma_get(batch);

                i915_gem_request_get(request[id]);
                i915_add_request(request[id]);

                prev = request[id];
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                if (i915_gem_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }

                err = recursive_batch_resolve(request[id]->batch);
                if (err) {
                        pr_err("%s: failed to resolve batch, err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                timeout = i915_wait_request(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_gem_request_completed(request[id]));
        }

        err = end_live_test(&t);

out_request:
        for_each_engine(engine, i915, id) {
                u32 *cmd;

                if (!request[id])
                        break;

                cmd = i915_gem_object_pin_map(request[id]->batch->obj,
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
                        wmb();
                        i915_gem_object_unpin_map(request[id]->batch->obj);
                }

                i915_vma_put(request[id]->batch);
                i915_gem_request_put(request[id]);
        }
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

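/*
 * Entry point for the request selftests that exercise the full submission
 * paths on live hardware, as opposed to the mock device used above.
 */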
int i915_gem_request_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_nop_request),
                SUBTEST(live_all_engines),
                SUBTEST(live_sequential_engines),
                SUBTEST(live_empty_request),
        };
        return i915_subtests(tests, i915);
}