drivers/gpu/drm/i915/i915_request.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "intel_pm.h"

struct execute_cb {
        struct list_head link;
        struct irq_work work;
        struct i915_sw_fence *fence;
        void (*hook)(struct i915_request *rq, struct dma_fence *signal);
        struct i915_request *signal;
};

static struct i915_global_request {
        struct i915_global base;
        struct kmem_cache *slab_requests;
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_execute_cbs;
} global;

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
        return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
        /*
         * The timeline struct (as part of the ppgtt underneath a context)
         * may be freed when the request is no longer in use by the GPU.
         * We could extend the life of a context to beyond that of all
         * fences, possibly keeping the hw resource around indefinitely,
         * or we just give them a false name. Since
         * dma_fence_ops.get_timeline_name is a debug feature, the occasional
         * lie seems justifiable.
         */
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return "signaled";

        return to_request(fence)->gem_context->name ?: "[i915]";
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
        return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
        return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
{
        return i915_request_wait(to_request(fence),
                                 interruptible | I915_WAIT_PRIORITY,
                                 timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
        struct i915_request *rq = to_request(fence);

        /*
         * The request is put onto a RCU freelist (i.e. the address
         * is immediately reused), mark the fences as being freed now.
         * Otherwise the debugobjects for the fences are only marked as
         * freed when the slab cache itself is freed, and so we would get
         * caught trying to reuse dead objects.
         */
        i915_sw_fence_fini(&rq->submit);
        i915_sw_fence_fini(&rq->semaphore);

        kmem_cache_free(global.slab_requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
};
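
/*
 * Illustrative sketch, not part of the driver: because every i915_request
 * embeds a dma_fence backed by i915_fence_ops above, code outside i915 can
 * synchronise with a request through the generic dma-fence API alone,
 * without knowing anything about requests. A hypothetical waiter (assuming
 * it already holds a reference on the fence) could simply do:
 *
 *      long wait_on_i915_fence(struct dma_fence *fence)
 *      {
 *              // dispatches to i915_fence_wait() via fence->ops->wait
 *              return dma_fence_wait_timeout(fence, true, HZ);
 *      }
 */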

static void irq_execute_cb(struct irq_work *wrk)
{
        struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

        i915_sw_fence_complete(cb->fence);
        kmem_cache_free(global.slab_execute_cbs, cb);
}

static void irq_execute_cb_hook(struct irq_work *wrk)
{
        struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

        cb->hook(container_of(cb->fence, struct i915_request, submit),
                 &cb->signal->fence);
        i915_request_put(cb->signal);

        irq_execute_cb(wrk);
}

static void __notify_execute_cb(struct i915_request *rq)
{
        struct execute_cb *cb;

        lockdep_assert_held(&rq->lock);

        if (list_empty(&rq->execute_cb))
                return;

        list_for_each_entry(cb, &rq->execute_cb, link)
                irq_work_queue(&cb->work);

        /*
         * XXX Rollback on __i915_request_unsubmit()
         *
         * In the future, perhaps when we have an active time-slicing scheduler,
         * it will be interesting to unsubmit parallel execution and remove
         * busywaits from the GPU until their master is restarted. This is
         * quite hairy, we have to carefully rollback the fence and do a
         * preempt-to-idle cycle on the target engine, all the while the
         * master execute_cb may refire.
         */
        INIT_LIST_HEAD(&rq->execute_cb);
}

static inline void
i915_request_remove_from_client(struct i915_request *request)
{
        struct drm_i915_file_private *file_priv;

        file_priv = request->file_priv;
        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        if (request->file_priv) {
                list_del(&request->client_link);
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
}

static void advance_ring(struct i915_request *request)
{
        struct intel_ring *ring = request->ring;
        unsigned int tail;

        /*
         * We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
        if (list_is_last(&request->ring_link, &ring->request_list)) {
                /*
                 * We may race here with execlists resubmitting this request
                 * as we retire it. The resubmission will move the ring->tail
                 * forwards (to request->wa_tail). We either read the
                 * current value that was written to hw, or the value that
                 * is just about to be. Either works, if we miss the last two
                 * noops - they are safe to be replayed on a reset.
                 */
                tail = READ_ONCE(request->tail);
                list_del(&ring->active_link);
        } else {
                tail = request->postfix;
        }
        list_del_init(&request->ring_link);

        ring->head = tail;
}

static void free_capture_list(struct i915_request *request)
{
        struct i915_capture_list *capture;

        capture = request->capture_list;
        while (capture) {
                struct i915_capture_list *next = capture->next;

                kfree(capture);
                capture = next;
        }
}

static bool i915_request_retire(struct i915_request *rq)
{
        struct i915_active_request *active, *next;

        lockdep_assert_held(&rq->i915->drm.struct_mutex);
        if (!i915_request_completed(rq))
                return false;

        GEM_TRACE("%s fence %llx:%lld, current %d\n",
                  rq->engine->name,
                  rq->fence.context, rq->fence.seqno,
                  hwsp_seqno(rq));

        GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
        trace_i915_request_retire(rq);

        advance_ring(rq);

        /*
         * Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
         *
         * As the ->retire() may free the node, we decouple it first and
         * pass along the auxiliary information (to avoid dereferencing
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &rq->active_list, link) {
                /*
                 * In microbenchmarks, or when focusing upon time inside the kernel,
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * Of which, this loop itself is particularly hot due to the
                 * cache misses when jumping around the list of
                 * i915_active_request.  So we try to keep this loop as
                 * streamlined as possible and also prefetch the next
                 * i915_active_request to try and hide the likely cache miss.
                 */
                prefetchw(next);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, rq);
        }

        local_irq_disable();

        /*
         * We only loosely track inflight requests across preemption,
         * and so we may find ourselves attempting to retire a _completed_
         * request that we have removed from the HW and put back on a run
         * queue.
         */
        spin_lock(&rq->engine->active.lock);
        list_del(&rq->sched.link);
        spin_unlock(&rq->engine->active.lock);

        spin_lock(&rq->lock);
        i915_request_mark_complete(rq);
        if (!i915_request_signaled(rq))
                dma_fence_signal_locked(&rq->fence);
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
                i915_request_cancel_breadcrumb(rq);
        if (i915_request_has_waitboost(rq)) {
                GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
                atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
        }
        if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
                set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
                __notify_execute_cb(rq);
        }
        GEM_BUG_ON(!list_empty(&rq->execute_cb));
        spin_unlock(&rq->lock);

        local_irq_enable();

        intel_context_exit(rq->hw_context);
        intel_context_unpin(rq->hw_context);

        i915_request_remove_from_client(rq);
        list_del(&rq->link);

        free_capture_list(rq);
        i915_sched_node_fini(&rq->sched);
        i915_request_put(rq);

        return true;
}

void i915_request_retire_upto(struct i915_request *rq)
{
        struct intel_ring *ring = rq->ring;
        struct i915_request *tmp;

        GEM_TRACE("%s fence %llx:%lld, current %d\n",
                  rq->engine->name,
                  rq->fence.context, rq->fence.seqno,
                  hwsp_seqno(rq));

        lockdep_assert_held(&rq->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_request_completed(rq));

        if (list_empty(&rq->ring_link))
                return;

        do {
                tmp = list_first_entry(&ring->request_list,
                                       typeof(*tmp), ring_link);
        } while (i915_request_retire(tmp) && tmp != rq);
}

static int
__i915_request_await_execution(struct i915_request *rq,
                               struct i915_request *signal,
                               void (*hook)(struct i915_request *rq,
                                            struct dma_fence *signal),
                               gfp_t gfp)
{
        struct execute_cb *cb;

        if (i915_request_is_active(signal)) {
                if (hook)
                        hook(rq, &signal->fence);
                return 0;
        }

        cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
        if (!cb)
                return -ENOMEM;

        cb->fence = &rq->submit;
        i915_sw_fence_await(cb->fence);
        init_irq_work(&cb->work, irq_execute_cb);

        if (hook) {
                cb->hook = hook;
                cb->signal = i915_request_get(signal);
                cb->work.func = irq_execute_cb_hook;
        }

        spin_lock_irq(&signal->lock);
        if (i915_request_is_active(signal)) {
                if (hook) {
                        hook(rq, &signal->fence);
                        i915_request_put(signal);
                }
                i915_sw_fence_complete(cb->fence);
                kmem_cache_free(global.slab_execute_cbs, cb);
        } else {
                list_add_tail(&cb->link, &signal->execute_cb);
        }
        spin_unlock_irq(&signal->lock);

        return 0;
}

void __i915_request_submit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;

        GEM_TRACE("%s fence %llx:%lld, current %d\n",
                  engine->name,
                  request->fence.context, request->fence.seqno,
                  hwsp_seqno(request));

        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->active.lock);

        if (i915_gem_context_is_banned(request->gem_context))
                i915_request_skip(request, -EIO);

        /*
         * Are we using semaphores when the gpu is already saturated?
         *
         * Using semaphores incurs a cost in having the GPU poll a
         * memory location, busywaiting for it to change. The continual
         * memory reads can have a noticeable impact on the rest of the
         * system with the extra bus traffic, stalling the cpu as it too
         * tries to access memory across the bus (perf stat -e bus-cycles).
         *
         * If we installed a semaphore on this request and we only submit
         * the request after the signaler completed, that indicates the
         * system is overloaded and using semaphores at this time only
         * increases the amount of work we are doing. If so, we disable
         * further use of semaphores until we are idle again, whence we
         * optimistically try again.
         */
        if (request->sched.semaphores &&
            i915_sw_fence_signaled(&request->semaphore))
                engine->saturated |= request->sched.semaphores;

        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

        list_move_tail(&request->sched.link, &engine->active.requests);

        GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
        set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
            !i915_request_enable_breadcrumb(request))
                intel_engine_queue_breadcrumbs(engine);

        __notify_execute_cb(request);

        spin_unlock(&request->lock);

        engine->emit_fini_breadcrumb(request,
                                     request->ring->vaddr + request->postfix);

        engine->serial++;

        trace_i915_request_execute(request);
}

void i915_request_submit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->active.lock, flags);

        __i915_request_submit(request);

        spin_unlock_irqrestore(&engine->active.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;

        GEM_TRACE("%s fence %llx:%lld, current %d\n",
                  engine->name,
                  request->fence.context, request->fence.seqno,
                  hwsp_seqno(request));

        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->active.lock);

        /*
         * Only unwind in reverse order, required so that the per-context list
         * is kept in seqno/ring order.
         */

        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                i915_request_cancel_breadcrumb(request);

        GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
        clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

        spin_unlock(&request->lock);

        /* We've already spun, don't charge on resubmitting. */
        if (request->sched.semaphores && i915_request_started(request)) {
                request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
                request->sched.semaphores = 0;
        }
        /*
         * We don't need to wake_up any waiters on request->execute, they
         * will get woken by any other event or us re-adding this request
         * to the engine timeline (__i915_request_submit()). The waiters
         * should be quite adept at finding that the request now has a new
         * global_seqno compared to the one they went to sleep on.
         */
}

void i915_request_unsubmit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->active.lock, flags);

        __i915_request_unsubmit(request);

        spin_unlock_irqrestore(&engine->active.lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct i915_request *request =
                container_of(fence, typeof(*request), submit);

        switch (state) {
        case FENCE_COMPLETE:
                trace_i915_request_submit(request);
                /*
                 * We need to serialize use of the submit_request() callback
                 * with its hotplugging performed during an emergency
                 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
                 * critical section in order to force i915_gem_set_wedged() to
                 * wait until the submit_request() is completed before
                 * proceeding.
                 */
                rcu_read_lock();
                request->engine->submit_request(request);
                rcu_read_unlock();
                break;

        case FENCE_FREE:
                i915_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}

static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct i915_request *request =
                container_of(fence, typeof(*request), semaphore);

        switch (state) {
        case FENCE_COMPLETE:
                i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
                break;

        case FENCE_FREE:
                i915_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}

static void ring_retire_requests(struct intel_ring *ring)
{
        struct i915_request *rq, *rn;

        list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
                if (!i915_request_retire(rq))
                        break;
}

static noinline struct i915_request *
request_alloc_slow(struct intel_context *ce, gfp_t gfp)
{
        struct intel_ring *ring = ce->ring;
        struct i915_request *rq;

        if (list_empty(&ring->request_list))
                goto out;

        if (!gfpflags_allow_blocking(gfp))
                goto out;

        /* Move our oldest request to the slab-cache (if not in use!) */
        rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
        i915_request_retire(rq);

        rq = kmem_cache_alloc(global.slab_requests,
                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (rq)
                return rq;

        /* Ratelimit ourselves to prevent oom from malicious clients */
        rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
        cond_synchronize_rcu(rq->rcustate);

        /* Retire our old requests in the hope that we free some */
        ring_retire_requests(ring);

out:
        return kmem_cache_alloc(global.slab_requests, gfp);
}

struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
        struct intel_timeline *tl = ce->ring->timeline;
        struct i915_request *rq;
        u32 seqno;
        int ret;

        might_sleep_if(gfpflags_allow_blocking(gfp));

        /* Check that the caller provided an already pinned context */
        __intel_context_pin(ce);

        /*
         * Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is, the request we are writing to here may be in the process
         * of being read by __i915_active_request_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->global_seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with dma_fence_init(). This increment is safe for release as we
         * check that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be, and restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        rq = kmem_cache_alloc(global.slab_requests,
                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(!rq)) {
                rq = request_alloc_slow(ce, gfp);
                if (!rq) {
                        ret = -ENOMEM;
                        goto err_unreserve;
                }
        }

        ret = intel_timeline_get_seqno(tl, rq, &seqno);
        if (ret)
                goto err_free;

        rq->i915 = ce->engine->i915;
        rq->hw_context = ce;
        rq->gem_context = ce->gem_context;
        rq->engine = ce->engine;
        rq->ring = ce->ring;
        rq->timeline = tl;
        rq->hwsp_seqno = tl->hwsp_seqno;
        rq->hwsp_cacheline = tl->hwsp_cacheline;
        rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

        spin_lock_init(&rq->lock);
        dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
                       tl->fence_context, seqno);

        /* We bump the ref for the fence chain */
        i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
        i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);

        i915_sched_node_init(&rq->sched);

        /* No zalloc, must clear what we need by hand */
        rq->file_priv = NULL;
        rq->batch = NULL;
        rq->capture_list = NULL;
        rq->flags = 0;
        rq->execution_mask = ALL_ENGINES;

        INIT_LIST_HEAD(&rq->active_list);
        INIT_LIST_HEAD(&rq->execute_cb);

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_request_add() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         *
         * Note that due to how we add reserved_space to intel_ring_begin()
         * we need to double our request to ensure that if we need to wrap
         * around inside i915_request_add() there is sufficient space at
         * the beginning of the ring as well.
         */
        rq->reserved_space =
                2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

        /*
         * Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        rq->head = rq->ring->emit;

        ret = rq->engine->request_alloc(rq);
        if (ret)
                goto err_unwind;

        rq->infix = rq->ring->emit; /* end of header; start of user payload */

        intel_context_mark_active(ce);
        return rq;

err_unwind:
        ce->ring->emit = rq->head;

        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&rq->active_list));
        GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
        GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
        kmem_cache_free(global.slab_requests, rq);
err_unreserve:
        intel_context_unpin(ce);
        return ERR_PTR(ret);
}

struct i915_request *
i915_request_create(struct intel_context *ce)
{
        struct i915_request *rq;
        int err;

        err = intel_context_timeline_lock(ce);
        if (err)
                return ERR_PTR(err);

        /* Move our oldest request to the slab-cache (if not in use!) */
        rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
        if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
                i915_request_retire(rq);

        intel_context_enter(ce);
        rq = __i915_request_create(ce, GFP_KERNEL);
        intel_context_exit(ce); /* active reference transferred to request */
        if (IS_ERR(rq))
                goto err_unlock;

        /* Check that we do not interrupt ourselves with a new request */
        rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);

        return rq;

err_unlock:
        intel_context_timeline_unlock(ce);
        return rq;
}
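
/*
 * Illustrative sketch, not part of the driver: the expected calling
 * pattern for the constructors above. Assuming "ce" is a pinned
 * intel_context the caller owns, a minimal submission looks roughly
 * like:
 *
 *      struct i915_request *rq;
 *      u32 *cs;
 *
 *      rq = i915_request_create(ce);
 *      if (IS_ERR(rq))
 *              return PTR_ERR(rq);
 *
 *      cs = intel_ring_begin(rq, 2);
 *      if (IS_ERR(cs)) {
 *              // breadcrumb space is already reserved, so adding still works
 *              i915_request_add(rq);
 *              return PTR_ERR(cs);
 *      }
 *      *cs++ = MI_NOOP;
 *      *cs++ = MI_NOOP;
 *      intel_ring_advance(rq, cs);
 *
 *      i915_request_add(rq);   // publishes rq and drops the timeline lock
 */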

static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
        if (list_is_first(&signal->ring_link, &signal->ring->request_list))
                return 0;

        signal = list_prev_entry(signal, ring_link);
        if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
                return 0;

        return i915_sw_fence_await_dma_fence(&rq->submit,
                                             &signal->fence, 0,
                                             I915_FENCE_GFP);
}

static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
        /*
         * Polling a semaphore causes bus traffic, delaying other users of
         * both the GPU and CPU. We want to limit the impact on others,
         * while taking advantage of early submission to reduce GPU
         * latency. Therefore we restrict ourselves to not using more
         * than one semaphore from each source, and not using a semaphore
         * if we have detected the engine is saturated (i.e. would not be
         * submitted early and cause bus traffic reading an already passed
         * semaphore).
         *
         * See the are-we-too-late? check in __i915_request_submit().
         */
        return rq->sched.semaphores | rq->engine->saturated;
}

static int
emit_semaphore_wait(struct i915_request *to,
                    struct i915_request *from,
                    gfp_t gfp)
{
        u32 hwsp_offset;
        u32 *cs;
        int err;

        GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
        GEM_BUG_ON(INTEL_GEN(to->i915) < 8);

        /* Just emit the first semaphore we see as request space is limited. */
        if (already_busywaiting(to) & from->engine->mask)
                return i915_sw_fence_await_dma_fence(&to->submit,
                                                     &from->fence, 0,
                                                     I915_FENCE_GFP);

        err = i915_request_await_start(to, from);
        if (err < 0)
                return err;

        /* Only submit our spinner after the signaler is running! */
        err = __i915_request_await_execution(to, from, NULL, gfp);
        if (err)
                return err;

        /* We need to pin the signaler's HWSP until we are finished reading. */
        err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
        if (err)
                return err;

        cs = intel_ring_begin(to, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        /*
         * Using greater-than-or-equal here means we have to worry
         * about seqno wraparound. To side step that issue, we swap
         * the timeline HWSP upon wrapping, so that anyone listening
         * for the old (pre-wrap) values does not see much smaller
         * (post-wrap) values than expected (and so wait forever).
         */
        *cs++ = MI_SEMAPHORE_WAIT |
                MI_SEMAPHORE_GLOBAL_GTT |
                MI_SEMAPHORE_POLL |
                MI_SEMAPHORE_SAD_GTE_SDD;
        *cs++ = from->fence.seqno;
        *cs++ = hwsp_offset;
        *cs++ = 0;

        intel_ring_advance(to, cs);
        to->sched.semaphores |= from->engine->mask;
        to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
        return 0;
}
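
/*
 * For reference, the four dwords emitted by emit_semaphore_wait() form a
 * single gen8+ MI_SEMAPHORE_WAIT command: dword 0 carries the opcode plus
 * the poll and greater-than-or-equal (SAD_GTE_SDD) modifiers, dword 1 is
 * the value to compare against (the signaler's seqno), dword 2 is the
 * GGTT offset of the signaler's HWSP seqno slot and dword 3 is the (zero)
 * upper address dword. The engine then polls until the seqno in the HWSP
 * reaches or exceeds the expected value, resolving the dependency on the
 * GPU without a round trip through a CPU interrupt.
 */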

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
        int ret;

        GEM_BUG_ON(to == from);
        GEM_BUG_ON(to->timeline == from->timeline);

        if (i915_request_completed(from))
                return 0;

        if (to->engine->schedule) {
                ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
                if (ret < 0)
                        return ret;
        }

        if (to->engine == from->engine) {
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       I915_FENCE_GFP);
        } else if (intel_engine_has_semaphores(to->engine) &&
                   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
                ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
        } else {
                ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                    &from->fence, 0,
                                                    I915_FENCE_GFP);
        }
        if (ret < 0)
                return ret;

        if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
                ret = i915_sw_fence_await_dma_fence(&to->semaphore,
                                                    &from->fence, 0,
                                                    I915_FENCE_GFP);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
        struct dma_fence **child = &fence;
        unsigned int nchild = 1;
        int ret;

        /*
         * Note that if the fence-array was created in signal-on-any mode,
         * we should *not* decompose it into its individual fences. However,
         * we don't currently store which mode the fence-array is operating
         * in. Fortunately, the only user of signal-on-any is private to
         * amdgpu and we should not see any incoming fence-array from
         * sync-file being in signal-on-any mode.
         */
        if (dma_fence_is_array(fence)) {
                struct dma_fence_array *array = to_dma_fence_array(fence);

                child = array->fences;
                nchild = array->num_fences;
                GEM_BUG_ON(!nchild);
        }

        do {
                fence = *child++;
                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        continue;

                /*
                 * Requests on the same timeline are explicitly ordered, along
                 * with their dependencies, by i915_request_add() which ensures
                 * that requests are submitted in-order through each ring.
                 */
                if (fence->context == rq->fence.context)
                        continue;

                /* Squash repeated waits to the same timelines */
                if (fence->context != rq->i915->mm.unordered_timeline &&
                    intel_timeline_sync_is_later(rq->timeline, fence))
                        continue;

                if (dma_fence_is_i915(fence))
                        ret = i915_request_await_request(rq, to_request(fence));
                else
                        ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
                                                            I915_FENCE_TIMEOUT,
                                                            I915_FENCE_GFP);
                if (ret < 0)
                        return ret;

                /* Record the latest fence used against each timeline */
                if (fence->context != rq->i915->mm.unordered_timeline)
                        intel_timeline_sync_set(rq->timeline, fence);
        } while (--nchild);

        return 0;
}
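
/*
 * Illustrative sketch, not part of the driver: a typical external fence
 * fed to i915_request_await_dma_fence() is an execbuf wait fence imported
 * from a sync_file fd supplied by userspace, roughly:
 *
 *      struct dma_fence *in_fence = sync_file_get_fence(fd);
 *
 *      if (!in_fence)
 *              return -EINVAL;
 *      err = i915_request_await_dma_fence(rq, in_fence);
 *      dma_fence_put(in_fence);
 *      if (err < 0)
 *              return err;
 */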

int
i915_request_await_execution(struct i915_request *rq,
                             struct dma_fence *fence,
                             void (*hook)(struct i915_request *rq,
                                          struct dma_fence *signal))
{
        struct dma_fence **child = &fence;
        unsigned int nchild = 1;
        int ret;

        if (dma_fence_is_array(fence)) {
                struct dma_fence_array *array = to_dma_fence_array(fence);

                /* XXX Error for signal-on-any fence arrays */

                child = array->fences;
                nchild = array->num_fences;
                GEM_BUG_ON(!nchild);
        }

        do {
                fence = *child++;
                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        continue;

                /*
                 * We don't squash repeated fence dependencies here as we
                 * want to run our callback in all cases.
                 */

                if (dma_fence_is_i915(fence))
                        ret = __i915_request_await_execution(rq,
                                                             to_request(fence),
                                                             hook,
                                                             I915_FENCE_GFP);
                else
                        ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
                                                            I915_FENCE_TIMEOUT,
                                                            GFP_KERNEL);
                if (ret < 0)
                        return ret;
        } while (--nchild);

        return 0;
}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
                          struct drm_i915_gem_object *obj,
                          bool write)
{
        struct dma_fence *excl;
        int ret = 0;

        if (write) {
                struct dma_fence **shared;
                unsigned int count, i;

                ret = reservation_object_get_fences_rcu(obj->base.resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        ret = i915_request_await_dma_fence(to, shared[i]);
                        if (ret)
                                break;

                        dma_fence_put(shared[i]);
                }

                for (; i < count; i++)
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->base.resv);
        }

        if (excl) {
                if (ret == 0)
                        ret = i915_request_await_dma_fence(to, excl);

                dma_fence_put(excl);
        }

        return ret;
}
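
/*
 * Illustrative sketch, not part of the driver: a caller building a request
 * that writes to "obj" awaits the object before publishing the request, so
 * that all prior readers and writers are ordered ahead of it (the unwind
 * label below is hypothetical):
 *
 *      err = i915_request_await_object(rq, obj, true);
 *      if (err)
 *              goto err_request;
 *      // ... track rq against the object/vma, emit commands ...
 *      i915_request_add(rq);
 */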

void i915_request_skip(struct i915_request *rq, int error)
{
        void *vaddr = rq->ring->vaddr;
        u32 head;

        GEM_BUG_ON(!IS_ERR_VALUE((long)error));
        dma_fence_set_error(&rq->fence, error);

        /*
         * As this request likely depends on state from the lost
         * context, clear out all the user operations leaving the
         * breadcrumb at the end (so we get the fence notifications).
         */
        head = rq->infix;
        if (rq->postfix < head) {
                memset(vaddr + head, 0, rq->ring->size - head);
                head = 0;
        }
        memset(vaddr + head, 0, rq->postfix - head);
}
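
/*
 * For reference, a request occupies its ring as laid out below;
 * i915_request_skip() zeroes only the user payload (a dword of zero
 * executes as MI_NOOP), handling the wrap through the end of the ring,
 * while the final breadcrumb is preserved so the fence still signals:
 *
 *      [rq->head .. rq->infix .. rq->postfix .. tail]
 *        header      payload      breadcrumb
 *        (kept)      (zeroed)     (kept)
 */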

static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
        struct intel_timeline *timeline = rq->timeline;
        struct i915_request *prev;

        /*
         * Dependency tracking and request ordering along the timeline
         * is special cased so that we can eliminate redundant ordering
         * operations while building the request (we know that the timeline
         * itself is ordered, and here we guarantee it).
         *
         * As we know we will need to emit tracking along the timeline,
         * we embed the hooks into our request struct -- at the cost of
         * having to have specialised no-allocation interfaces (which will
         * be beneficial elsewhere).
         *
         * A second benefit to open-coding i915_request_await_request is
         * that we can apply a slight variant of the rules specialised
         * for timelines that jump between engines (such as virtual engines).
         * If we consider the case of virtual engine, we must emit a dma-fence
         * to prevent scheduling of the second request until the first is
         * complete (to maximise our greedy late load balancing) and this
         * precludes optimising to use semaphore serialisation of a single
         * timeline across engines.
         */
        prev = rcu_dereference_protected(timeline->last_request.request, 1);
        if (prev && !i915_request_completed(prev)) {
                if (is_power_of_2(prev->engine->mask | rq->engine->mask))
                        i915_sw_fence_await_sw_fence(&rq->submit,
                                                     &prev->submit,
                                                     &rq->submitq);
                else
                        __i915_sw_fence_await_dma_fence(&rq->submit,
                                                        &prev->fence,
                                                        &rq->dmaq);
                if (rq->engine->schedule)
                        __i915_sched_node_add_dependency(&rq->sched,
                                                         &prev->sched,
                                                         &rq->dep,
                                                         0);
        }

        list_add_tail(&rq->link, &timeline->requests);

        /*
         * Make sure that no request gazumped us - if it was allocated after
         * our i915_request_alloc() and called __i915_request_add() before
         * us, the timeline will hold its seqno which is later than ours.
         */
        GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
        __i915_active_request_set(&timeline->last_request, rq);

        return prev;
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
{
        struct intel_engine_cs *engine = rq->engine;
        struct intel_ring *ring = rq->ring;
        struct i915_request *prev;
        u32 *cs;

        GEM_TRACE("%s fence %llx:%lld\n",
                  engine->name, rq->fence.context, rq->fence.seqno);

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        GEM_BUG_ON(rq->reserved_space > ring->space);
        rq->reserved_space = 0;

        /*
         * Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
        cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
        GEM_BUG_ON(IS_ERR(cs));
        rq->postfix = intel_ring_offset(rq, cs);

        prev = __i915_request_add_to_timeline(rq);

        list_add_tail(&rq->ring_link, &ring->request_list);
        if (list_is_first(&rq->ring_link, &ring->request_list))
                list_add(&ring->active_link, &rq->i915->gt.active_rings);
        rq->emitted_jiffies = jiffies;

        /*
         * Let the backend know a new request has arrived that may need
         * to adjust the existing execution schedule due to a high priority
         * request - i.e. we may want to preempt the current request in order
         * to run a high priority dependency chain *before* we can execute this
         * request.
         *
         * This is called before the request is ready to run so that we can
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
        local_bh_disable();
        i915_sw_fence_commit(&rq->semaphore);
        rcu_read_lock(); /* RCU serialisation for set-wedged protection */
        if (engine->schedule) {
                struct i915_sched_attr attr = rq->gem_context->sched;

                /*
                 * Boost actual workloads past semaphores!
                 *
                 * With semaphores we spin on one engine waiting for another,
                 * simply to reduce the latency of starting our work when
                 * the signaler completes. However, if there is any other
                 * work that we could be doing on this engine instead, that
                 * is better utilisation and will reduce the overall duration
                 * of the current work. To avoid PI boosting a semaphore
                 * far in the distance, past other useful work, we keep a
                 * history of any semaphore use along our dependency chain.
                 */
                if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
                        attr.priority |= I915_PRIORITY_NOSEMAPHORE;

                /*
                 * Boost priorities to new clients (new request flows).
                 *
                 * Allow interactive/synchronous clients to jump ahead of
                 * the bulk clients. (FQ_CODEL)
                 */
                if (list_empty(&rq->sched.signalers_list))
                        attr.priority |= I915_PRIORITY_WAIT;

                engine->schedule(rq, &attr);
        }
        rcu_read_unlock();
        i915_sw_fence_commit(&rq->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

        return prev;
}

void i915_request_add(struct i915_request *rq)
{
        struct i915_request *prev;

        lockdep_assert_held(&rq->timeline->mutex);
        lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);

        trace_i915_request_add(rq);

        prev = __i915_request_commit(rq);

        /*
         * In typical scenarios, we do not expect the previous request on
         * the timeline to be still tracked by timeline->last_request if it
         * has been completed. If the completed request is still here, that
         * implies that request retirement is a long way behind submission,
         * suggesting that we haven't been retiring frequently enough from
         * the combination of retire-before-alloc, waiters and the background
         * retirement worker. So if the last request on this timeline was
         * already completed, do a catch up pass, flushing the retirement queue
         * up to this client. Since we have now moved the heaviest operations
         * during retirement onto secondary workers, such as freeing objects
         * or contexts, retiring a bunch of requests is mostly list management
         * (and cache misses), and so we should not be overly penalizing this
         * client by performing excess work, though we may still be performing
         * work on behalf of others -- but instead we should benefit from
         * improved resource management. (Well, that's the theory at least.)
         */
        if (prev && i915_request_completed(prev))
                i915_request_retire_upto(prev);

        mutex_unlock(&rq->timeline->mutex);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
        unsigned long t;

        /*
         * Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
         *
         * Note that local_clock() is only defined wrt the current CPU;
         * the comparisons are no longer valid if we switch CPUs. Instead of
         * blocking preemption for the entire busywait, we can detect the CPU
         * switch and use that as indicator of system load and a reason to
         * stop busywaiting, see busywait_stop().
         */
        *cpu = get_cpu();
        t = local_clock() >> 10;
        put_cpu();

        return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
        unsigned int this_cpu;

        if (time_after(local_clock_us(&this_cpu), timeout))
                return true;

        return this_cpu != cpu;
}

static bool __i915_spin_request(const struct i915_request * const rq,
                                int state, unsigned long timeout_us)
{
        unsigned int cpu;

        /*
         * Only wait for the request if we know it is likely to complete.
         *
         * We don't track the timestamps around requests, nor the average
         * request length, so we do not have a good indicator that this
         * request will complete within the timeout. What we do know is the
         * order in which requests are executed by the context and so we can
         * tell if the request has been started. If the request is not even
         * running yet, it is a fair assumption that it will not complete
         * within our relatively short timeout.
         */
        if (!i915_request_is_running(rq))
                return false;

        /*
         * When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
         * can service the high frequency waits as quickly as possible. However,
         * if it is a slow request, we want to sleep as quickly as possible.
         * The tradeoff between waiting and sleeping is roughly the time it
         * takes to sleep on a request, on the order of a microsecond.
         */

        timeout_us += local_clock_us(&cpu);
        do {
                if (i915_request_completed(rq))
                        return true;

                if (signal_pending_state(state, current))
                        break;

                if (busywait_stop(timeout_us, cpu))
                        break;

                cpu_relax();
        } while (!need_resched());

        return false;
}

struct request_wait {
        struct dma_fence_cb cb;
        struct task_struct *tsk;
};

static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct request_wait *wait = container_of(cb, typeof(*wait), cb);

        wake_up_process(wait->tsk);
}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1377  * pending before the request completes.
1378  */
1379 long i915_request_wait(struct i915_request *rq,
1380                        unsigned int flags,
1381                        long timeout)
1382 {
1383         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1384                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1385         struct request_wait wait;
1386
1387         might_sleep();
1388         GEM_BUG_ON(timeout < 0);
1389
1390         if (dma_fence_is_signaled(&rq->fence))
1391                 return timeout;
1392
1393         if (!timeout)
1394                 return -ETIME;
1395
1396         trace_i915_request_wait_begin(rq, flags);
1397
1398         /*
1399          * We must never wait on the GPU while holding a lock as we
1400          * may need to perform a GPU reset. So while we don't need to
1401          * serialise wait/reset with an explicit lock, we do want
1402          * lockdep to detect potential dependency cycles.
1403          */
1404         mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
1405                       0, 0, _THIS_IP_);
1406
1407         /*
1408          * Optimistic spin before touching IRQs.
1409          *
1410          * We may use a rather large value here to offset the penalty of
1411          * switching away from the active task. Frequently, the client will
1412          * wait upon an old swapbuffer to throttle itself to remain within a
1413          * frame of the gpu. If the client is running in lockstep with the gpu,
1414          * then it should not be waiting long at all, and a sleep now will incur
1415          * extra scheduler latency in producing the next frame. To try to
1416          * avoid adding the cost of enabling/disabling the interrupt to the
1417          * short wait, we first spin to see if the request would have completed
1418          * in the time taken to set up the interrupt.
1419          *
1420          * We need up to 5us to enable the irq, and up to 20us to hide the
1421          * scheduler latency of a context switch, ignoring the secondary
1422          * impacts from a context switch such as cache eviction.
1423          *
1424          * The scheme used for low-latency IO is called "hybrid interrupt
1425          * polling". The suggestion there is to sleep until just before you
1426          * expect to be woken by the device interrupt and then poll for its
1427          * completion. That requires having a good predictor for the request
1428          * duration, which we currently lack.
1429          */
1430         if (CONFIG_DRM_I915_SPIN_REQUEST &&
1431             __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
1432                 dma_fence_signal(&rq->fence);
1433                 goto out;
1434         }
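
/*
 * Editorial note: CONFIG_DRM_I915_SPIN_REQUEST doubles as the compile-time
 * switch and as the spin budget in microseconds handed to
 * __i915_spin_request(); configuring it to 0 compiles the busywait out
 * entirely.
 */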
1435
1436         /*
1437          * This client is about to stall waiting for the GPU. In many cases
1438          * this is undesirable and limits the throughput of the system, as
1439          * many clients cannot continue processing user input/output whilst
1440          * blocked. RPS autotuning may take tens of milliseconds to respond
1441          * to the GPU load and thus incurs additional latency for the client.
1442          * We can circumvent that by promoting the GPU frequency to maximum
1443          * before we sleep. This makes the GPU throttle up much more quickly
1444          * (good for benchmarks and user experience, e.g. window animations),
1445          * but at a cost of spending more power processing the workload
1446          * (bad for battery).
1447          */
1448         if (flags & I915_WAIT_PRIORITY) {
1449                 if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
1450                         gen6_rps_boost(rq);
1451                 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1452         }
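
/*
 * Editorial note on the gating above: the RPS waitboost is applied only
 * while the request has yet to start executing, and only on gen6+ where
 * RPS exists; the priority bump is applied whenever I915_WAIT_PRIORITY
 * is set.
 */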
1453
1454         wait.tsk = current;
1455         if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1456                 goto out;
1457
1458         for (;;) {
1459                 set_current_state(state);
1460
1461                 if (i915_request_completed(rq)) {
1462                         dma_fence_signal(&rq->fence);
1463                         break;
1464                 }
1465
1466                 if (signal_pending_state(state, current)) {
1467                         timeout = -ERESTARTSYS;
1468                         break;
1469                 }
1470
1471                 if (!timeout) {
1472                         timeout = -ETIME;
1473                         break;
1474                 }
1475
1476                 timeout = io_schedule_timeout(timeout);
1477         }
1478         __set_current_state(TASK_RUNNING);
1479
1480         dma_fence_remove_callback(&rq->fence, &wait.cb);
1481
1482 out:
1483         mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
1484         trace_i915_request_wait_end(rq);
1485         return timeout;
1486 }
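
/*
 * A minimal usage sketch (illustrative only): wait interruptibly for up
 * to 100ms and distinguish the outcomes documented above.
 *
 *	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				     msecs_to_jiffies(100));
 *
 *	ret == -ETIME:        timed out with the request still busy
 *	ret == -ERESTARTSYS:  interrupted by a signal
 *	ret >= 0:             completed, with ret jiffies of the timeout left
 */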
1487
1488 bool i915_retire_requests(struct drm_i915_private *i915)
1489 {
1490         struct intel_ring *ring, *tmp;
1491
1492         lockdep_assert_held(&i915->drm.struct_mutex);
1493
1494         list_for_each_entry_safe(ring, tmp,
1495                                  &i915->gt.active_rings, active_link) {
1496                 intel_ring_get(ring); /* last rq holds reference! */
1497                 ring_retire_requests(ring);
1498                 intel_ring_put(ring);
1499         }
1500
1501         return !list_empty(&i915->gt.active_rings);
1502 }
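
/*
 * Editorial sketch of the expected calling pattern: retirement runs under
 * struct_mutex, and the return value reports whether any rings remain
 * active after the pass:
 *
 *	mutex_lock(&i915->drm.struct_mutex);
 *	busy = i915_retire_requests(i915);
 *	mutex_unlock(&i915->drm.struct_mutex);
 *
 * where busy == false means no rings remained on i915->gt.active_rings.
 */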
1503
1504 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1505 #include "selftests/mock_request.c"
1506 #include "selftests/i915_request.c"
1507 #endif
1508
1509 static void i915_global_request_shrink(void)
1510 {
1511         kmem_cache_shrink(global.slab_dependencies);
1512         kmem_cache_shrink(global.slab_execute_cbs);
1513         kmem_cache_shrink(global.slab_requests);
1514 }
1515
1516 static void i915_global_request_exit(void)
1517 {
1518         kmem_cache_destroy(global.slab_dependencies);
1519         kmem_cache_destroy(global.slab_execute_cbs);
1520         kmem_cache_destroy(global.slab_requests);
1521 }
1522
1523 static struct i915_global_request global = { {
1524         .shrink = i915_global_request_shrink,
1525         .exit = i915_global_request_exit,
1526 } };
1527
1528 int __init i915_global_request_init(void)
1529 {
1530         global.slab_requests = KMEM_CACHE(i915_request,
1531                                           SLAB_HWCACHE_ALIGN |
1532                                           SLAB_RECLAIM_ACCOUNT |
1533                                           SLAB_TYPESAFE_BY_RCU);
1534         if (!global.slab_requests)
1535                 return -ENOMEM;
1536
1537         global.slab_execute_cbs = KMEM_CACHE(execute_cb,
1538                                              SLAB_HWCACHE_ALIGN |
1539                                              SLAB_RECLAIM_ACCOUNT |
1540                                              SLAB_TYPESAFE_BY_RCU);
1541         if (!global.slab_execute_cbs)
1542                 goto err_requests;
1543
1544         global.slab_dependencies = KMEM_CACHE(i915_dependency,
1545                                               SLAB_HWCACHE_ALIGN |
1546                                               SLAB_RECLAIM_ACCOUNT);
1547         if (!global.slab_dependencies)
1548                 goto err_execute_cbs;
1549
1550         i915_global_register(&global.base);
1551         return 0;
1552
1553 err_execute_cbs:
1554         kmem_cache_destroy(global.slab_execute_cbs);
1555 err_requests:
1556         kmem_cache_destroy(global.slab_requests);
1557         return -ENOMEM;
1558 }
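
/*
 * Editorial note: the request machinery above allocates from these global
 * slabs, e.g. (sketch)
 *
 *	rq = kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
 *
 * SLAB_TYPESAFE_BY_RCU keeps freed requests type-stable across an RCU grace
 * period, which is what makes lockless fence lookups safe to race against
 * free, while SLAB_RECLAIM_ACCOUNT marks the caches as reclaimable for the
 * VM's accounting.
 */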