 * Copyright © 2008-2015 Intel Corporation
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
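/*
 * dma_fence callbacks: these hook the struct dma_fence embedded in every
 * i915_request into the driver, so that external users (sync_file, other
 * drivers) can query, wait upon and be signalled by i915 requests.
 */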
static const char *i915_fence_get_driver_name(struct dma_fence *fence)

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))

	return to_request(fence)->timeline->name;

static bool i915_fence_signaled(struct dma_fence *fence)
	return i915_request_completed(to_request(fence));

static bool i915_fence_enable_signaling(struct dma_fence *fence)
	return intel_engine_enable_signaling(to_request(fence), true);

static signed long i915_fence_wait(struct dma_fence *fence,
	return i915_request_wait(to_request(fence), interruptible, timeout);

static void i915_fence_release(struct dma_fence *fence)
	struct i915_request *rq = to_request(fence);
	 * The request is put onto an RCU freelist (i.e. the address
	 * is immediately reused), so mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	i915_sw_fence_fini(&rq->submit);
	kmem_cache_free(rq->i915->requests, rq);

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
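/*
 * Detach the request from its client's (drm file's) tracking list, if it is
 * still attached; done under the file's mm.lock as it may race with the
 * file being closed.
 */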
i915_request_remove_from_client(struct i915_request *request)
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
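/*
 * Reset the global seqno on every engine to @seqno. The GPU must be idle
 * first: we park the breadcrumb signalers, flush any waiters and then
 * rewrite both the timeline bookkeeping and the hardware state.
 */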
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
	struct intel_engine_cs *engine;
	struct i915_timeline *timeline;
	enum intel_engine_id id;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     MAX_SCHEDULE_TIMEOUT);

	GEM_BUG_ON(i915->gt.active_requests);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
			  engine->timeline.seqno,
			  intel_engine_get_seqno(engine),

		if (seqno == engine->timeline.seqno)

		kthread_park(engine->breadcrumbs.signaler);

		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
			/* Flush any waiters before we reuse the seqno */
			intel_engine_disarm_breadcrumbs(engine);
			intel_engine_init_hangcheck(engine);
			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));

		/* Check we are idle before we fiddle with hw state! */
		GEM_BUG_ON(!intel_engine_is_idle(engine));
		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));

		/* Finally reset hw state */
		intel_engine_init_global_seqno(engine, seqno);
		engine->timeline.seqno = seqno;

		kthread_unpark(engine->breadcrumbs.signaler);

	list_for_each_entry(timeline, &i915->gt.timelines, link)
		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));

	i915->gt.request_serial = seqno;

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
	struct drm_i915_private *i915 = to_i915(dev);

	lockdep_assert_held(&i915->drm.struct_mutex);

	/* The HWS page needs to be set to less than what we will inject into the ring */
	return reset_all_global_seqno(i915, seqno - 1);

static int reserve_gt(struct drm_i915_private *i915)
	 * Reservation is fine until we may need to wrap around.
	 *
	 * By incrementing the serial for every request, we know that no
	 * individual engine may exceed that serial (as each is reset to 0
	 * on any wrap). This protects even the most pessimistic of migrations
	 * of every request from all engines onto just one.
	while (unlikely(++i915->gt.request_serial == 0)) {
		ret = reset_all_global_seqno(i915, 0);
			i915->gt.request_serial--;

	if (!i915->gt.active_requests++)
		i915_gem_unpark(i915);
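/*
 * Drop the active-request count taken by reserve_gt(); the GPU can be
 * parked again once it reaches zero.
 */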
static void unreserve_gt(struct drm_i915_private *i915)
	GEM_BUG_ON(!i915->gt.active_requests);
	if (!--i915->gt.active_requests)

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct i915_request *request)
	/* Space left intentionally blank */

static void advance_ring(struct i915_request *request)
	struct intel_ring *ring = request->ring;
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
	if (list_is_last(&request->ring_link, &ring->request_list)) {
		 * We may race here with execlists resubmitting this request
		 * as we retire it. The resubmission will move the ring->tail
		 * forwards (to request->wa_tail). We either read the
		 * current value that was written to hw, or the value that
		 * is just about to be. Either works, if we miss the last two
		 * noops - they are safe to be replayed on a reset.
		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
		tail = READ_ONCE(request->tail);
		list_del(&ring->active_link);
		tail = request->postfix;

	list_del_init(&request->ring_link);
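/* Free the error-capture list (if any) attached to this request. */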
static void free_capture_list(struct i915_request *request)
	struct i915_capture_list *capture;

	capture = request->capture_list;
		struct i915_capture_list *next = capture->next;
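/*
 * Remove a completed request from the engine's timeline: drop it from the
 * engine list, signal its fence, cancel any pending signaling and defer
 * unpinning of the context it ran on until the next retirement.
 */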
static void __retire_engine_request(struct intel_engine_cs *engine,
				    struct i915_request *rq)
	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
		  __func__, engine->name,
		  rq->fence.context, rq->fence.seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!i915_request_completed(rq));

	spin_lock(&engine->timeline.lock);
	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
	list_del_init(&rq->link);
	spin_unlock(&engine->timeline.lock);

	spin_lock(&rq->lock);
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		intel_engine_cancel_signaling(rq);
		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
	spin_unlock(&rq->lock);

	 * The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	if (engine->last_retired_context)
		intel_context_unpin(engine->last_retired_context);
	engine->last_retired_context = rq->hw_context;
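/* Retire, in submission order, every engine request up to and including @rq. */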
static void __retire_engine_upto(struct intel_engine_cs *engine,
				 struct i915_request *rq)
	struct i915_request *tmp;

	if (list_empty(&rq->link))

		tmp = list_first_entry(&engine->timeline.requests,

		GEM_BUG_ON(tmp->engine != engine);
		__retire_engine_request(engine, tmp);

static void i915_request_retire(struct i915_request *request)
	struct i915_gem_active *active, *next;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(request->engine));

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_request_completed(request));

	trace_i915_request_retire(request);

	advance_ring(request);
	free_capture_list(request);

	 * Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		 * In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);

	i915_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	atomic_dec_if_positive(&request->gem_context->ban_score);
	intel_context_unpin(request->hw_context);

	__retire_engine_upto(request->engine, request);

	unreserve_gt(request->i915);

	i915_sched_node_fini(request->i915, &request->sched);
	i915_request_put(request);

void i915_request_retire_upto(struct i915_request *rq)
	struct intel_ring *ring = rq->ring;
	struct i915_request *tmp;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  rq->fence.context, rq->fence.seqno,
		  intel_engine_get_seqno(rq->engine));

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_request_completed(rq));

	if (list_empty(&rq->ring_link))

		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);

		i915_request_retire(tmp);
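/*
 * Advance and return the timeline's seqno for the next breadcrumb (see its
 * use in __i915_request_submit()).
 */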
static u32 timeline_get_seqno(struct i915_timeline *tl)
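/*
 * Move the request onto @timeline; used to transfer a request between its
 * per-context timeline and the engine's global timeline at (un)submission.
 */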
static void move_to_timeline(struct i915_request *request,
			     struct i915_timeline *timeline)
	GEM_BUG_ON(request->timeline == &request->engine->timeline);
	lockdep_assert_held(&request->engine->timeline.lock);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);
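/*
 * __i915_request_submit - hand the request to the hardware backend.
 * Assigns the global seqno, writes the breadcrumb into the ring and moves
 * the request onto the engine timeline. Caller must hold
 * engine->timeline.lock with interrupts disabled.
 */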
void __i915_request_submit(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
		  request->fence.context, request->fence.seqno,
		  engine->timeline.seqno + 1,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	GEM_BUG_ON(request->global_seqno);

	seqno = timeline_get_seqno(&engine->timeline);
	GEM_BUG_ON(intel_engine_signaled(engine, seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request, false);
	spin_unlock(&request->lock);

	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	/* Transfer from per-context onto the global per-engine timeline */
	move_to_timeline(request, &engine->timeline);

	trace_i915_request_execute(request);

	wake_up_all(&request->execute);

void i915_request_submit(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);

void __i915_request_unsubmit(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
	engine->timeline.seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_cancel_signaling(request);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	move_to_timeline(request, request->timeline);

	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno from the one they went to sleep on.

void i915_request_unsubmit(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
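/*
 * i915_sw_fence callback: once all of the request's dependencies have been
 * signalled, pass the request on to the engine's submit_request() backend;
 * on the final notification, drop the reference taken for the fence chain.
 */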
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

		trace_i915_request_submit(request);
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		request->engine->submit_request(request);

		i915_request_put(request);

 * i915_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
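 *
 * Typical flow (sketch): allocate the request, emit commands into rq->ring,
 * then seal and queue it with i915_request_add(). Space for the final
 * breadcrumb is reserved here so that i915_request_add() cannot fail.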
struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq;
	struct intel_context *ce;

	lockdep_assert_held(&i915->drm.struct_mutex);

	 * Preempt contexts are reserved for exclusive use to inject a
	 * preemption context switch. They are never to be used for any trivial
	 * request!
	GEM_BUG_ON(ctx == i915->preempt_context);

	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	if (i915_terminally_wedged(&i915->gpu_error))
		return ERR_PTR(-EIO);

	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for ourselves.
	ce = intel_context_pin(ctx, engine);

	ret = reserve_gt(i915);

	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
	    i915_request_completed(rq))
		i915_request_retire(rq);

	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	rq = kmem_cache_alloc(i915->requests,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
		i915_retire_requests(i915);

		/* Ratelimit ourselves to prevent oom from malicious clients */
		rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
					 &i915->drm.struct_mutex);
			cond_synchronize_rcu(rq->rcustate);

		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);

	rq->rcustate = get_state_synchronize_rcu();

	INIT_LIST_HEAD(&rq->active_list);

	rq->gem_context = ctx;

	rq->timeline = ce->ring->timeline;
	GEM_BUG_ON(rq->timeline == &engine->timeline);

	spin_lock_init(&rq->lock);
	dma_fence_init(&rq->fence,
		       rq->timeline->fence_context,
		       timeline_get_seqno(rq->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
	init_waitqueue_head(&rq->execute);

	i915_sched_node_init(&rq->sched);

	/* No zalloc, must clear what we need by hand */
	rq->global_seqno = 0;
	rq->signaling.wait.seqno = 0;
	rq->file_priv = NULL;
	rq->capture_list = NULL;
	rq->waitboost = false;

	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);

	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	rq->head = rq->ring->emit;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = engine->emit_flush(rq, EMIT_INVALIDATE);

	ret = engine->request_alloc(rq);

	/* Keep a second pin for the dual retirement along engine and ring */
	__intel_context_pin(ce);

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);

	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->active_list));
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

	kmem_cache_free(i915->requests, rq);

	intel_context_unpin(ce);
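/*
 * Order @to after @from: depending on the engines involved, this uses a
 * scheduler dependency, a submit fence, a hardware semaphore or a plain
 * dma-fence wait, and records the result to squash later duplicates.
 */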
i915_request_await_request(struct i915_request *to, struct i915_request *from)
	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(to->i915,

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
		return ret < 0 ? ret : 0;

	if (to->engine->semaphore.sync_to) {

		GEM_BUG_ON(!from->engine->semaphore.signal);

		seqno = i915_request_global_seqno(from);
			goto await_dma_fence;

		if (seqno <= to->timeline->global_sync[from->engine->id])

		trace_i915_gem_ring_sync_to(to, from);
		ret = to->engine->semaphore.sync_to(to, from);

		to->timeline->global_sync[from->engine->id] = seqno;

	ret = i915_sw_fence_await_dma_fence(&to->submit,
	return ret < 0 ? ret : 0;
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;

	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))

		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		if (fence->context == rq->fence.context)

		/* Squash repeated waits to the same timelines */
		if (fence->context != rq->i915->mm.unordered_timeline &&
		    i915_timeline_sync_is_later(rq->timeline, fence))

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,

		/* Record the latest fence used against each timeline */
		if (fence->context != rq->i915->mm.unordered_timeline)
			i915_timeline_sync_set(rq->timeline, fence);
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
	struct dma_fence *excl;

		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);

			dma_fence_put(shared[i]);

		for (; i < count; i++)
			dma_fence_put(shared[i]);

		excl = reservation_object_get_excl_rcu(obj->resv);

		ret = i915_request_await_dma_fence(to, excl);
void i915_request_skip(struct i915_request *rq, int error)
	void *vaddr = rq->ring->vaddr;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
	dma_fence_set_error(&rq->fence, error);

	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	if (rq->postfix < head) {
		memset(vaddr + head, 0, rq->ring->size - head);
	memset(vaddr + head, 0, rq->postfix - head);

 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
void i915_request_add(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;
	struct i915_timeline *timeline = request->timeline;
	struct intel_ring *ring = request->ring;
	struct i915_request *prev;

	GEM_TRACE("%s fence %llx:%d\n",
		  engine->name, request->fence.context, request->fence.seqno);

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_request_add(request);

	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	request->reserved_space = 0;
	engine->emit_flush(request, EMIT_FLUSH);

	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);

	 * Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
		if (engine->schedule)
			__i915_sched_node_add_dependency(&request->sched,

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	if (list_is_first(&request->ring_link, &ring->request_list)) {
		GEM_TRACE("marking %s as active\n", ring->timeline->name);
		list_add(&ring->active_link, &request->i915->gt.active_rings);
	request->emitted_jiffies = jiffies;

	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule) {
		struct i915_sched_attr attr = request->gem_context->sched;

		 * Boost priorities to new clients (new request flows).
		 *
		 * Allow interactive/synchronous clients to jump ahead of
		 * the bulk clients. (FQ_CODEL)
		if (!prev || i915_request_completed(prev))
			attr.priority |= I915_PRIORITY_NEWCLIENT;

		engine->schedule(request, &attr);

	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	if (prev && i915_request_completed(prev))
		i915_request_retire_upto(prev);
static unsigned long local_clock_us(unsigned int *cpu)
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined with respect to the current
	 * CPU; the comparisons are no longer valid if we switch CPUs. Instead
	 * of blocking preemption for the entire busywait, we can detect the
	 * CPU switch and use that as an indicator of system load and a reason
	 * to stop busywaiting, see busywait_stop().
	t = local_clock() >> 10;
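/* Stop busywaiting once the deadline passes or we migrate to another CPU. */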
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))

	return this_cpu != cpu;

static bool __i915_spin_request(const struct i915_request *rq,
				u32 seqno, int state, unsigned long timeout_us)
	struct intel_engine_cs *engine = rq->engine;
	unsigned int irq, cpu;

	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the engine and so we can
	 * tell if the request has started. If the request hasn't started yet,
	 * it is a fair assumption that it will not complete within our
	 * relatively short timeout.
	if (!intel_engine_has_started(engine, seqno))

	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	irq = READ_ONCE(engine->breadcrumbs.irq_count);
	timeout_us += local_clock_us(&cpu);
		if (intel_engine_has_completed(engine, seqno))
			return seqno == i915_request_global_seqno(rq);

		 * Seqnos are meant to be ordered *before* the interrupt. If
		 * we see an interrupt without a corresponding seqno advance,
		 * assume we won't see one in the near future but require
		 * the engine->seqno_barrier() to fixup coherency.
		if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)

		if (signal_pending_state(state, current))

		if (busywait_stop(timeout_us, cpu))

	} while (!need_resched());
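/*
 * If a GPU reset has been handed off to this waiter, perform the reset
 * before resuming the wait; returns true if a reset was carried out.
 */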
static bool __i915_wait_request_check_and_reset(struct i915_request *request)
	struct i915_gpu_error *error = &request->i915->gpu_error;

	if (likely(!i915_reset_handoff(error)))

	__set_current_state(TASK_RUNNING);
	i915_reset(request->i915, error->stalled_mask, error->reason);

 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags; conversely, if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
long i915_request_wait(struct i915_request *rq,
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
	DEFINE_WAIT_FUNC(reset, default_wake_function);
	DEFINE_WAIT_FUNC(exec, default_wake_function);
	struct intel_wait wait;

#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
	GEM_BUG_ON(timeout < 0);

	if (i915_request_completed(rq))

	trace_i915_request_wait_begin(rq, flags);

	add_wait_queue(&rq->execute, &exec);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(errq, &reset);

	intel_wait_init(&wait);
	if (flags & I915_WAIT_PRIORITY)
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);

		set_current_state(state);
		if (intel_wait_update_request(&wait, rq))

		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;

		timeout = io_schedule_timeout(timeout);

	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));

	/* Optimistic short spin before touching IRQs */
	if (__i915_spin_request(rq, wait.seqno, state, 5))
	set_current_state(state);
	if (intel_engine_add_wait(rq->engine, &wait))
		 * In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.

	if (flags & I915_WAIT_LOCKED)
		__i915_wait_request_check_and_reset(rq);

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait) &&
		    intel_wait_check_request(&wait, rq))

		set_current_state(state);

		 * Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		if (__i915_request_irq_complete(rq))

		 * If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))

		/* Only spin if we know the GPU is processing this request */
		if (__i915_spin_request(rq, wait.seqno, state, 2))

		if (!intel_wait_check_request(&wait, rq)) {
			intel_engine_remove_wait(rq->engine, &wait);

	intel_engine_remove_wait(rq->engine, &wait);

	__set_current_state(TASK_RUNNING);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(errq, &reset);
	remove_wait_queue(&rq->execute, &exec);
	trace_i915_request_wait_end(rq);
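/*
 * Retire all completed requests on @ring, oldest first, stopping at the
 * first request that has not yet completed.
 */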
static void ring_retire_requests(struct intel_ring *ring)
	struct i915_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &ring->request_list, ring_link) {
		if (!i915_request_completed(request))

		i915_request_retire(request);
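/*
 * Retire completed requests across all active rings; called with
 * struct_mutex held, and a no-op when there are no active requests.
 */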
void i915_retire_requests(struct drm_i915_private *i915)
	struct intel_ring *ring, *tmp;

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!i915->gt.active_requests)

	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
		ring_retire_requests(ring);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"