/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_sw_fence.h"

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct dma_fence fence;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_signal_node signaling;

	struct i915_sw_fence submit;

	/** GEM sequence number associated with the previous request,
	 * when the HWS breadcrumb is equal to this the GPU is processing
	 * this request.
	 */
	u32 previous_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Context related to the previous request.
	 * As the contexts are accessed by the hardware until the switch is
	 * completed to a new context, the hardware may still be writing
	 * to the context object after the breadcrumb is visible. We must
	 * not unpin/unbind/prune that object whilst still active and so
	 * we keep the previous context pinned until the following (this)
	 * request is retired.
	 */
	struct i915_gem_context *previous_context;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	/** Link in the execlist submission queue, guarded by execlist_lock. */
	struct list_head execlist_link;
};

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->fence.seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}

static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	dma_fence_put(&req->fence);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}
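
/* Illustrative usage note (not from this header): i915_gem_request_assign()
 * swaps a stored request pointer while keeping the reference counts balanced.
 * For a hypothetical holder "foo->last_request":
 *
 *	i915_gem_request_assign(&foo->last_request, req);
 *
 * takes a reference on @req (if non-NULL), drops the reference held on the
 * previously stored request (if any) and stores @req in its place.
 */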

int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				     struct dma_fence *fence);

void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, false)

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
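
/* Worked example (illustrative): the signed subtraction keeps the comparison
 * correct across seqno wraparound. With seq1 == 1 just after the counter has
 * wrapped and seq2 == 0xffffffff, (s32)(1 - 0xffffffff) == 2 >= 0, so seq1 is
 * correctly reported as later, whereas a plain unsigned compare would not be.
 */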

static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->previous_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->fence.seqno);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
			 int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
				     int state, unsigned long timeout_us)
{
	return (i915_gem_request_started(request) &&
		__i915_spin_request(request, state, timeout_us));
}

/* We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTE whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */

struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);
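
/* A minimal usage sketch (illustrative only; "struct foo", the pointer "f"
 * and the retire callback below are hypothetical, not part of this header):
 *
 *	struct foo {
 *		struct i915_gem_active activity;
 *	};
 *
 *	static void foo_retire(struct i915_gem_active *active,
 *			       struct drm_i915_gem_request *request)
 *	{
 *		// invoked once the tracked request is retired;
 *		// active->request has already been cleared to NULL
 *	}
 *
 *	// once, when f is created:
 *	init_request_active(&f->activity, foo_retire);
 *	// whenever f is used by a new request, while emitting it:
 *	i915_gem_active_set(&f->activity, request);
 *
 * Real users follow this pattern, e.g. i915_vma_move_to_active() updating the
 * trackers embedded in an i915_vma.
 */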

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *	     may be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, when it is retired
 * after completion, the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request, or
 * NULL if the active tracker is idle. The caller must hold the RCU read lock,
 * but the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 *	Thread A			Thread B
	 *
	 *	req = active.request
	 *					retire(req) -> free(req);
	 *					(req is now first on the slab freelist)
	 *					active.request = NULL
	 *
	 *					req = new submission on a new object
	 *	ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_gem_request_alloc().
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		/* An especially silly compiler could decide to recompute the
		 * result of i915_gem_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_gem_request_get_rcu, but not executing
		 * that while still executing i915_gem_request_put() creates
		 * havoc enough. Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
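
/* Illustrative only: a caller that does not hold struct_mutex can sample an
 * active tracker, but must release the reference it receives, e.g. with a
 * hypothetical tracker pointer "tracker":
 *
 *	struct drm_i915_gem_request *req;
 *
 *	req = i915_gem_active_get_unlocked(tracker);
 *	if (req) {
 *		// inspect or wait upon req without any locks held
 *		i915_gem_request_put(req);
 *	}
 */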

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_is_idle - report whether the active tracker is idle
 * @active - the active tracker
 *
 * i915_gem_active_is_idle() returns true if the active tracker is currently
 * unassigned or if the request is complete (but not yet retired). Requires
 * the caller to hold struct_mutex (but that can be relaxed if desired).
 */
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
			struct mutex *mutex)
{
	return !i915_gem_active_peek(active, mutex);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning. Note that it does not guarantee that the request is
 * retired first, see i915_gem_active_retire().
 *
 * i915_gem_active_wait() returns immediately if the active
 * request is already complete.
 */
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;
	long ret;

	request = i915_gem_active_peek(active, mutex);
	if (!request)
		return 0;

	ret = i915_wait_request(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_wait_unlocked - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait_unlocked() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * glory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the available @flags.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
			      unsigned int flags)
{
	struct drm_i915_gem_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_gem_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct drm_i915_gem_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_wait_request(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

/* Convenience functions for peeking at state inside active's request whilst
 * guarded by the struct_mutex.
 */

static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
			  struct mutex *mutex)
{
	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}

static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
			   struct mutex *mutex)
{
	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
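
/* Illustrative only: for_each_active() visits each bit set in @mask, assigning
 * the bit index to @idx and clearing that bit before the next pass; it
 * therefore consumes @mask, so pass a local copy. With a hypothetical bitmask
 * of busy engines and a hypothetical per-engine helper:
 *
 *	unsigned int mask = busy_engines;
 *	int idx;
 *
 *	for_each_active(mask, idx)
 *		handle_engine(idx);
 */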

#endif /* I915_GEM_REQUEST_H */