/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/fence.h>

#include "i915_gem.h"

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_signal_node signaling;
	/** GEM sequence number associated with the previous request,
	 * when the HWS breadcrumb is equal to this the GPU is processing
	 * this request.
	 */
	u32 previous_seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/** Preallocated space in the ringbuffer for emitting the request */
	u32 reserved_space;
	/**
	 * Context related to the previous request.
	 * As the contexts are accessed by the hardware until the switch is
	 * completed to a new context, the hardware may still be writing
	 * to the context object after the breadcrumb is visible. We must
	 * not unpin/unbind/prune that object whilst still active and so
	 * we keep the previous context pinned until the following (this)
	 * request is retired.
	 */
	struct i915_gem_context *previous_context;
	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU): We
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Execlists no. of times this request has been sent to the ELSP */
	int elsp_submitted;

	/** Execlists context hardware id. */
	unsigned int ctx_hw_id;
};

extern const struct fence_ops i915_fence_ops;

static inline bool fence_is_i915(struct fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline u32
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->fence.seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
	return req ? req->engine : NULL;
}

static inline struct drm_i915_gem_request *
to_request(struct fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}
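
/* Illustrative sketch (not part of the original header): because the fence is
 * the first member of the request, code handed a struct fence that is known
 * to be an i915 fence (e.g. inside a fence callback registered with
 * fence_add_callback() against &req->fence) can recover the request directly.
 * The callback name below is hypothetical.
 *
 *	static void example_fence_cb(struct fence *fence, struct fence_cb *cb)
 *	{
 *		struct drm_i915_gem_request *req = to_request(fence);
 *
 *		// req->engine, req->ctx etc. are now addressable
 *	}
 */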

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	fence_put(&req->fence);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}
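
/* Usage sketch (illustrative only): i915_gem_request_assign() replaces a
 * tracked request pointer while keeping the refcounts balanced, so a caller
 * can overwrite a slot without writing manual get/put pairs. The field name
 * below is hypothetical.
 *
 *	i915_gem_request_assign(&state->last_req, req);
 *	...
 *	i915_gem_request_assign(&state->last_req, NULL);	// drops the ref
 */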

void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, false)

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

int i915_wait_request(struct drm_i915_gem_request *req,
		      bool interruptible,
		      s64 *timeout,
		      struct intel_rps_client *rps)
	__attribute__((nonnull(1)));

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
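
/* Worked example: the signed-difference trick is wraparound-safe. With
 * seq1 = 5 and seq2 = 0xfffffffe (emitted just before the u32 counter
 * wrapped), seq1 - seq2 == 7 and (s32)7 >= 0 correctly reports seq1 as
 * later, whereas a naive seq1 >= seq2 comparison would not. The result is
 * correct provided the two seqnos are less than 2^31 apart.
 */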

static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->previous_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->fence.seqno);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
			 int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
				     int state, unsigned long timeout_us)
{
	return (i915_gem_request_started(request) &&
		__i915_spin_request(request, state, timeout_us));
}
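
/* Usage sketch (illustrative only): a waiter can busy-spin briefly before
 * committing to a full sleeping wait, trading a little CPU time for lower
 * latency on requests that are about to complete. The 5us budget is an
 * arbitrary example value.
 *
 *	if (!i915_spin_request(req, TASK_INTERRUPTIBLE, 5))
 *		ret = i915_wait_request(req, true, NULL, NULL);
 */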

/* We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTE whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @func - a callback when the tracker is retired (becomes idle),
 *         can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, when it is retired
 * after completion, the optional callback @func is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}
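
/* Lifecycle sketch (illustrative only): an owner embeds the tracker, gives
 * it a retirement callback, and then points it at each new request it emits.
 * The struct and function names here are hypothetical.
 *
 *	struct example_resource {
 *		struct i915_gem_active active;
 *	};
 *
 *	static void example_retire(struct i915_gem_active *active,
 *				   struct drm_i915_gem_request *request)
 *	{
 *		// the tracker is now idle; release resources as needed
 *	}
 *
 *	init_request_active(&res->active, example_retire);
 *	i915_gem_active_set(&res->active, request);	// under struct_mutex
 */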

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold the RCU read lock, but
 * the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * req = active.request
	 *				retire(req) -> free(req);
	 *				(req is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				req = new submission on a new object
	 * ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_gem_request_alloc().
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		/* An especially silly compiler could decide to recompute the
		 * result of i915_gem_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_gem_request_get_rcu, but not executing
		 * that while still executing i915_gem_request_put() creates
		 * havoc enough. Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * reordering compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
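
/* Usage sketch (illustrative only): because the reference is obtained under
 * RCU internally, a caller can sample and wait upon the tracked request
 * without taking struct_mutex. The tracker name below is hypothetical.
 *
 *	struct drm_i915_gem_request *req;
 *
 *	req = i915_gem_active_get_unlocked(&obj_active);
 *	if (req) {
 *		i915_wait_request(req, true, NULL, NULL);
 *		i915_gem_request_put(req);
 *	}
 */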

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_is_idle - report whether the active tracker is idle
 * @active - the active tracker
 *
 * i915_gem_active_is_idle() returns true if the active tracker is currently
 * unassigned or if the request is complete (but not yet retired). Requires
 * the caller to hold struct_mutex (but that can be relaxed if desired).
 */
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active,
			struct mutex *mutex)
{
	return !i915_gem_active_peek(active, mutex);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning. Note that it does not guarantee that the request is
 * retired first, see i915_gem_active_retire().
 *
 * i915_gem_active_wait() returns immediately if the active
 * request is already complete.
 */
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_peek(active, mutex);
	if (!request)
		return 0;

	return i915_wait_request(request, true, NULL, NULL);
}

/**
 * i915_gem_active_wait_unlocked - waits until the request is completed
 * @active - the active request on which to wait
 * @interruptible - whether the wait can be woken by a userspace signal
 * @timeout - how long to wait at most
 * @rps - userspace client to charge for a waitboost
 *
 * i915_gem_active_wait_unlocked() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
			      bool interruptible,
			      s64 *timeout,
			      struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *request;
	int ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, interruptible, timeout, rps);
		i915_gem_request_put(request);
	}

	return ret;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct drm_i915_gem_request *request;
	int ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_wait_request(request, true, NULL, NULL);
	if (ret)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

/* Convenience functions for peeking at state inside active's request whilst
 * guarded by the struct_mutex.
 */

static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active,
			  struct mutex *mutex)
{
	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
}

static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active,
			   struct mutex *mutex)
{
	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
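
/* Usage sketch (illustrative only): the macro iterates over the set bits of
 * @mask, clearing each bit as it goes, so @idx visits every index with an
 * active reference. Note that @mask is consumed, so pass a local copy. The
 * variable and helper names below are hypothetical.
 *
 *	unsigned long active_mask = obj_active_mask;	// consumed by the loop
 *	int idx;
 *
 *	for_each_active(active_mask, idx)
 *		process_engine(idx);
 */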

#endif /* I915_GEM_REQUEST_H */