/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct drm_i915_gem_request;

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	struct drm_i915_gem_request *request;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

struct i915_dependency {
	struct i915_priotree *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

/* Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 */
struct i915_priotree {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	int priority;
};
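
/* For illustration, suppose request B depends on request A (the request
 * names are hypothetical, and this is only a sketch of the bookkeeping):
 * a struct i915_dependency is allocated to join the two, appearing on A's
 * waiters_list (A signals) and on B's signalers_list (B waits), with
 * dep->signaler pointing back at A's priotree. The scheduler can then
 * walk the dependency DAG in either direction, e.g. via dfs_link, when
 * rebalancing priorities.
 */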

enum {
	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,

	I915_PRIORITY_INVALID = INT_MIN
};

struct i915_gem_capture_list {
	struct i915_gem_capture_list *next;
	struct i915_vma *vma;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The device on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	struct intel_signal_node signaling;

	/* Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;
	wait_queue_head_t execute;

	/* A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_priotree priotree;
	struct i915_dependency dep;

	/** GEM sequence number associated with this request on the
	 * global execution timeline. It is zero when the request is not
	 * on the HW queue (i.e. not on the engine timeline list).
	 * Its value is guarded by the timeline spinlock.
	 */
	u32 global_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting this request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/** Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_gem_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	bool waitboost;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;
};

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
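
/* A typical request lifecycle, as a sketch only (error handling, command
 * emission and the surrounding struct_mutex locking are elided; 'engine'
 * and 'ctx' are assumed to come from the caller):
 *
 *	struct drm_i915_gem_request *rq;
 *
 *	rq = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... emit commands into rq->ring ...
 *
 *	i915_add_request(rq);
 */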

static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	dma_fence_put(&req->fence);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}
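
/* i915_gem_request_assign() transfers a tracked pointer with correct
 * reference accounting: it acquires a reference on @src (if any), drops
 * the reference on the request previously held in *@pdst (if any), and
 * stores @src. An illustrative use (the 'cache->rq' slot is hypothetical):
 *
 *	i915_gem_request_assign(&cache->rq, rq);	// start tracking rq
 *	...
 *	i915_gem_request_assign(&cache->rq, NULL);	// drop the reference
 */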

/**
 * i915_gem_request_global_seqno - report the current global seqno
 * @request - the request
 *
 * A request is assigned a global seqno only when it is on the hardware
 * execution queue. The global seqno can be used to maintain a list of
 * requests on the same engine in retirement order, for example for
 * constructing a priority queue for waiting. Prior to its execution, or
 * if it is subsequently removed in the event of preemption, its global
 * seqno is zero. As both insertion and removal from the execution queue
 * may operate in IRQ context, it is not guarded by the usual struct_mutex
 * BKL. Instead those relying on the global seqno must be prepared for its
 * value to change between reads. Only when the request is complete can
 * the global seqno be considered stable: thanks to the memory barriers on
 * submitting the commands to the hardware to write the breadcrumb, if the
 * HWS shows that it has passed the global seqno and the global seqno is
 * unchanged after the read, the request is indeed complete.
 */
static inline u32
i915_gem_request_global_seqno(const struct drm_i915_gem_request *request)
{
	return READ_ONCE(request->global_seqno);
}

int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				     struct dma_fence *fence);

void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, false)

void __i915_gem_request_submit(struct drm_i915_gem_request *request);
void i915_gem_request_submit(struct drm_i915_gem_request *request);

void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
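
/* i915_wait_request() returns the remaining timeout in jiffies if the
 * request completed (which may be zero), or a negative error code such as
 * -ETIME on timeout or -ERESTARTSYS when interrupted. A bounded,
 * interruptible wait might look like the sketch below (obtaining and
 * releasing the reference on 'rq' is the caller's responsibility):
 *
 *	long timeout;
 *
 *	timeout = i915_wait_request(rq, I915_WAIT_INTERRUPTIBLE,
 *				    msecs_to_jiffies(100));
 *	if (timeout < 0)
 *		return timeout;
 */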

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than or equal to seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
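
/* The signed subtraction makes the comparison robust against seqno
 * wraparound. For example, shortly after the 32-bit counter wraps,
 * seq1 == 2 and seq2 == 0xfffffffe gives (s32)(2 - 0xfffffffe) == 4 >= 0,
 * so seq1 is correctly reported as having passed seq2 even though it is
 * numerically smaller. The comparison is only meaningful while the two
 * seqnos are within 2^31 of each other.
 */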

static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) &&
		seqno == i915_gem_request_global_seqno(req);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	u32 seqno;

	seqno = i915_gem_request_global_seqno(req);
	if (!seqno)
		return false;

	return __i915_gem_request_completed(req, seqno);
}

/* We treat requests as fences. This is not to be confused with our
 * "fence registers"; these are pipeline synchronisation objects ala
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU, for example, we should not rewrite an object's PTE
 * whilst the GPU is reading them. We also track fences at a higher level
 * to provide implicit synchronisation around GEM objects, e.g. set-domain
 * will wait for outstanding GPU rendering before marking the object ready
 * for CPU access, or a pageflip will wait until the GPU is complete before
 * showing the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle);
 *           may be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle (is retired after
 * completion), the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}
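
/* A sketch of setting up a tracker with a retirement callback. The owning
 * structure and the callback here are hypothetical:
 *
 *	struct hypothetical_obj {
 *		struct i915_gem_active last_write;
 *	};
 *
 *	static void obj_retire(struct i915_gem_active *active,
 *			       struct drm_i915_gem_request *rq)
 *	{
 *		// invoked when the tracked rq is retired
 *	}
 *
 *	init_request_active(&obj->last_write, obj_retire);
 */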

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct lock *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct lock *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct lock *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct lock *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * req = active.request
	 *				retire(req) -> free(req);
	 *				(req is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				req = new submission on a new object
	 * ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that either
	 * the seqno or the HWS is the right one! However, if the request
	 * was reallocated, that means the active tracker's request was
	 * complete. If the new request is also complete, then both are and
	 * we can just report the active tracker is idle. If the new request
	 * is incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_gem_request_alloc().
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		/* An especially silly compiler could decide to recompute the
		 * result of i915_gem_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_gem_request_get_rcu, but not executing
		 * that while still executing i915_gem_request_put() creates
		 * havoc enough.  Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference count
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
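
/* A sketch of lock-free use: grab a reference to whatever request the
 * tracker currently holds, wait on it without struct_mutex, then drop the
 * reference ('obj->last_write' is a hypothetical tracker):
 *
 *	struct drm_i915_gem_request *rq;
 *
 *	rq = i915_gem_active_get_unlocked(&obj->last_write);
 *	if (rq) {
 *		i915_wait_request(rq, I915_WAIT_INTERRUPTIBLE,
 *				  MAX_SCHEDULE_TIMEOUT);
 *		i915_gem_request_put(rq);
 *	}
 */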

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct drm_i915_gem_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_gem_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct lock *mutex)
{
	struct drm_i915_gem_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_wait_request(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}
#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
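
/* for_each_active() iterates over each set bit in @mask, lowest first,
 * destructively clearing @mask as it goes and leaving the bit index in
 * @idx. For illustration ('engine_mask' and process_engine() are
 * hypothetical):
 *
 *	unsigned int mask = engine_mask;
 *	int idx;
 *
 *	for_each_active(mask, idx)
 *		process_engine(idx);
 */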

#endif /* I915_GEM_REQUEST_H */