/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_sw_fence.h"

struct drm_file;
struct drm_i915_gem_object;

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

struct i915_dependency {
	struct i915_priotree *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

/* Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 */
struct i915_priotree {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct rb_node node;
	int priority;
#define I915_PRIORITY_MAX 1024
#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
};
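
/* Illustrative sketch (not part of this header): walking the dependency
 * tree of a request. The list/link pairing below is our assumption for
 * illustration; the scheduler is the authoritative user of these lists.
 *
 *	struct i915_dependency *dep;
 *
 *	// everyone we must wait upon before we are ready to run;
 *	// each dep sits on our signalers_list via its signal_link
 *	list_for_each_entry(dep, &pt->signalers_list, signal_link)
 *		inspect(dep->signaler);
 *
 *	// everyone who, in turn, waits upon us is chained to our
 *	// waiters_list through dep->wait_link
 */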

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	struct intel_signal_node signaling;

	/* Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 *
	 * The execute fence is used to signal when the request has been
	 * sent to hardware.
	 *
	 * It is illegal for the submit fence of one request to wait upon the
	 * execute fence of an earlier request. It should be sufficient to
	 * wait upon the submit fence of the earlier request.
	 */
	struct i915_sw_fence submit;
	struct i915_sw_fence execute;
	wait_queue_t submitq;
	wait_queue_t execq;

	/* A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_priotree priotree;
	struct i915_dependency dep;

	u32 global_seqno;

	/** GEM sequence number associated with the previous request;
	 * when the HWS breadcrumb is equal to this, the GPU is processing
	 * this request.
	 */
	u32 previous_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Context related to the previous request.
	 * As the contexts are accessed by the hardware until the switch is
	 * completed to a new context, the hardware may still be writing
	 * to the context object after the breadcrumb is visible. We must
	 * not unpin/unbind/prune that object whilst still active and so
	 * we keep the previous context pinned until the following (this)
	 * request is retired.
	 */
	struct i915_gem_context *previous_context;

	/** Batch buffer related to this request, if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	dma_fence_put(&req->fence);
}
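
/* Illustrative sketch (not part of the API): holding a reference across
 * a wait so the request cannot be freed (or reused) underneath us.
 *
 *	struct drm_i915_gem_request *rq;
 *
 *	rq = i915_gem_request_get(request);	// +1 reference
 *	... use rq, e.g. i915_wait_request(rq, flags, timeout) ...
 *	i915_gem_request_put(rq);		// drop our reference
 */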

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}
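
/* Illustrative sketch: i915_gem_request_assign() performs the get/put
 * pairing when swapping a tracked request pointer, coping with either
 * pointer being NULL. "tracker" is a hypothetical field for illustration.
 *
 *	i915_gem_request_assign(&obj->tracker, rq);	// ref rq, unref old
 *	...
 *	i915_gem_request_assign(&obj->tracker, NULL);	// release
 */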

int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				     struct dma_fence *fence);

void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, false)

void __i915_gem_request_submit(struct drm_i915_gem_request *request);
void i915_gem_request_submit(struct drm_i915_gem_request *request);

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
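
/* Worked example of the wraparound-safe comparison above: with
 * seq1 = 0x00000002 and seq2 = 0xfffffffe (just before the seqno wraps),
 * (s32)(seq1 - seq2) == 4 >= 0, so seq1 is correctly reported as later
 * even though it is numerically smaller. The comparison is valid whilst
 * the two seqno remain within 2^31 of each other.
 */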

static inline bool
__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	GEM_BUG_ON(!req->global_seqno);
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->previous_seqno);
}

static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	if (!req->global_seqno)
		return false;

	return __i915_gem_request_started(req);
}

static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	GEM_BUG_ON(!req->global_seqno);
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->global_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	if (!req->global_seqno)
		return false;

	return __i915_gem_request_completed(req);
}
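
/* Illustrative values: with previous_seqno == 8 and global_seqno == 9,
 * a HWS breadcrumb of 8 means every earlier request has passed, so this
 * request has started (but not completed); a breadcrumb of 9 or later
 * means this request itself has completed.
 */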

bool __i915_spin_request(const struct drm_i915_gem_request *request,
			 int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
				     int state, unsigned long timeout_us)
{
	return (__i915_gem_request_started(request) &&
		__i915_spin_request(request, state, timeout_us));
}
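
/* Illustrative sketch: a brief busywait before committing to sleep, as
 * done by the wait path. The 5us budget here is an assumption for
 * illustration, not a recommendation.
 *
 *	if (i915_spin_request(rq, TASK_INTERRUPTIBLE, 5))
 *		return 0;	// completed within 5us, no need to sleep
 *	... otherwise set up the interrupt-driven wait ...
 */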

/* We treat requests as fences. This is not to be confused with our
 * "fence registers"; these are pipeline synchronisation objects ala
 * GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTE whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *           can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle (it is retired
 * after completion), the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}
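
/* Illustrative sketch of the tracker lifecycle (names hypothetical):
 *
 *	struct foo {
 *		struct i915_gem_active active;
 *	};
 *
 *	static void foo_retire(struct i915_gem_active *active,
 *			       struct drm_i915_gem_request *rq)
 *	{
 *		// invoked under struct_mutex once rq has been retired
 *	}
 *
 *	init_request_active(&foo->active, foo_retire);
 *	...
 *	i915_gem_active_set(&foo->active, rq);	// track the new request
 */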

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct lock *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct lock *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct lock *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct lock *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request,
 * or NULL if the active tracker is idle. The caller must hold the RCU read
 * lock, but the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * req = active.request
	 *				retire(req) -> free(req);
	 *				(req is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				req = new submission on a new object
	 * ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_gem_request_alloc().
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		/* An especially silly compiler could decide to recompute the
		 * result of i915_gem_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_gem_request_get_rcu, but not executing
		 * that while still executing i915_gem_request_put() creates
		 * havoc enough.  Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}
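
/* Illustrative sketch: inspecting the active request without holding
 * struct_mutex ("active" here is a hypothetical embedded tracker);
 * compare i915_gem_active_wait() below, which wraps the same pattern.
 *
 *	rq = i915_gem_active_get_unlocked(&obj->active);
 *	if (rq) {
 *		... inspect or wait upon rq ...
 *		i915_gem_request_put(rq);
 *	}
 */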

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct drm_i915_gem_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_gem_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct lock *mutex)
{
	struct drm_i915_gem_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_wait_request(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
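
/* Illustrative usage: iterate @idx over each set bit of @mask, lowest
 * bit first; note that @mask is consumed by the loop. The names below
 * are hypothetical.
 *
 *	unsigned int mask = obj->active_mask;
 *	int idx;
 *
 *	for_each_active(mask, idx)
 *		retire_tracker(&obj->trackers[idx]);
 */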

#endif /* I915_GEM_REQUEST_H */