/*	$NetBSD: i915_request.h,v 1.4 2021/12/19 11:36:17 riastradh Exp $	*/

/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
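
/*
 * Illustrative use of RQ_TRACE() (a hypothetical call site, not taken from
 * this file): the macro prefixes the engine trace with the request's fence
 * id and the breadcrumb currently visible in its HWSP, so callers supply
 * only the event-specific tail of the message, e.g.
 *
 *	RQ_TRACE(rq, "submitted\n");
 */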

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking the
	 * GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly. (A sketch of the
 * resulting lookup pattern follows the struct definition.)
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The device on which this request was generated. */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
#ifdef __NetBSD__		/* XXX */
		struct i915_sw_fence_waiter submitq;
#else
		wait_queue_entry_t submitq;
#endif
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed and guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting this request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
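
/*
 * A sketch of the lockless lookup pattern the RCU caveat above protects
 * (an assumed usage pattern, not a verbatim call site; `slot' is a
 * hypothetical RCU-protected pointer): a pointer read under rcu_read_lock()
 * may catch a request being recycled through the slab, so the reference
 * must be acquired conditionally and may fail:
 *
 *	rcu_read_lock();
 *	rq = READ_ONCE(*slot);
 *	if (rq)
 *		rq = i915_request_get_rcu(rq); // NULL if being freed
 *	rcu_read_unlock();
 */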

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
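
/*
 * Illustrative sketch (an assumed pattern, not a verbatim call site): a
 * caller that wants to use a request after dropping whatever lock made the
 * pointer stable must first take its own reference, and balance it with a
 * put:
 *
 *	struct i915_request *rq = i915_request_get(pos);
 *
 *	spin_unlock_irq(&engine->active.lock);
 *	... inspect or wait upon rq ...
 *	i915_request_put(rq);
 */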

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
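
/*
 * Lifecycle sketch (illustrative; error handling trimmed, and `ce', `fence'
 * and `err' are assumed locals): a kernel-internal request is created on an
 * intel_context, collects its dependencies, and is then committed with
 * i915_request_add(). A reference must be taken before the add if the
 * caller intends to wait, as the request may otherwise be retired and freed
 * behind its back:
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_dma_fence(rq, fence);
 *
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 *	if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ) < 0)
 *		err = -ETIME;
 *	i915_request_put(rq);
 */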

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than, or equal to, seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
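
/*
 * Worked example: the comparison is wrap-safe because the subtraction is
 * performed in modular u32 arithmetic before the signed test. With
 * seq1 = 2 and seq2 = 0xfffffffe (just before the seqno wraps),
 * (s32)(2 - 0xfffffffe) == (s32)4 >= 0, so seq1 is correctly treated as
 * later even though it is numerically smaller.
 */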

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it began executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and not busywaiting).
 * Note that it may no longer be running by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
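
/*
 * Putting the two breadcrumb tests together (illustrative): for a request
 * with rq->fence.seqno == N, __i915_request_has_started() reports true once
 * the HWSP breadcrumb reaches N - 1 (the previous request's seqno, or this
 * request's initial breadcrumb), while i915_request_completed() reports
 * true once the breadcrumb reaches N itself.
 */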

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */