1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2014 Intel Corporation
4 */
5
6 #include <linux/circ_buf.h>
7
8 #include "gem/i915_gem_context.h"
9 #include "gem/i915_gem_lmem.h"
10 #include "gt/gen8_engine_cs.h"
11 #include "gt/intel_breadcrumbs.h"
12 #include "gt/intel_context.h"
13 #include "gt/intel_engine_heartbeat.h"
14 #include "gt/intel_engine_pm.h"
15 #include "gt/intel_engine_regs.h"
16 #include "gt/intel_gpu_commands.h"
17 #include "gt/intel_gt.h"
18 #include "gt/intel_gt_clock_utils.h"
19 #include "gt/intel_gt_irq.h"
20 #include "gt/intel_gt_pm.h"
21 #include "gt/intel_gt_regs.h"
22 #include "gt/intel_gt_requests.h"
23 #include "gt/intel_lrc.h"
24 #include "gt/intel_lrc_reg.h"
25 #include "gt/intel_mocs.h"
26 #include "gt/intel_ring.h"
27
28 #include "intel_guc_ads.h"
29 #include "intel_guc_capture.h"
30 #include "intel_guc_print.h"
31 #include "intel_guc_submission.h"
32
33 #include "i915_drv.h"
34 #include "i915_reg.h"
35 #include "i915_irq.h"
36 #include "i915_trace.h"
37
38 /**
39 * DOC: GuC-based command submission
40 *
41 * The Scratch registers:
42 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
43 * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
44 * triggers an interrupt on the GuC via another register write (0xC4C8).
45 * Firmware writes a success/fail code back to the action register after
46 * processing the request. The kernel driver polls waiting for this update and
47 * then proceeds.
48 *
49 * Command Transport buffers (CTBs):
50 * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
51 * - G2H) are a message interface between the i915 and GuC.
52 *
53 * Context registration:
54 * Before a context can be submitted it must be registered with the GuC via a
55 * H2G. A unique guc_id is associated with each context. The context is either
56 * registered at request creation time (normal operation) or at submission time
57 * (abnormal operation, e.g. after a reset).
58 *
59 * Context submission:
60 * The i915 updates the LRC tail value in memory. The i915 must enable the
61 * scheduling of the context within the GuC for the GuC to actually consider it.
62 * Therefore, the first time a disabled context is submitted we use a schedule
63 * enable H2G, while follow up submissions are done via the context submit H2G,
64 * which informs the GuC that a previously enabled context has new work
65 * available.
66 *
67 * Context unpin:
68 * To unpin a context a H2G is used to disable scheduling. When the
69 * corresponding G2H returns indicating the scheduling disable operation has
70 * completed it is safe to unpin the context. While a disable is in flight it
71 * isn't safe to resubmit the context so a fence is used to stall all future
72 * requests of that context until the G2H is returned. Because this interaction
73 * with the GuC takes a non-zero amount of time we delay the disabling of
74 * scheduling after the pin count goes to zero by a configurable period of time
75 * (see SCHED_DISABLE_DELAY_MS). The thought is this gives the user a window of
76 * time to resubmit something on the context before doing this costly operation.
77 * This delay is only done if the context isn't closed and the guc_id usage is
78 * less than a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD).
79 *
80 * Context deregistration:
81 * Before a context can be destroyed or if we steal its guc_id we must
82 * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
83 * safe to submit anything to this guc_id until the deregister completes so a
84 * fence is used to stall all requests associated with this guc_id until the
85 * corresponding G2H returns indicating the guc_id has been deregistered.
86 *
87 * submission_state.guc_ids:
88 * Unique number associated with private GuC context data passed in during
89 * context registration / submission / deregistration. 64k available. Simple ida
90 * is used for allocation.
91 *
92 * Stealing guc_ids:
93 * If no guc_ids are available they can be stolen from another context at
94 * request creation time if that context is unpinned. If a guc_id can't be found
95 * we punt this problem to the user as we believe this is near impossible to hit
96 * during normal use cases.
97 *
98 * Locking:
99 * In the GuC submission code we have 3 basic spin locks which protect
100 * everything. Details about each below.
101 *
102 * sched_engine->lock
103 * This is the submission lock for all contexts that share an i915 schedule
104 * engine (sched_engine), thus only one of the contexts which share a
105 * sched_engine can be submitting at a time. Currently only one sched_engine is
106 * used for all of GuC submission but that could change in the future.
107 *
108 * guc->submission_state.lock
109 * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
110 * list.
111 *
112 * ce->guc_state.lock
113 * Protects everything under ce->guc_state. Ensures that a context is in the
114 * correct state before issuing a H2G. e.g. We don't issue a schedule disable
115 * on a disabled context (bad idea), we don't issue a schedule enable when a
116 * schedule disable is in flight, etc... Also protects list of inflight requests
117 * on the context and the priority management state. Lock is individual to each
118 * context.
119 *
120 * Lock ordering rules:
121 * sched_engine->lock -> ce->guc_state.lock
122 * guc->submission_state.lock -> ce->guc_state.lock
123 *
124 * Reset races:
125 * When a full GT reset is triggered it is assumed that some G2H responses to
126 * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
127 * fatal as we do certain operations upon receiving a G2H (e.g. destroy
128 * contexts, release guc_ids, etc...). When this occurs we can scrub the
129 * context state and clean up appropriately; however, this is quite racy.
130 * To avoid races, the reset code must disable submission before scrubbing for
131 * the missing G2H, while the submission code must check for submission being
132 * disabled and skip sending H2Gs and updating context states when it is. Both
133 * sides must also make sure to hold the relevant locks.
134 */
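
/*
 * Illustrative sketch of the lock ordering rules above (not a real code path;
 * the local variable names are placeholders): whenever both locks are needed,
 * sched_engine->lock or guc->submission_state.lock is taken first and the
 * per-context ce->guc_state.lock second, never the other way around:
 *
 *	spin_lock_irqsave(&sched_engine->lock, flags);
 *	spin_lock(&ce->guc_state.lock);
 *	... inspect / update per-context GuC scheduling state ...
 *	spin_unlock(&ce->guc_state.lock);
 *	spin_unlock_irqrestore(&sched_engine->lock, flags);
 */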
135
136 /* GuC Virtual Engine */
137 struct guc_virtual_engine {
138 struct intel_engine_cs base;
139 struct intel_context context;
140 };
141
142 static struct intel_context *
143 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
144 unsigned long flags);
145
146 static struct intel_context *
147 guc_create_parallel(struct intel_engine_cs **engines,
148 unsigned int num_siblings,
149 unsigned int width);
150
151 #define GUC_REQUEST_SIZE 64 /* bytes */
152
153 /*
154 * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
155 * per the GuC submission interface. A different allocation algorithm is used
156 * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
157 * partition the guc_id space. We believe the number of multi-lrc contexts in
158 * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
159 * multi-lrc.
160 */
161 #define NUMBER_MULTI_LRC_GUC_ID(guc) \
162 ((guc)->submission_state.num_guc_ids / 16)
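
/*
 * Worked example, assuming the full 64k guc_id pool mentioned in the DOC
 * comment above: 65536 / 16 = 4096 contiguous guc_ids are reserved for
 * multi-lrc, leaving 61440 for single-lrc allocation via the ida.
 */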
163
164 /*
165 * Below is a set of functions which control the GuC scheduling state which
166 * require a lock.
167 */
168 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
169 #define SCHED_STATE_DESTROYED BIT(1)
170 #define SCHED_STATE_PENDING_DISABLE BIT(2)
171 #define SCHED_STATE_BANNED BIT(3)
172 #define SCHED_STATE_ENABLED BIT(4)
173 #define SCHED_STATE_PENDING_ENABLE BIT(5)
174 #define SCHED_STATE_REGISTERED BIT(6)
175 #define SCHED_STATE_POLICY_REQUIRED BIT(7)
176 #define SCHED_STATE_CLOSED BIT(8)
177 #define SCHED_STATE_BLOCKED_SHIFT 9
178 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
179 #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
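
/*
 * Note that SCHED_STATE_BLOCKED is not a single flag but the unit of a 12-bit
 * block count stored in bits [20:9] of sched_state (SCHED_STATE_BLOCKED_MASK).
 * incr_context_blocked() / decr_context_blocked() below add and subtract this
 * unit so that nested blocks are tracked without any extra storage.
 */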
180
181 static inline void init_sched_state(struct intel_context *ce)
182 {
183 lockdep_assert_held(&ce->guc_state.lock);
184 ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
185 }
186
187 /*
188 * Kernel contexts can have SCHED_STATE_REGISTERED after suspend.
189 * A context close can race with the submission path, so SCHED_STATE_CLOSED
190 * can be set immediately before we try to register.
191 */
192 #define SCHED_STATE_VALID_INIT \
193 (SCHED_STATE_BLOCKED_MASK | \
194 SCHED_STATE_CLOSED | \
195 SCHED_STATE_REGISTERED)
196
197 __maybe_unused
198 static bool sched_state_is_init(struct intel_context *ce)
199 {
200 return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT);
201 }
202
203 static inline bool
204 context_wait_for_deregister_to_register(struct intel_context *ce)
205 {
206 return ce->guc_state.sched_state &
207 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
208 }
209
210 static inline void
211 set_context_wait_for_deregister_to_register(struct intel_context *ce)
212 {
213 lockdep_assert_held(&ce->guc_state.lock);
214 ce->guc_state.sched_state |=
215 SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
216 }
217
218 static inline void
219 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
220 {
221 lockdep_assert_held(&ce->guc_state.lock);
222 ce->guc_state.sched_state &=
223 ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
224 }
225
226 static inline bool
227 context_destroyed(struct intel_context *ce)
228 {
229 return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
230 }
231
232 static inline void
233 set_context_destroyed(struct intel_context *ce)
234 {
235 lockdep_assert_held(&ce->guc_state.lock);
236 ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
237 }
238
239 static inline void
240 clr_context_destroyed(struct intel_context *ce)
241 {
242 lockdep_assert_held(&ce->guc_state.lock);
243 ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED;
244 }
245
246 static inline bool context_pending_disable(struct intel_context *ce)
247 {
248 return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
249 }
250
251 static inline void set_context_pending_disable(struct intel_context *ce)
252 {
253 lockdep_assert_held(&ce->guc_state.lock);
254 ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
255 }
256
257 static inline void clr_context_pending_disable(struct intel_context *ce)
258 {
259 lockdep_assert_held(&ce->guc_state.lock);
260 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
261 }
262
263 static inline bool context_banned(struct intel_context *ce)
264 {
265 return ce->guc_state.sched_state & SCHED_STATE_BANNED;
266 }
267
268 static inline void set_context_banned(struct intel_context *ce)
269 {
270 lockdep_assert_held(&ce->guc_state.lock);
271 ce->guc_state.sched_state |= SCHED_STATE_BANNED;
272 }
273
274 static inline void clr_context_banned(struct intel_context *ce)
275 {
276 lockdep_assert_held(&ce->guc_state.lock);
277 ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
278 }
279
280 static inline bool context_enabled(struct intel_context *ce)
281 {
282 return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
283 }
284
285 static inline void set_context_enabled(struct intel_context *ce)
286 {
287 lockdep_assert_held(&ce->guc_state.lock);
288 ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
289 }
290
291 static inline void clr_context_enabled(struct intel_context *ce)
292 {
293 lockdep_assert_held(&ce->guc_state.lock);
294 ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
295 }
296
297 static inline bool context_pending_enable(struct intel_context *ce)
298 {
299 return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
300 }
301
302 static inline void set_context_pending_enable(struct intel_context *ce)
303 {
304 lockdep_assert_held(&ce->guc_state.lock);
305 ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
306 }
307
308 static inline void clr_context_pending_enable(struct intel_context *ce)
309 {
310 lockdep_assert_held(&ce->guc_state.lock);
311 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
312 }
313
314 static inline bool context_registered(struct intel_context *ce)
315 {
316 return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
317 }
318
319 static inline void set_context_registered(struct intel_context *ce)
320 {
321 lockdep_assert_held(&ce->guc_state.lock);
322 ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
323 }
324
325 static inline void clr_context_registered(struct intel_context *ce)
326 {
327 lockdep_assert_held(&ce->guc_state.lock);
328 ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
329 }
330
331 static inline bool context_policy_required(struct intel_context *ce)
332 {
333 return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
334 }
335
336 static inline void set_context_policy_required(struct intel_context *ce)
337 {
338 lockdep_assert_held(&ce->guc_state.lock);
339 ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
340 }
341
342 static inline void clr_context_policy_required(struct intel_context *ce)
343 {
344 lockdep_assert_held(&ce->guc_state.lock);
345 ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
346 }
347
348 static inline bool context_close_done(struct intel_context *ce)
349 {
350 return ce->guc_state.sched_state & SCHED_STATE_CLOSED;
351 }
352
353 static inline void set_context_close_done(struct intel_context *ce)
354 {
355 lockdep_assert_held(&ce->guc_state.lock);
356 ce->guc_state.sched_state |= SCHED_STATE_CLOSED;
357 }
358
359 static inline u32 context_blocked(struct intel_context *ce)
360 {
361 return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
362 SCHED_STATE_BLOCKED_SHIFT;
363 }
364
365 static inline void incr_context_blocked(struct intel_context *ce)
366 {
367 lockdep_assert_held(&ce->guc_state.lock);
368
369 ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
370
371 GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
372 }
373
374 static inline void decr_context_blocked(struct intel_context *ce)
375 {
376 lockdep_assert_held(&ce->guc_state.lock);
377
378 GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
379
380 ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
381 }
382
383 static struct intel_context *
384 request_to_scheduling_context(struct i915_request *rq)
385 {
386 return intel_context_to_parent(rq->context);
387 }
388
389 static inline bool context_guc_id_invalid(struct intel_context *ce)
390 {
391 return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
392 }
393
394 static inline void set_context_guc_id_invalid(struct intel_context *ce)
395 {
396 ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
397 }
398
399 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
400 {
401 return gt_to_guc(ce->engine->gt);
402 }
403
404 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
405 {
406 return rb_entry(rb, struct i915_priolist, node);
407 }
408
409 /*
410 * When using multi-lrc submission a scratch memory area is reserved in the
411 * parent's context state for the process descriptor, work queue, and handshake
412 * between the parent + children contexts to insert safe preemption points
413 * between each of the BBs. Currently the scratch area is sized to a page.
414 *
415 * The layout of this scratch area is below:
416 * 0 guc_process_desc
417 * + sizeof(struct guc_process_desc) child go
418 * + CACHELINE_BYTES child join[0]
419 * ...
420 * + CACHELINE_BYTES child join[n - 1]
421 * ... unused
422 * PARENT_SCRATCH_SIZE / 2 work queue start
423 * ... work queue
424 * PARENT_SCRATCH_SIZE - 1 work queue end
425 */
426 #define WQ_SIZE (PARENT_SCRATCH_SIZE / 2)
427 #define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
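
/*
 * Worked example of the layout above, assuming the scratch area is a single
 * 4K page (PARENT_SCRATCH_SIZE == 4096): the descriptor plus the go/join
 * semaphores live in the first half, WQ_SIZE = 2048 and WQ_OFFSET = 2048, so
 * the work queue occupies bytes [2048, 4095]. The numbers are illustrative;
 * only the half-and-half split is fixed by the defines.
 */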
428
429 struct sync_semaphore {
430 u32 semaphore;
431 u8 unused[CACHELINE_BYTES - sizeof(u32)];
432 };
433
434 struct parent_scratch {
435 union guc_descs {
436 struct guc_sched_wq_desc wq_desc;
437 struct guc_process_desc_v69 pdesc;
438 } descs;
439
440 struct sync_semaphore go;
441 struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
442
443 u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
444 sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
445
446 u32 wq[WQ_SIZE / sizeof(u32)];
447 };
448
449 static u32 __get_parent_scratch_offset(struct intel_context *ce)
450 {
451 GEM_BUG_ON(!ce->parallel.guc.parent_page);
452
453 return ce->parallel.guc.parent_page * PAGE_SIZE;
454 }
455
456 static u32 __get_wq_offset(struct intel_context *ce)
457 {
458 BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
459
460 return __get_parent_scratch_offset(ce) + WQ_OFFSET;
461 }
462
463 static struct parent_scratch *
464 __get_parent_scratch(struct intel_context *ce)
465 {
466 BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
467 BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
468
469 /*
470 * Need to subtract LRC_STATE_OFFSET here as the
471 * parallel.guc.parent_page is the offset into ce->state while
472 * ce->lrc_reg_reg is ce->state + LRC_STATE_OFFSET.
473 */
474 return (struct parent_scratch *)
475 (ce->lrc_reg_state +
476 ((__get_parent_scratch_offset(ce) -
477 LRC_STATE_OFFSET) / sizeof(u32)));
478 }
479
480 static struct guc_process_desc_v69 *
481 __get_process_desc_v69(struct intel_context *ce)
482 {
483 struct parent_scratch *ps = __get_parent_scratch(ce);
484
485 return &ps->descs.pdesc;
486 }
487
488 static struct guc_sched_wq_desc *
489 __get_wq_desc_v70(struct intel_context *ce)
490 {
491 struct parent_scratch *ps = __get_parent_scratch(ce);
492
493 return &ps->descs.wq_desc;
494 }
495
496 static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
497 {
498 /*
499 * Check for space in the work queue. We cache the value of the head pointer in the
500 * intel_context structure in order to reduce the number of accesses to shared
501 * GPU memory, which may be across a PCIe bus.
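 *
 * Illustrative example, assuming a 4K scratch page and hence WQ_SIZE = 2048:
 * with wqi_tail = 1024 and a cached wqi_head = 0, CIRC_SPACE(1024, 0, 2048)
 * yields 1023 free bytes (a circular buffer keeps one byte unused to tell
 * full from empty), so a typical multi-lrc WQI of a few dozen bytes fits
 * without re-reading the head from shared memory.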
502 */
503 #define AVAILABLE_SPACE \
504 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
505 if (wqi_size > AVAILABLE_SPACE) {
506 ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
507
508 if (wqi_size > AVAILABLE_SPACE)
509 return NULL;
510 }
511 #undef AVAILABLE_SPACE
512
513 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
514 }
515
516 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
517 {
518 struct intel_context *ce = xa_load(&guc->context_lookup, id);
519
520 GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID);
521
522 return ce;
523 }
524
525 static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
526 {
527 struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
528
529 if (!base)
530 return NULL;
531
532 GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
533
534 return &base[index];
535 }
536
537 static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
538 {
539 u32 size;
540 int ret;
541
542 size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
543 GUC_MAX_CONTEXT_ID);
544 ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
545 (void **)&guc->lrc_desc_pool_vaddr_v69);
546 if (ret)
547 return ret;
548
549 return 0;
550 }
551
552 static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
553 {
554 if (!guc->lrc_desc_pool_vaddr_v69)
555 return;
556
557 guc->lrc_desc_pool_vaddr_v69 = NULL;
558 i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
559 }
560
561 static inline bool guc_submission_initialized(struct intel_guc *guc)
562 {
563 return guc->submission_initialized;
564 }
565
566 static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
567 {
568 struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
569
570 if (desc)
571 memset(desc, 0, sizeof(*desc));
572 }
573
574 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
575 {
576 return __get_context(guc, id);
577 }
578
579 static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
580 struct intel_context *ce)
581 {
582 unsigned long flags;
583
584 /*
585 * xarray API doesn't have xa_store_irqsave wrapper, so calling the
586 * lower level functions directly.
587 */
588 xa_lock_irqsave(&guc->context_lookup, flags);
589 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
590 xa_unlock_irqrestore(&guc->context_lookup, flags);
591 }
592
593 static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
594 {
595 unsigned long flags;
596
597 if (unlikely(!guc_submission_initialized(guc)))
598 return;
599
600 _reset_lrc_desc_v69(guc, id);
601
602 /*
603 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
604 * the lower level functions directly.
605 */
606 xa_lock_irqsave(&guc->context_lookup, flags);
607 __xa_erase(&guc->context_lookup, id);
608 xa_unlock_irqrestore(&guc->context_lookup, flags);
609 }
610
611 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
612 {
613 if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
614 wake_up_all(&guc->ct.wq);
615 }
616
617 static int guc_submission_send_busy_loop(struct intel_guc *guc,
618 const u32 *action,
619 u32 len,
620 u32 g2h_len_dw,
621 bool loop)
622 {
623 int ret;
624
625 /*
626 * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
627 * so we don't handle the case where we don't get a reply because we
628 * aborted the send due to the channel being busy.
629 */
630 GEM_BUG_ON(g2h_len_dw && !loop);
631
632 if (g2h_len_dw)
633 atomic_inc(&guc->outstanding_submission_g2h);
634
635 ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
636 if (ret)
637 atomic_dec(&guc->outstanding_submission_g2h);
638
639 return ret;
640 }
641
642 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
643 atomic_t *wait_var,
644 bool interruptible,
645 long timeout)
646 {
647 const int state = interruptible ?
648 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
649 DEFINE_WAIT(wait);
650
651 might_sleep();
652 GEM_BUG_ON(timeout < 0);
653
654 if (!atomic_read(wait_var))
655 return 0;
656
657 if (!timeout)
658 return -ETIME;
659
660 for (;;) {
661 prepare_to_wait(&guc->ct.wq, &wait, state);
662
663 if (!atomic_read(wait_var))
664 break;
665
666 if (signal_pending_state(state, current)) {
667 timeout = -EINTR;
668 break;
669 }
670
671 if (!timeout) {
672 timeout = -ETIME;
673 break;
674 }
675
676 timeout = io_schedule_timeout(timeout);
677 }
678 finish_wait(&guc->ct.wq, &wait);
679
680 return (timeout < 0) ? timeout : 0;
681 }
682
683 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
684 {
685 if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
686 return 0;
687
688 return intel_guc_wait_for_pending_msg(guc,
689 &guc->outstanding_submission_g2h,
690 true, timeout);
691 }
692
693 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
694 static int try_context_registration(struct intel_context *ce, bool loop);
695
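
/*
 * Submit a single request to the GuC, implementing the "Context submission"
 * flow from the DOC comment above: if scheduling is not yet enabled on the
 * context, a SCHED_CONTEXT_MODE_SET (enable) H2G is sent and a G2H reply is
 * expected; otherwise a lighter-weight SCHED_CONTEXT H2G tells the GuC that
 * an already enabled context has new work available.
 */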
696 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
697 {
698 int err = 0;
699 struct intel_context *ce = request_to_scheduling_context(rq);
700 u32 action[3];
701 int len = 0;
702 u32 g2h_len_dw = 0;
703 bool enabled;
704
705 lockdep_assert_held(&rq->engine->sched_engine->lock);
706
707 /*
708 * Corner case where requests were sitting in the priority list or a
709 * request was resubmitted after the context was banned.
710 */
711 if (unlikely(!intel_context_is_schedulable(ce))) {
712 i915_request_put(i915_request_mark_eio(rq));
713 intel_engine_signal_breadcrumbs(ce->engine);
714 return 0;
715 }
716
717 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
718 GEM_BUG_ON(context_guc_id_invalid(ce));
719
720 if (context_policy_required(ce)) {
721 err = guc_context_policy_init_v70(ce, false);
722 if (err)
723 return err;
724 }
725
726 spin_lock(&ce->guc_state.lock);
727
728 /*
729 * The request / context will be run on the hardware when scheduling
730 * gets enabled in the unblock. For multi-lrc we still submit the
731 * context to move the LRC tails.
732 */
733 if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
734 goto out;
735
736 enabled = context_enabled(ce) || context_blocked(ce);
737
738 if (!enabled) {
739 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
740 action[len++] = ce->guc_id.id;
741 action[len++] = GUC_CONTEXT_ENABLE;
742 set_context_pending_enable(ce);
743 intel_context_get(ce);
744 g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
745 } else {
746 action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
747 action[len++] = ce->guc_id.id;
748 }
749
750 err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
751 if (!enabled && !err) {
752 trace_intel_context_sched_enable(ce);
753 atomic_inc(&guc->outstanding_submission_g2h);
754 set_context_enabled(ce);
755
756 /*
757 * Without multi-lrc KMD does the submission step (moving the
758 * lrc tail) so enabling scheduling is sufficient to submit the
759 * context. This isn't the case in multi-lrc submission as the
760 * GuC needs to move the tails, hence the need for another H2G
761 * to submit a multi-lrc context after enabling scheduling.
762 */
763 if (intel_context_is_parent(ce)) {
764 action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
765 err = intel_guc_send_nb(guc, action, len - 1, 0);
766 }
767 } else if (!enabled) {
768 clr_context_pending_enable(ce);
769 intel_context_put(ce);
770 }
771 if (likely(!err))
772 trace_i915_request_guc_submit(rq);
773
774 out:
775 spin_unlock(&ce->guc_state.lock);
776 return err;
777 }
778
779 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
780 {
781 int ret = __guc_add_request(guc, rq);
782
783 if (unlikely(ret == -EBUSY)) {
784 guc->stalled_request = rq;
785 guc->submission_stall_reason = STALL_ADD_REQUEST;
786 }
787
788 return ret;
789 }
790
791 static inline void guc_set_lrc_tail(struct i915_request *rq)
792 {
793 rq->context->lrc_reg_state[CTX_RING_TAIL] =
794 intel_ring_set_tail(rq->ring, rq->tail);
795 }
796
797 static inline int rq_prio(const struct i915_request *rq)
798 {
799 return rq->sched.attr.priority;
800 }
801
802 static bool is_multi_lrc_rq(struct i915_request *rq)
803 {
804 return intel_context_is_parallel(rq->context);
805 }
806
807 static bool can_merge_rq(struct i915_request *rq,
808 struct i915_request *last)
809 {
810 return request_to_scheduling_context(rq) ==
811 request_to_scheduling_context(last);
812 }
813
814 static u32 wq_space_until_wrap(struct intel_context *ce)
815 {
816 return (WQ_SIZE - ce->parallel.guc.wqi_tail);
817 }
818
819 static void write_wqi(struct intel_context *ce, u32 wqi_size)
820 {
821 BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
822
823 /*
824 * Ensure WQI are visible before updating tail
825 */
826 intel_guc_write_barrier(ce_to_guc(ce));
827
828 ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
829 (WQ_SIZE - 1);
830 WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
831 }
832
833 static int guc_wq_noop_append(struct intel_context *ce)
834 {
835 u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
836 u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
837
838 if (!wqi)
839 return -EBUSY;
840
841 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
842
843 *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
844 FIELD_PREP(WQ_LEN_MASK, len_dw);
845 ce->parallel.guc.wqi_tail = 0;
846
847 return 0;
848 }
849
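
/*
 * Layout of the multi-lrc work queue item emitted below, one u32 per entry:
 *	dw0: header (WQ_TYPE_MULTI_LRC | number of remaining dwords)
 *	dw1: parent context LRCA
 *	dw2: parent guc_id | parent ring tail in qwords
 *	dw3: fence_id (currently always 0)
 *	dw4+: one ring tail in qwords per child context
 */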
850 static int __guc_wq_item_append(struct i915_request *rq)
851 {
852 struct intel_context *ce = request_to_scheduling_context(rq);
853 struct intel_context *child;
854 unsigned int wqi_size = (ce->parallel.number_children + 4) *
855 sizeof(u32);
856 u32 *wqi;
857 u32 len_dw = (wqi_size / sizeof(u32)) - 1;
858 int ret;
859
860 /* Ensure context is in the correct state before updating the work queue */
861 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
862 GEM_BUG_ON(context_guc_id_invalid(ce));
863 GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
864 GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
865
866 /* Insert NOOP if this work queue item will wrap the tail pointer. */
867 if (wqi_size > wq_space_until_wrap(ce)) {
868 ret = guc_wq_noop_append(ce);
869 if (ret)
870 return ret;
871 }
872
873 wqi = get_wq_pointer(ce, wqi_size);
874 if (!wqi)
875 return -EBUSY;
876
877 GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
878
879 *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
880 FIELD_PREP(WQ_LEN_MASK, len_dw);
881 *wqi++ = ce->lrc.lrca;
882 *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
883 FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
884 *wqi++ = 0; /* fence_id */
885 for_each_child(ce, child)
886 *wqi++ = child->ring->tail / sizeof(u64);
887
888 write_wqi(ce, wqi_size);
889
890 return 0;
891 }
892
893 static int guc_wq_item_append(struct intel_guc *guc,
894 struct i915_request *rq)
895 {
896 struct intel_context *ce = request_to_scheduling_context(rq);
897 int ret;
898
899 if (unlikely(!intel_context_is_schedulable(ce)))
900 return 0;
901
902 ret = __guc_wq_item_append(rq);
903 if (unlikely(ret == -EBUSY)) {
904 guc->stalled_request = rq;
905 guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
906 }
907
908 return ret;
909 }
910
911 static bool multi_lrc_submit(struct i915_request *rq)
912 {
913 struct intel_context *ce = request_to_scheduling_context(rq);
914
915 intel_ring_set_tail(rq->ring, rq->tail);
916
917 /*
918 * We expect the front end (execbuf IOCTL) to set this flag on the last
919 * request generated from a multi-BB submission. This indicates to the
920 * backend (GuC interface) that we should submit this context thus
921 * submitting all the requests generated in parallel.
922 */
923 return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
924 !intel_context_is_schedulable(ce);
925 }
926
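
/*
 * Pull requests off the priority lists and push them to the GuC, coalescing
 * consecutive requests that belong to the same scheduling context. The three
 * steps (register context, move LRC tail / append WQI, add request) match the
 * STALL_* resume points at the top of the function so that a -EBUSY from the
 * CT channel can be retried from exactly where it stalled. Returns whether
 * anything was submitted, in which case the tasklet loops again.
 */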
927 static int guc_dequeue_one_context(struct intel_guc *guc)
928 {
929 struct i915_sched_engine * const sched_engine = guc->sched_engine;
930 struct i915_request *last = NULL;
931 bool submit = false;
932 struct rb_node *rb;
933 int ret;
934
935 lockdep_assert_held(&sched_engine->lock);
936
937 if (guc->stalled_request) {
938 submit = true;
939 last = guc->stalled_request;
940
941 switch (guc->submission_stall_reason) {
942 case STALL_REGISTER_CONTEXT:
943 goto register_context;
944 case STALL_MOVE_LRC_TAIL:
945 goto move_lrc_tail;
946 case STALL_ADD_REQUEST:
947 goto add_request;
948 default:
949 MISSING_CASE(guc->submission_stall_reason);
950 }
951 }
952
953 while ((rb = rb_first_cached(&sched_engine->queue))) {
954 struct i915_priolist *p = to_priolist(rb);
955 struct i915_request *rq, *rn;
956
957 priolist_for_each_request_consume(rq, rn, p) {
958 if (last && !can_merge_rq(rq, last))
959 goto register_context;
960
961 list_del_init(&rq->sched.link);
962
963 __i915_request_submit(rq);
964
965 trace_i915_request_in(rq, 0);
966 last = rq;
967
968 if (is_multi_lrc_rq(rq)) {
969 /*
970 * We need to coalesce all multi-lrc requests in
971 * a relationship into a single H2G. We are
972 * guaranteed that all of these requests will be
973 * submitted sequentially.
974 */
975 if (multi_lrc_submit(rq)) {
976 submit = true;
977 goto register_context;
978 }
979 } else {
980 submit = true;
981 }
982 }
983
984 rb_erase_cached(&p->node, &sched_engine->queue);
985 i915_priolist_free(p);
986 }
987
988 register_context:
989 if (submit) {
990 struct intel_context *ce = request_to_scheduling_context(last);
991
992 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
993 intel_context_is_schedulable(ce))) {
994 ret = try_context_registration(ce, false);
995 if (unlikely(ret == -EPIPE)) {
996 goto deadlk;
997 } else if (ret == -EBUSY) {
998 guc->stalled_request = last;
999 guc->submission_stall_reason =
1000 STALL_REGISTER_CONTEXT;
1001 goto schedule_tasklet;
1002 } else if (ret != 0) {
1003 GEM_WARN_ON(ret); /* Unexpected */
1004 goto deadlk;
1005 }
1006 }
1007
1008 move_lrc_tail:
1009 if (is_multi_lrc_rq(last)) {
1010 ret = guc_wq_item_append(guc, last);
1011 if (ret == -EBUSY) {
1012 goto schedule_tasklet;
1013 } else if (ret != 0) {
1014 GEM_WARN_ON(ret); /* Unexpected */
1015 goto deadlk;
1016 }
1017 } else {
1018 guc_set_lrc_tail(last);
1019 }
1020
1021 add_request:
1022 ret = guc_add_request(guc, last);
1023 if (unlikely(ret == -EPIPE)) {
1024 goto deadlk;
1025 } else if (ret == -EBUSY) {
1026 goto schedule_tasklet;
1027 } else if (ret != 0) {
1028 GEM_WARN_ON(ret); /* Unexpected */
1029 goto deadlk;
1030 }
1031 }
1032
1033 guc->stalled_request = NULL;
1034 guc->submission_stall_reason = STALL_NONE;
1035 return submit;
1036
1037 deadlk:
1038 sched_engine->tasklet.callback = NULL;
1039 tasklet_disable_nosync(&sched_engine->tasklet);
1040 return false;
1041
1042 schedule_tasklet:
1043 tasklet_schedule(&sched_engine->tasklet);
1044 return false;
1045 }
1046
1047 static void guc_submission_tasklet(struct tasklet_struct *t)
1048 {
1049 struct i915_sched_engine *sched_engine =
1050 from_tasklet(sched_engine, t, tasklet);
1051 unsigned long flags;
1052 bool loop;
1053
1054 spin_lock_irqsave(&sched_engine->lock, flags);
1055
1056 do {
1057 loop = guc_dequeue_one_context(sched_engine->private_data);
1058 } while (loop);
1059
1060 i915_sched_engine_reset_on_empty(sched_engine);
1061
1062 spin_unlock_irqrestore(&sched_engine->lock, flags);
1063 }
1064
1065 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
1066 {
1067 if (iir & GT_RENDER_USER_INTERRUPT)
1068 intel_engine_signal_breadcrumbs(engine);
1069 }
1070
1071 static void __guc_context_destroy(struct intel_context *ce);
1072 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1073 static void guc_signal_context_fence(struct intel_context *ce);
1074 static void guc_cancel_context_requests(struct intel_context *ce);
1075 static void guc_blocked_fence_complete(struct intel_context *ce);
1076
1077 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
1078 {
1079 struct intel_context *ce;
1080 unsigned long index, flags;
1081 bool pending_disable, pending_enable, deregister, destroyed, banned;
1082
1083 xa_lock_irqsave(&guc->context_lookup, flags);
1084 xa_for_each(&guc->context_lookup, index, ce) {
1085 /*
1086 * Corner case where the ref count on the object is zero but a
1087 * deregister G2H was lost. In this case we don't touch the ref
1088 * count and finish the destroy of the context.
1089 */
1090 bool do_put = kref_get_unless_zero(&ce->ref);
1091
1092 xa_unlock(&guc->context_lookup);
1093
1094 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
1095 (cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) {
1096 /* successful cancel so jump straight to close it */
1097 intel_context_sched_disable_unpin(ce);
1098 }
1099
1100 spin_lock(&ce->guc_state.lock);
1101
1102 /*
1103 * Once we are at this point submission_disabled() is guaranteed
1104 * to be visible to all callers who set the below flags (see above
1105 * flush and flushes in reset_prepare). If submission_disabled()
1106 * is set, the caller shouldn't set these flags.
1107 */
1108
1109 destroyed = context_destroyed(ce);
1110 pending_enable = context_pending_enable(ce);
1111 pending_disable = context_pending_disable(ce);
1112 deregister = context_wait_for_deregister_to_register(ce);
1113 banned = context_banned(ce);
1114 init_sched_state(ce);
1115
1116 spin_unlock(&ce->guc_state.lock);
1117
1118 if (pending_enable || destroyed || deregister) {
1119 decr_outstanding_submission_g2h(guc);
1120 if (deregister)
1121 guc_signal_context_fence(ce);
1122 if (destroyed) {
1123 intel_gt_pm_put_async_untracked(guc_to_gt(guc));
1124 release_guc_id(guc, ce);
1125 __guc_context_destroy(ce);
1126 }
1127 if (pending_enable || deregister)
1128 intel_context_put(ce);
1129 }
1130
1131 /* Not mutually exclusive with the above if statement. */
1132 if (pending_disable) {
1133 guc_signal_context_fence(ce);
1134 if (banned) {
1135 guc_cancel_context_requests(ce);
1136 intel_engine_signal_breadcrumbs(ce->engine);
1137 }
1138 intel_context_sched_disable_unpin(ce);
1139 decr_outstanding_submission_g2h(guc);
1140
1141 spin_lock(&ce->guc_state.lock);
1142 guc_blocked_fence_complete(ce);
1143 spin_unlock(&ce->guc_state.lock);
1144
1145 intel_context_put(ce);
1146 }
1147
1148 if (do_put)
1149 intel_context_put(ce);
1150 xa_lock(&guc->context_lookup);
1151 }
1152 xa_unlock_irqrestore(&guc->context_lookup, flags);
1153 }
1154
1155 /*
1156 * GuC stores busyness stats for each engine at context in/out boundaries. A
1157 * context 'in' logs execution start time, 'out' adds in -> out delta to total.
1158 * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
1159 * GuC.
1160 *
1161 * __i915_pmu_event_read samples engine busyness. When sampling, if context id
1162 * is valid (!= ~0) and start is non-zero, the engine is considered to be
1163 * active. For an active engine total busyness = total + (now - start), where
1164 * 'now' is the time at which the busyness is sampled. For inactive engine,
1165 * total busyness = total.
1166 *
1167 * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
1168 *
1169 * The start and total values provided by GuC are 32 bits and wrap around in a
1170 * few minutes. Since perf pmu provides busyness as 64 bit monotonically
1171 * increasing ns values, there is a need for this implementation to account for
1172 * overflows and extend the GuC provided values to 64 bits before returning
1173 * busyness to the user. In order to do that, a worker runs periodically at
1174 * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
1175 * 27 seconds for a gt clock frequency of 19.2 MHz).
1176 */
1177
1178 #define WRAP_TIME_CLKS U32_MAX
1179 #define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
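
/*
 * Worked example: at a 19.2 MHz gt clock the 32-bit timestamp wraps roughly
 * every 2^32 / 19.2e6 ~= 224 seconds, so POLL_TIME_CLKS (1/8th of the wrap
 * period) reschedules the ping worker about every 27-28 seconds, comfortably
 * often enough to observe every wrap.
 */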
1180
1181 static void
1182 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
1183 {
1184 u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1185 u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
1186
1187 if (new_start == lower_32_bits(*prev_start))
1188 return;
1189
1190 /*
1191 * When gt is unparked, we update the gt timestamp and start the ping
1192 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
1193 * is unparked, all switched in contexts will have a start time that is
1194 * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
1195 *
1196 * If neither gt_stamp nor new_start has rolled over, then the
1197 * gt_stamp_hi does not need to be adjusted, however if one of them has
1198 * rolled over, we need to adjust gt_stamp_hi accordingly.
1199 *
1200 * The below conditions address the cases of new_start rollover and
1201 * gt_stamp_last rollover respectively.
1202 */
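/*
 * Worked example with illustrative values: if gt_stamp_last = 0xfffff000
 * (gt_stamp_hi = 5) and a context switched in just after the 32-bit
 * rollover at new_start = 0x1000, then new_start < gt_stamp_last and the
 * u32 subtraction new_start - gt_stamp_last = 0x2000 is within
 * POLL_TIME_CLKS, so gt_stamp_hi becomes 6 and *prev_start is extended
 * to 0x600001000.
 */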
1203 if (new_start < gt_stamp_last &&
1204 (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
1205 gt_stamp_hi++;
1206
1207 if (new_start > gt_stamp_last &&
1208 (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
1209 gt_stamp_hi--;
1210
1211 *prev_start = ((u64)gt_stamp_hi << 32) | new_start;
1212 }
1213
1214 #define record_read(map_, field_) \
1215 iosys_map_rd_field(map_, 0, struct guc_engine_usage_record, field_)
1216
1217 /*
1218 * GuC updates shared memory and KMD reads it. Since this is not synchronized,
1219 * we run into a race where the value read is inconsistent. Sometimes the
1220 * inconsistency is in reading the upper MSB bytes of the last_in value when
1221 * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper
1222 * 24 bits are zero. Since these are non-zero values, it is non-trivial to
1223 * determine validity of these values. Instead we read the values multiple times
1224 * until they are consistent. In test runs, 3 attempts result in consistent
1225 * values. The upper bound is set to 6 attempts and may need to be tuned as per
1226 * any new occurrences.
1227 */
1228 static void __get_engine_usage_record(struct intel_engine_cs *engine,
1229 u32 *last_in, u32 *id, u32 *total)
1230 {
1231 struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
1232 int i = 0;
1233
1234 do {
1235 *last_in = record_read(&rec_map, last_switch_in_stamp);
1236 *id = record_read(&rec_map, current_context_index);
1237 *total = record_read(&rec_map, total_runtime);
1238
1239 if (record_read(&rec_map, last_switch_in_stamp) == *last_in &&
1240 record_read(&rec_map, current_context_index) == *id &&
1241 record_read(&rec_map, total_runtime) == *total)
1242 break;
1243 } while (++i < 6);
1244 }
1245
1246 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
1247 {
1248 struct intel_engine_guc_stats *stats = &engine->stats.guc;
1249 struct intel_guc *guc = gt_to_guc(engine->gt);
1250 u32 last_switch, ctx_id, total;
1251
1252 lockdep_assert_held(&guc->timestamp.lock);
1253
1254 __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
1255
1256 stats->running = ctx_id != ~0U && last_switch;
1257 if (stats->running)
1258 __extend_last_switch(guc, &stats->start_gt_clk, last_switch);
1259
1260 /*
1261 * Instead of adjusting the total for overflow, just add the
1262 * difference from previous sample stats->total_gt_clks
1263 */
1264 if (total && total != ~0U) {
1265 stats->total_gt_clks += (u32)(total - stats->prev_total);
1266 stats->prev_total = total;
1267 }
1268 }
1269
1270 static u32 gpm_timestamp_shift(struct intel_gt *gt)
1271 {
1272 intel_wakeref_t wakeref;
1273 u32 reg, shift;
1274
1275 with_intel_runtime_pm(gt->uncore->rpm, wakeref)
1276 reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
1277
1278 shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
1279 GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
1280
1281 return 3 - shift;
1282 }
1283
1284 static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
1285 {
1286 struct intel_gt *gt = guc_to_gt(guc);
1287 u32 gt_stamp_lo, gt_stamp_hi;
1288 u64 gpm_ts;
1289
1290 lockdep_assert_held(&guc->timestamp.lock);
1291
1292 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1293 gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0,
1294 MISC_STATUS1) >> guc->timestamp.shift;
1295 gt_stamp_lo = lower_32_bits(gpm_ts);
1296 *now = ktime_get();
1297
1298 if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
1299 gt_stamp_hi++;
1300
1301 guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
1302 }
1303
1304 /*
1305 * Unlike the execlist mode of submission, total and active times are in terms of
1306 * gt clocks. The *now parameter is retained to return the cpu time at which the
1307 * busyness was sampled.
1308 */
1309 static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
1310 {
1311 struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
1312 struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
1313 struct intel_gt *gt = engine->gt;
1314 struct intel_guc *guc = gt_to_guc(gt);
1315 u64 total, gt_stamp_saved;
1316 unsigned long flags;
1317 u32 reset_count;
1318 bool in_reset;
1319 intel_wakeref_t wakeref;
1320
1321 spin_lock_irqsave(&guc->timestamp.lock, flags);
1322
1323 /*
1324 * If a reset happened, we risk reading partially updated engine
1325 * busyness from GuC, so we just use the driver stored copy of busyness.
1326 * Synchronize with gt reset using reset_count and the
1327 * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
1328 * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
1329 * usable by checking the flag afterwards.
1330 */
1331 reset_count = i915_reset_count(gpu_error);
1332 in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
1333
1334 *now = ktime_get();
1335
1336 /*
1337 * The active busyness depends on start_gt_clk and gt_stamp.
1338 * gt_stamp is updated by i915 only when gt is awake and the
1339 * start_gt_clk is derived from GuC state. To get a consistent
1340 * view of activity, we query the GuC state only if gt is awake.
1341 */
1342 wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
1343 if (wakeref) {
1344 stats_saved = *stats;
1345 gt_stamp_saved = guc->timestamp.gt_stamp;
1346 /*
1347 * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
1348 * start_gt_clk' calculation below for active engines.
1349 */
1350 guc_update_engine_gt_clks(engine);
1351 guc_update_pm_timestamp(guc, now);
1352 intel_gt_pm_put_async(gt, wakeref);
1353 if (i915_reset_count(gpu_error) != reset_count) {
1354 *stats = stats_saved;
1355 guc->timestamp.gt_stamp = gt_stamp_saved;
1356 }
1357 }
1358
1359 total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
1360 if (stats->running) {
1361 u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
1362
1363 total += intel_gt_clock_interval_to_ns(gt, clk);
1364 }
1365
1366 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1367
1368 return ns_to_ktime(total);
1369 }
1370
1371 static void guc_enable_busyness_worker(struct intel_guc *guc)
1372 {
1373 mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
1374 }
1375
1376 static void guc_cancel_busyness_worker(struct intel_guc *guc)
1377 {
1378 /*
1379 * There are many different call stacks that can get here. Some of them
1380 * hold the reset mutex. The busyness worker also attempts to acquire the
1381 * reset mutex. Synchronously flushing a worker thread requires acquiring
1382 * the worker mutex. Lockdep sees this as a conflict. It thinks that the
1383 * flush can deadlock because it holds the worker mutex while waiting for
1384 * the reset mutex, but another thread is holding the reset mutex and might
1385 * attempt to use other worker functions.
1386 *
1387 * In practice, this scenario does not exist because the busyness worker
1388 * does not block waiting for the reset mutex. It does a try-lock on it and
1389 * immediately exits if the lock is already held. Unfortunately, the mutex
1390 * in question (I915_RESET_BACKOFF) is an i915 implementation which has lockdep
1391 * annotation but not to the extent of explaining the 'might lock' is also a
1392 * 'does not need to lock'. So one option would be to add more complex lockdep
1393 * annotations to ignore the issue (if at all possible). A simpler option is to
1394 * just not flush synchronously when a reset is in progress. Given that the worker
1395 * will just early exit and re-schedule itself anyway, there is no advantage
1396 * to running it immediately.
1397 *
1398 * If a reset is not in progress, then the synchronous flush may be required.
1399 * As noted many call stacks lead here, some during suspend and driver unload
1400 * which do require a synchronous flush to make sure the worker is stopped
1401 * before memory is freed.
1402 *
1403 * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
1404 * every possible call stack is unfeasible. It would be too intrusive to many
1405 * areas that really don't care about the GuC backend. However, there is the
1406 * I915_RESET_BACKOFF flag and the gt->reset.mutex can be tested for is_locked.
1407 * So just use those. Note that testing both is required due to the hideously
1408 * complex nature of the i915 driver's reset code paths.
1409 *
1410 * And note that in the case of a reset occurring during driver unload
1411 * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
1412 * reset flag/mutex are set) is fine because there is another explicit cancel in
1413 * intel_guc_submission_fini (when the reset flag/mutex are not).
1414 */
1415 if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
1416 test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
1417 cancel_delayed_work(&guc->timestamp.work);
1418 else
1419 cancel_delayed_work_sync(&guc->timestamp.work);
1420 }
1421
1422 static void __reset_guc_busyness_stats(struct intel_guc *guc)
1423 {
1424 struct intel_gt *gt = guc_to_gt(guc);
1425 struct intel_engine_cs *engine;
1426 enum intel_engine_id id;
1427 unsigned long flags;
1428 ktime_t unused;
1429
1430 spin_lock_irqsave(&guc->timestamp.lock, flags);
1431
1432 guc_update_pm_timestamp(guc, &unused);
1433 for_each_engine(engine, gt, id) {
1434 guc_update_engine_gt_clks(engine);
1435 engine->stats.guc.prev_total = 0;
1436 }
1437
1438 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1439 }
1440
1441 static void __update_guc_busyness_stats(struct intel_guc *guc)
1442 {
1443 struct intel_gt *gt = guc_to_gt(guc);
1444 struct intel_engine_cs *engine;
1445 enum intel_engine_id id;
1446 unsigned long flags;
1447 ktime_t unused;
1448
1449 guc->timestamp.last_stat_jiffies = jiffies;
1450
1451 spin_lock_irqsave(&guc->timestamp.lock, flags);
1452
1453 guc_update_pm_timestamp(guc, &unused);
1454 for_each_engine(engine, gt, id)
1455 guc_update_engine_gt_clks(engine);
1456
1457 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1458 }
1459
1460 static void __guc_context_update_stats(struct intel_context *ce)
1461 {
1462 struct intel_guc *guc = ce_to_guc(ce);
1463 unsigned long flags;
1464
1465 spin_lock_irqsave(&guc->timestamp.lock, flags);
1466 lrc_update_runtime(ce);
1467 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1468 }
1469
1470 static void guc_context_update_stats(struct intel_context *ce)
1471 {
1472 if (!intel_context_pin_if_active(ce))
1473 return;
1474
1475 __guc_context_update_stats(ce);
1476 intel_context_unpin(ce);
1477 }
1478
1479 static void guc_timestamp_ping(struct work_struct *wrk)
1480 {
1481 struct intel_guc *guc = container_of(wrk, typeof(*guc),
1482 timestamp.work.work);
1483 struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
1484 struct intel_gt *gt = guc_to_gt(guc);
1485 struct intel_context *ce;
1486 intel_wakeref_t wakeref;
1487 unsigned long index;
1488 int srcu, ret;
1489
1490 /*
1491 * Ideally the busyness worker should take a gt pm wakeref because the
1492 * worker only needs to be active while gt is awake. However, the
1493 * gt_park path cancels the worker synchronously and this complicates
1494 * the flow if the worker is also running at the same time. The cancel
1495 * waits for the worker and when the worker releases the wakeref, that
1496 * would call gt_park and would lead to a deadlock.
1497 *
1498 * The resolution is to take the global pm wakeref if runtime pm is
1499 * already active. If not, we don't need to update the busyness stats as
1500 * the stats would already be updated when the gt was parked.
1501 *
1502 * Note:
1503 * - We do not requeue the worker if we cannot take a reference to runtime
1504 * pm since intel_guc_busyness_unpark would requeue the worker in the
1505 * resume path.
1506 *
1507 * - If the gt was parked longer than time taken for GT timestamp to roll
1508 * over, we ignore those rollovers since we don't care about tracking
1509 * the exact GT time. We only care about roll overs when the gt is
1510 * active and running workloads.
1511 *
1512 * - There is a window of time between gt_park and runtime suspend,
1513 * where the worker may run. This is acceptable since the worker will
1514 * not find any new data to update busyness.
1515 */
1516 wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
1517 if (!wakeref)
1518 return;
1519
1520 /*
1521 * Synchronize with gt reset to make sure the worker does not
1522 * corrupt the engine/guc stats. NB: can't actually block waiting
1523 * for a reset to complete as the reset requires flushing out
1524 * this worker thread if started. So waiting would deadlock.
1525 */
1526 ret = intel_gt_reset_trylock(gt, &srcu);
1527 if (ret)
1528 goto err_trylock;
1529
1530 __update_guc_busyness_stats(guc);
1531
1532 /* adjust context stats for overflow */
1533 xa_for_each(&guc->context_lookup, index, ce)
1534 guc_context_update_stats(ce);
1535
1536 intel_gt_reset_unlock(gt, srcu);
1537
1538 guc_enable_busyness_worker(guc);
1539
1540 err_trylock:
1541 intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
1542 }
1543
1544 static int guc_action_enable_usage_stats(struct intel_guc *guc)
1545 {
1546 u32 offset = intel_guc_engine_usage_offset(guc);
1547 u32 action[] = {
1548 INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
1549 offset,
1550 0,
1551 };
1552
1553 return intel_guc_send(guc, action, ARRAY_SIZE(action));
1554 }
1555
1556 static int guc_init_engine_stats(struct intel_guc *guc)
1557 {
1558 struct intel_gt *gt = guc_to_gt(guc);
1559 intel_wakeref_t wakeref;
1560 int ret;
1561
1562 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
1563 ret = guc_action_enable_usage_stats(guc);
1564
1565 if (ret)
1566 guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
1567 else
1568 guc_enable_busyness_worker(guc);
1569
1570 return ret;
1571 }
1572
1573 static void guc_fini_engine_stats(struct intel_guc *guc)
1574 {
1575 guc_cancel_busyness_worker(guc);
1576 }
1577
1578 void intel_guc_busyness_park(struct intel_gt *gt)
1579 {
1580 struct intel_guc *guc = gt_to_guc(gt);
1581
1582 if (!guc_submission_initialized(guc))
1583 return;
1584
1585 /*
1586 * There is a race with suspend flow where the worker runs after suspend
1587 * and causes an unclaimed register access warning. Cancel the worker
1588 * synchronously here.
1589 */
1590 guc_cancel_busyness_worker(guc);
1591
1592 /*
1593 * Before parking, we should sample engine busyness stats if we need to.
1594 * We can skip it if we are less than half a ping from the last time we
1595 * sampled the busyness stats.
1596 */
1597 if (guc->timestamp.last_stat_jiffies &&
1598 !time_after(jiffies, guc->timestamp.last_stat_jiffies +
1599 (guc->timestamp.ping_delay / 2)))
1600 return;
1601
1602 __update_guc_busyness_stats(guc);
1603 }
1604
1605 void intel_guc_busyness_unpark(struct intel_gt *gt)
1606 {
1607 struct intel_guc *guc = gt_to_guc(gt);
1608 unsigned long flags;
1609 ktime_t unused;
1610
1611 if (!guc_submission_initialized(guc))
1612 return;
1613
1614 spin_lock_irqsave(&guc->timestamp.lock, flags);
1615 guc_update_pm_timestamp(guc, &unused);
1616 spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1617 guc_enable_busyness_worker(guc);
1618 }
1619
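/*
 * Submission is unusable if the scheduler is missing, its tasklet has been
 * disabled (e.g. mid-reset), or the GT has been wedged.
 */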
1620 static inline bool
1621 submission_disabled(struct intel_guc *guc)
1622 {
1623 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1624
1625 return unlikely(!sched_engine ||
1626 !__tasklet_is_enabled(&sched_engine->tasklet) ||
1627 intel_gt_is_wedged(guc_to_gt(guc)));
1628 }
1629
1630 static void disable_submission(struct intel_guc *guc)
1631 {
1632 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1633
1634 if (__tasklet_is_enabled(&sched_engine->tasklet)) {
1635 GEM_BUG_ON(!guc->ct.enabled);
1636 __tasklet_disable_sync_once(&sched_engine->tasklet);
1637 sched_engine->tasklet.callback = NULL;
1638 }
1639 }
1640
1641 static void enable_submission(struct intel_guc *guc)
1642 {
1643 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1644 unsigned long flags;
1645
1646 spin_lock_irqsave(&guc->sched_engine->lock, flags);
1647 sched_engine->tasklet.callback = guc_submission_tasklet;
1648 wmb(); /* Make sure callback visible */
1649 if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
1650 __tasklet_enable(&sched_engine->tasklet)) {
1651 GEM_BUG_ON(!guc->ct.enabled);
1652
1653 /* And kick in case we missed a new request submission. */
1654 tasklet_hi_schedule(&sched_engine->tasklet);
1655 }
1656 spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
1657 }
1658
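/*
 * Taking and immediately dropping the sched_engine lock acts as a barrier:
 * any submission path currently inside the lock is guaranteed to have
 * finished by the time this returns.
 */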
1659 static void guc_flush_submissions(struct intel_guc *guc)
1660 {
1661 struct i915_sched_engine * const sched_engine = guc->sched_engine;
1662 unsigned long flags;
1663
1664 spin_lock_irqsave(&sched_engine->lock, flags);
1665 spin_unlock_irqrestore(&sched_engine->lock, flags);
1666 }
1667
1668 void intel_guc_submission_flush_work(struct intel_guc *guc)
1669 {
1670 flush_work(&guc->submission_state.destroyed_worker);
1671 }
1672
1673 static void guc_flush_destroyed_contexts(struct intel_guc *guc);
1674
1675 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
1676 {
1677 if (unlikely(!guc_submission_initialized(guc))) {
1678 /* Reset called during driver load? GuC not yet initialised! */
1679 return;
1680 }
1681
1682 intel_gt_park_heartbeats(guc_to_gt(guc));
1683 disable_submission(guc);
1684 guc->interrupts.disable(guc);
1685 __reset_guc_busyness_stats(guc);
1686
1687 /* Flush IRQ handler */
1688 spin_lock_irq(guc_to_gt(guc)->irq_lock);
1689 spin_unlock_irq(guc_to_gt(guc)->irq_lock);
1690
1691 guc_flush_submissions(guc);
1692 guc_flush_destroyed_contexts(guc);
1693 flush_work(&guc->ct.requests.worker);
1694
1695 scrub_guc_desc_for_outstanding_g2h(guc);
1696 }
1697
1698 static struct intel_engine_cs *
1699 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
1700 {
1701 struct intel_engine_cs *engine;
1702 intel_engine_mask_t tmp, mask = ve->mask;
1703 unsigned int num_siblings = 0;
1704
1705 for_each_engine_masked(engine, ve->gt, mask, tmp)
1706 if (num_siblings++ == sibling)
1707 return engine;
1708
1709 return NULL;
1710 }
1711
1712 static inline struct intel_engine_cs *
1713 __context_to_physical_engine(struct intel_context *ce)
1714 {
1715 struct intel_engine_cs *engine = ce->engine;
1716
1717 if (intel_engine_is_virtual(engine))
1718 engine = guc_virtual_get_sibling(engine, 0);
1719
1720 return engine;
1721 }
1722
1723 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
1724 {
1725 struct intel_engine_cs *engine = __context_to_physical_engine(ce);
1726
1727 if (!intel_context_is_schedulable(ce))
1728 return;
1729
1730 GEM_BUG_ON(!intel_context_is_pinned(ce));
1731
1732 /*
1733 * We want a simple context + ring to execute the breadcrumb update.
1734 * We cannot rely on the context being intact across the GPU hang,
1735 * so clear it and rebuild just what we need for the breadcrumb.
1736 * All pending requests for this context will be zapped, and any
1737 * future request will be after userspace has had the opportunity
1738 * to recreate its own state.
1739 */
1740 if (scrub)
1741 lrc_init_regs(ce, engine, true);
1742
1743 /* Rerun the request; its payload has been neutered (if guilty). */
1744 lrc_update_regs(ce, engine, head);
1745 }
1746
1747 static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
1748 {
1749 /*
1750 * Wa_22011802037: In addition to stopping the cs, we need
1751 * to wait for any pending mi force wakeups
1752 */
1753 if (intel_engine_reset_needs_wa_22011802037(engine->gt)) {
1754 intel_engine_stop_cs(engine);
1755 intel_engine_wait_for_pending_mi_fw(engine);
1756 }
1757 }
1758
1759 static void guc_reset_nop(struct intel_engine_cs *engine)
1760 {
1761 }
1762
1763 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
1764 {
1765 }
1766
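/*
 * Unsubmit all incomplete requests of this context and push them back onto
 * the scheduler's priority lists so they are resubmitted once the tasklet
 * runs again after the reset.
 */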
1767 static void
1768 __unwind_incomplete_requests(struct intel_context *ce)
1769 {
1770 struct i915_request *rq, *rn;
1771 struct list_head *pl;
1772 int prio = I915_PRIORITY_INVALID;
1773 struct i915_sched_engine * const sched_engine =
1774 ce->engine->sched_engine;
1775 unsigned long flags;
1776
1777 spin_lock_irqsave(&sched_engine->lock, flags);
1778 spin_lock(&ce->guc_state.lock);
1779 list_for_each_entry_safe_reverse(rq, rn,
1780 &ce->guc_state.requests,
1781 sched.link) {
1782 if (i915_request_completed(rq))
1783 continue;
1784
1785 list_del_init(&rq->sched.link);
1786 __i915_request_unsubmit(rq);
1787
1788 /* Push the request back into the queue for later resubmission. */
1789 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
1790 if (rq_prio(rq) != prio) {
1791 prio = rq_prio(rq);
1792 pl = i915_sched_lookup_priolist(sched_engine, prio);
1793 }
1794 GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
1795
1796 list_add(&rq->sched.link, pl);
1797 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1798 }
1799 spin_unlock(&ce->guc_state.lock);
1800 spin_unlock_irqrestore(&sched_engine->lock, flags);
1801 }
1802
1803 static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
1804 {
1805 bool guilty;
1806 struct i915_request *rq;
1807 unsigned long flags;
1808 u32 head;
1809 int i, number_children = ce->parallel.number_children;
1810 struct intel_context *parent = ce;
1811
1812 GEM_BUG_ON(intel_context_is_child(ce));
1813
1814 intel_context_get(ce);
1815
1816 /*
1817 * GuC will implicitly mark the context as non-schedulable when it sends
1818 * the reset notification. Make sure our state reflects this change. The
1819 * context will be marked enabled on resubmission.
1820 */
1821 spin_lock_irqsave(&ce->guc_state.lock, flags);
1822 clr_context_enabled(ce);
1823 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1824
1825 /*
1826 	 * For each context in the relationship, find the hanging request,
1827 	 * resetting each context / request as needed.
1828 */
1829 for (i = 0; i < number_children + 1; ++i) {
1830 if (!intel_context_is_pinned(ce))
1831 goto next_context;
1832
1833 guilty = false;
1834 rq = intel_context_get_active_request(ce);
1835 if (!rq) {
1836 head = ce->ring->tail;
1837 goto out_replay;
1838 }
1839
1840 if (i915_request_started(rq))
1841 guilty = stalled & ce->engine->mask;
1842
1843 GEM_BUG_ON(i915_active_is_idle(&ce->active));
1844 head = intel_ring_wrap(ce->ring, rq->head);
1845
1846 __i915_request_reset(rq, guilty);
1847 i915_request_put(rq);
1848 out_replay:
1849 guc_reset_state(ce, head, guilty);
1850 next_context:
1851 if (i != number_children)
1852 ce = list_next_entry(ce, parallel.child_link);
1853 }
1854
1855 __unwind_incomplete_requests(parent);
1856 intel_context_put(parent);
1857 }
1858
1859 void wake_up_all_tlb_invalidate(struct intel_guc *guc)
1860 {
1861 struct intel_guc_tlb_wait *wait;
1862 unsigned long i;
1863
1864 if (!intel_guc_tlb_invalidation_is_available(guc))
1865 return;
1866
1867 xa_lock_irq(&guc->tlb_lookup);
1868 xa_for_each(&guc->tlb_lookup, i, wait)
1869 wake_up(&wait->wq);
1870 xa_unlock_irq(&guc->tlb_lookup);
1871 }
1872
1873 void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
1874 {
1875 struct intel_context *ce;
1876 unsigned long index;
1877 unsigned long flags;
1878
1879 if (unlikely(!guc_submission_initialized(guc))) {
1880 /* Reset called during driver load? GuC not yet initialised! */
1881 return;
1882 }
1883
1884 xa_lock_irqsave(&guc->context_lookup, flags);
1885 xa_for_each(&guc->context_lookup, index, ce) {
1886 if (!kref_get_unless_zero(&ce->ref))
1887 continue;
1888
1889 xa_unlock(&guc->context_lookup);
1890
1891 if (intel_context_is_pinned(ce) &&
1892 !intel_context_is_child(ce))
1893 __guc_reset_context(ce, stalled);
1894
1895 intel_context_put(ce);
1896
1897 xa_lock(&guc->context_lookup);
1898 }
1899 xa_unlock_irqrestore(&guc->context_lookup, flags);
1900
1901 /* GuC is blown away, drop all references to contexts */
1902 xa_destroy(&guc->context_lookup);
1903 }
1904
1905 static void guc_cancel_context_requests(struct intel_context *ce)
1906 {
1907 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1908 struct i915_request *rq;
1909 unsigned long flags;
1910
1911 /* Mark all executing requests as skipped. */
1912 spin_lock_irqsave(&sched_engine->lock, flags);
1913 spin_lock(&ce->guc_state.lock);
1914 list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
1915 i915_request_put(i915_request_mark_eio(rq));
1916 spin_unlock(&ce->guc_state.lock);
1917 spin_unlock_irqrestore(&sched_engine->lock, flags);
1918 }
1919
1920 static void
1921 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
1922 {
1923 struct i915_request *rq, *rn;
1924 struct rb_node *rb;
1925 unsigned long flags;
1926
1927 /* Can be called during boot if GuC fails to load */
1928 if (!sched_engine)
1929 return;
1930
1931 /*
1932 * Before we call engine->cancel_requests(), we should have exclusive
1933 * access to the submission state. This is arranged for us by the
1934 * caller disabling the interrupt generation, the tasklet and other
1935 * threads that may then access the same state, giving us a free hand
1936 * to reset state. However, we still need to let lockdep be aware that
1937 * we know this state may be accessed in hardirq context, so we
1938 * disable the irq around this manipulation and we want to keep
1939 * the spinlock focused on its duties and not accidentally conflate
1940 * coverage to the submission's irq state. (Similarly, although we
1941 * shouldn't need to disable irq around the manipulation of the
1942 * submission's irq state, we also wish to remind ourselves that
1943 * it is irq state.)
1944 */
1945 spin_lock_irqsave(&sched_engine->lock, flags);
1946
1947 /* Flush the queued requests to the timeline list (for retiring). */
1948 while ((rb = rb_first_cached(&sched_engine->queue))) {
1949 struct i915_priolist *p = to_priolist(rb);
1950
1951 priolist_for_each_request_consume(rq, rn, p) {
1952 list_del_init(&rq->sched.link);
1953
1954 __i915_request_submit(rq);
1955
1956 i915_request_put(i915_request_mark_eio(rq));
1957 }
1958
1959 rb_erase_cached(&p->node, &sched_engine->queue);
1960 i915_priolist_free(p);
1961 }
1962
1963 /* Remaining _unready_ requests will be nop'ed when submitted */
1964
1965 sched_engine->queue_priority_hint = INT_MIN;
1966 sched_engine->queue = RB_ROOT_CACHED;
1967
1968 spin_unlock_irqrestore(&sched_engine->lock, flags);
1969 }
1970
1971 void intel_guc_submission_cancel_requests(struct intel_guc *guc)
1972 {
1973 struct intel_context *ce;
1974 unsigned long index;
1975 unsigned long flags;
1976
1977 xa_lock_irqsave(&guc->context_lookup, flags);
1978 xa_for_each(&guc->context_lookup, index, ce) {
1979 if (!kref_get_unless_zero(&ce->ref))
1980 continue;
1981
1982 xa_unlock(&guc->context_lookup);
1983
1984 if (intel_context_is_pinned(ce) &&
1985 !intel_context_is_child(ce))
1986 guc_cancel_context_requests(ce);
1987
1988 intel_context_put(ce);
1989
1990 xa_lock(&guc->context_lookup);
1991 }
1992 xa_unlock_irqrestore(&guc->context_lookup, flags);
1993
1994 guc_cancel_sched_engine_requests(guc->sched_engine);
1995
1996 /* GuC is blown away, drop all references to contexts */
1997 xa_destroy(&guc->context_lookup);
1998
1999 /*
2000 * Wedged GT won't respond to any TLB invalidation request. Simply
2001 * release all the blocked waiters.
2002 */
2003 wake_up_all_tlb_invalidate(guc);
2004 }
2005
2006 void intel_guc_submission_reset_finish(struct intel_guc *guc)
2007 {
2008 /* Reset called during driver load or during wedge? */
2009 if (unlikely(!guc_submission_initialized(guc) ||
2010 !intel_guc_is_fw_running(guc) ||
2011 intel_gt_is_wedged(guc_to_gt(guc)))) {
2012 return;
2013 }
2014
2015 /*
2016 * Technically possible for either of these values to be non-zero here,
2017 	 * but very unlikely + harmless. Regardless, let's add a warn so we can
2018 	 * see in CI if this happens frequently or is a precursor to taking down the
2019 * machine.
2020 */
2021 GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
2022 atomic_set(&guc->outstanding_submission_g2h, 0);
2023
2024 intel_guc_global_policies_update(guc);
2025 enable_submission(guc);
2026 intel_gt_unpark_heartbeats(guc_to_gt(guc));
2027
2028 /*
2029 * The full GT reset will have cleared the TLB caches and flushed the
2030 * G2H message queue; we can release all the blocked waiters.
2031 */
2032 wake_up_all_tlb_invalidate(guc);
2033 }
2034
2035 static void destroyed_worker_func(struct work_struct *w);
2036 static void reset_fail_worker_func(struct work_struct *w);
2037
2038 bool intel_guc_tlb_invalidation_is_available(struct intel_guc *guc)
2039 {
2040 return HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915) &&
2041 intel_guc_is_ready(guc);
2042 }
2043
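/*
 * Set up the xarray used to track outstanding TLB invalidation waiters,
 * including a preallocated slot (guc->serial_slot) that is shared when a
 * new entry cannot be allocated under memory pressure.
 */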
2044 static int init_tlb_lookup(struct intel_guc *guc)
2045 {
2046 struct intel_guc_tlb_wait *wait;
2047 int err;
2048
2049 if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915))
2050 return 0;
2051
2052 xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
2053
2054 wait = kzalloc(sizeof(*wait), GFP_KERNEL);
2055 if (!wait)
2056 return -ENOMEM;
2057
2058 init_waitqueue_head(&wait->wq);
2059
2060 /* Preallocate a shared id for use under memory pressure. */
2061 err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait,
2062 xa_limit_32b, &guc->next_seqno, GFP_KERNEL);
2063 if (err < 0) {
2064 kfree(wait);
2065 return err;
2066 }
2067
2068 return 0;
2069 }
2070
2071 static void fini_tlb_lookup(struct intel_guc *guc)
2072 {
2073 struct intel_guc_tlb_wait *wait;
2074
2075 if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915))
2076 return;
2077
2078 wait = xa_load(&guc->tlb_lookup, guc->serial_slot);
2079 if (wait && wait->busy)
2080 guc_err(guc, "Unexpected busy item in tlb_lookup on fini\n");
2081 kfree(wait);
2082
2083 xa_destroy(&guc->tlb_lookup);
2084 }
2085
2086 /*
2087 * Set up the memory resources to be shared with the GuC (via the GGTT)
2088 * at firmware loading time.
2089 */
2090 int intel_guc_submission_init(struct intel_guc *guc)
2091 {
2092 struct intel_gt *gt = guc_to_gt(guc);
2093 int ret;
2094
2095 if (guc->submission_initialized)
2096 return 0;
2097
2098 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) {
2099 ret = guc_lrc_desc_pool_create_v69(guc);
2100 if (ret)
2101 return ret;
2102 }
2103
2104 ret = init_tlb_lookup(guc);
2105 if (ret)
2106 goto destroy_pool;
2107
2108 guc->submission_state.guc_ids_bitmap =
2109 bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
2110 if (!guc->submission_state.guc_ids_bitmap) {
2111 ret = -ENOMEM;
2112 goto destroy_tlb;
2113 }
2114
2115 guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
2116 guc->timestamp.shift = gpm_timestamp_shift(gt);
2117 guc->submission_initialized = true;
2118
2119 return 0;
2120
2121 destroy_tlb:
2122 fini_tlb_lookup(guc);
2123 destroy_pool:
2124 guc_lrc_desc_pool_destroy_v69(guc);
2125 return ret;
2126 }
2127
2128 void intel_guc_submission_fini(struct intel_guc *guc)
2129 {
2130 if (!guc->submission_initialized)
2131 return;
2132
2133 guc_fini_engine_stats(guc);
2134 guc_flush_destroyed_contexts(guc);
2135 guc_lrc_desc_pool_destroy_v69(guc);
2136 i915_sched_engine_put(guc->sched_engine);
2137 bitmap_free(guc->submission_state.guc_ids_bitmap);
2138 fini_tlb_lookup(guc);
2139 guc->submission_initialized = false;
2140 }
2141
2142 static inline void queue_request(struct i915_sched_engine *sched_engine,
2143 struct i915_request *rq,
2144 int prio)
2145 {
2146 GEM_BUG_ON(!list_empty(&rq->sched.link));
2147 list_add_tail(&rq->sched.link,
2148 i915_sched_lookup_priolist(sched_engine, prio));
2149 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
2150 tasklet_hi_schedule(&sched_engine->tasklet);
2151 }
2152
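/*
 * Fast path: hand the request straight to the GuC, bypassing the tasklet.
 * Only used from guc_submit_request() when need_tasklet() says the request
 * does not have to go through the tasklet.
 */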
2153 static int guc_bypass_tasklet_submit(struct intel_guc *guc,
2154 struct i915_request *rq)
2155 {
2156 int ret = 0;
2157
2158 __i915_request_submit(rq);
2159
2160 trace_i915_request_in(rq, 0);
2161
2162 if (is_multi_lrc_rq(rq)) {
2163 if (multi_lrc_submit(rq)) {
2164 ret = guc_wq_item_append(guc, rq);
2165 if (!ret)
2166 ret = guc_add_request(guc, rq);
2167 }
2168 } else {
2169 guc_set_lrc_tail(rq);
2170 ret = guc_add_request(guc, rq);
2171 }
2172
2173 if (unlikely(ret == -EPIPE))
2174 disable_submission(guc);
2175
2176 return ret;
2177 }
2178
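/*
 * A request must go through the tasklet if submission is disabled, a prior
 * request is stalled, other requests are already queued ahead of it, or its
 * guc_id is not yet mapped (context not registered with the GuC).
 */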
2179 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
2180 {
2181 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
2182 struct intel_context *ce = request_to_scheduling_context(rq);
2183
2184 return submission_disabled(guc) || guc->stalled_request ||
2185 !i915_sched_engine_is_empty(sched_engine) ||
2186 !ctx_id_mapped(guc, ce->guc_id.id);
2187 }
2188
2189 static void guc_submit_request(struct i915_request *rq)
2190 {
2191 struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
2192 struct intel_guc *guc = gt_to_guc(rq->engine->gt);
2193 unsigned long flags;
2194
2195 /* Will be called from irq-context when using foreign fences. */
2196 spin_lock_irqsave(&sched_engine->lock, flags);
2197
2198 if (need_tasklet(guc, rq))
2199 queue_request(sched_engine, rq, rq_prio(rq));
2200 else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
2201 tasklet_hi_schedule(&sched_engine->tasklet);
2202
2203 spin_unlock_irqrestore(&sched_engine->lock, flags);
2204 }
2205
2206 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
2207 {
2208 int ret;
2209
2210 GEM_BUG_ON(intel_context_is_child(ce));
2211
2212 if (intel_context_is_parent(ce))
2213 ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
2214 NUMBER_MULTI_LRC_GUC_ID(guc),
2215 order_base_2(ce->parallel.number_children
2216 + 1));
2217 else
2218 ret = ida_alloc_range(&guc->submission_state.guc_ids,
2219 NUMBER_MULTI_LRC_GUC_ID(guc),
2220 guc->submission_state.num_guc_ids - 1,
2221 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
2222 if (unlikely(ret < 0))
2223 return ret;
2224
2225 if (!intel_context_is_parent(ce))
2226 ++guc->submission_state.guc_ids_in_use;
2227
2228 ce->guc_id.id = ret;
2229 return 0;
2230 }
2231
2232 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
2233 {
2234 GEM_BUG_ON(intel_context_is_child(ce));
2235
2236 if (!context_guc_id_invalid(ce)) {
2237 if (intel_context_is_parent(ce)) {
2238 bitmap_release_region(guc->submission_state.guc_ids_bitmap,
2239 ce->guc_id.id,
2240 order_base_2(ce->parallel.number_children
2241 + 1));
2242 } else {
2243 --guc->submission_state.guc_ids_in_use;
2244 ida_free(&guc->submission_state.guc_ids,
2245 ce->guc_id.id);
2246 }
2247 clr_ctx_id_mapping(guc, ce->guc_id.id);
2248 set_context_guc_id_invalid(ce);
2249 }
2250 if (!list_empty(&ce->guc_id.link))
2251 list_del_init(&ce->guc_id.link);
2252 }
2253
2254 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
2255 {
2256 unsigned long flags;
2257
2258 spin_lock_irqsave(&guc->submission_state.lock, flags);
2259 __release_guc_id(guc, ce);
2260 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2261 }
2262
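/*
 * No free guc_ids are left: take one from the oldest context on the unpinned
 * guc_id_list. The victim is marked unregistered and its guc_id invalidated,
 * so it must re-register before it can be submitted again.
 */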
2263 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
2264 {
2265 struct intel_context *cn;
2266
2267 lockdep_assert_held(&guc->submission_state.lock);
2268 GEM_BUG_ON(intel_context_is_child(ce));
2269 GEM_BUG_ON(intel_context_is_parent(ce));
2270
2271 if (!list_empty(&guc->submission_state.guc_id_list)) {
2272 cn = list_first_entry(&guc->submission_state.guc_id_list,
2273 struct intel_context,
2274 guc_id.link);
2275
2276 GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
2277 GEM_BUG_ON(context_guc_id_invalid(cn));
2278 GEM_BUG_ON(intel_context_is_child(cn));
2279 GEM_BUG_ON(intel_context_is_parent(cn));
2280
2281 list_del_init(&cn->guc_id.link);
2282 ce->guc_id.id = cn->guc_id.id;
2283
2284 spin_lock(&cn->guc_state.lock);
2285 clr_context_registered(cn);
2286 spin_unlock(&cn->guc_state.lock);
2287
2288 set_context_guc_id_invalid(cn);
2289
2290 #ifdef CONFIG_DRM_I915_SELFTEST
2291 guc->number_guc_id_stolen++;
2292 #endif
2293
2294 return 0;
2295 } else {
2296 return -EAGAIN;
2297 }
2298 }
2299
2300 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
2301 {
2302 int ret;
2303
2304 lockdep_assert_held(&guc->submission_state.lock);
2305 GEM_BUG_ON(intel_context_is_child(ce));
2306
2307 ret = new_guc_id(guc, ce);
2308 if (unlikely(ret < 0)) {
2309 if (intel_context_is_parent(ce))
2310 return -ENOSPC;
2311
2312 ret = steal_guc_id(guc, ce);
2313 if (ret < 0)
2314 return ret;
2315 }
2316
2317 if (intel_context_is_parent(ce)) {
2318 struct intel_context *child;
2319 int i = 1;
2320
2321 for_each_child(ce, child)
2322 child->guc_id.id = ce->guc_id.id + i++;
2323 }
2324
2325 return 0;
2326 }
2327
2328 #define PIN_GUC_ID_TRIES 4
2329 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2330 {
2331 int ret = 0;
2332 unsigned long flags, tries = PIN_GUC_ID_TRIES;
2333
2334 GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
2335
2336 try_again:
2337 spin_lock_irqsave(&guc->submission_state.lock, flags);
2338
2339 might_lock(&ce->guc_state.lock);
2340
2341 if (context_guc_id_invalid(ce)) {
2342 ret = assign_guc_id(guc, ce);
2343 if (ret)
2344 goto out_unlock;
2345 		ret = 1;	/* Indicates newly assigned guc_id */
2346 }
2347 if (!list_empty(&ce->guc_id.link))
2348 list_del_init(&ce->guc_id.link);
2349 atomic_inc(&ce->guc_id.ref);
2350
2351 out_unlock:
2352 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2353
2354 /*
2355 * -EAGAIN indicates no guc_id are available, let's retire any
2356 * outstanding requests to see if that frees up a guc_id. If the first
2357 * retire didn't help, insert a sleep with the timeslice duration before
2358 * attempting to retire more requests. Double the sleep period each
2359 	 * subsequent pass before finally giving up. The sleep period has a max of
2360 	 * 100ms and a minimum of 1ms.
2361 */
2362 if (ret == -EAGAIN && --tries) {
2363 if (PIN_GUC_ID_TRIES - tries > 1) {
2364 unsigned int timeslice_shifted =
2365 ce->engine->props.timeslice_duration_ms <<
2366 (PIN_GUC_ID_TRIES - tries - 2);
2367 unsigned int max = min_t(unsigned int, 100,
2368 timeslice_shifted);
2369
2370 msleep(max_t(unsigned int, max, 1));
2371 }
2372 intel_gt_retire_requests(guc_to_gt(guc));
2373 goto try_again;
2374 }
2375
2376 return ret;
2377 }
2378
2379 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2380 {
2381 unsigned long flags;
2382
2383 GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
2384 GEM_BUG_ON(intel_context_is_child(ce));
2385
2386 if (unlikely(context_guc_id_invalid(ce) ||
2387 intel_context_is_parent(ce)))
2388 return;
2389
2390 spin_lock_irqsave(&guc->submission_state.lock, flags);
2391 if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
2392 !atomic_read(&ce->guc_id.ref))
2393 list_add_tail(&ce->guc_id.link,
2394 &guc->submission_state.guc_id_list);
2395 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2396 }
2397
2398 static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
2399 struct intel_context *ce,
2400 u32 guc_id,
2401 u32 offset,
2402 bool loop)
2403 {
2404 struct intel_context *child;
2405 u32 action[4 + MAX_ENGINE_INSTANCE];
2406 int len = 0;
2407
2408 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2409
2410 action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2411 action[len++] = guc_id;
2412 action[len++] = ce->parallel.number_children + 1;
2413 action[len++] = offset;
2414 for_each_child(ce, child) {
2415 offset += sizeof(struct guc_lrc_desc_v69);
2416 action[len++] = offset;
2417 }
2418
2419 return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2420 }
2421
2422 static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
2423 struct intel_context *ce,
2424 struct guc_ctxt_registration_info *info,
2425 bool loop)
2426 {
2427 struct intel_context *child;
2428 u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
2429 int len = 0;
2430 u32 next_id;
2431
2432 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2433
2434 action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2435 action[len++] = info->flags;
2436 action[len++] = info->context_idx;
2437 action[len++] = info->engine_class;
2438 action[len++] = info->engine_submit_mask;
2439 action[len++] = info->wq_desc_lo;
2440 action[len++] = info->wq_desc_hi;
2441 action[len++] = info->wq_base_lo;
2442 action[len++] = info->wq_base_hi;
2443 action[len++] = info->wq_size;
2444 action[len++] = ce->parallel.number_children + 1;
2445 action[len++] = info->hwlrca_lo;
2446 action[len++] = info->hwlrca_hi;
2447
2448 next_id = info->context_idx + 1;
2449 for_each_child(ce, child) {
2450 GEM_BUG_ON(next_id++ != child->guc_id.id);
2451
2452 /*
2453 * NB: GuC interface supports 64 bit LRCA even though i915/HW
2454 * only supports 32 bit currently.
2455 */
2456 action[len++] = lower_32_bits(child->lrc.lrca);
2457 action[len++] = upper_32_bits(child->lrc.lrca);
2458 }
2459
2460 GEM_BUG_ON(len > ARRAY_SIZE(action));
2461
2462 return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2463 }
2464
2465 static int __guc_action_register_context_v69(struct intel_guc *guc,
2466 u32 guc_id,
2467 u32 offset,
2468 bool loop)
2469 {
2470 u32 action[] = {
2471 INTEL_GUC_ACTION_REGISTER_CONTEXT,
2472 guc_id,
2473 offset,
2474 };
2475
2476 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2477 0, loop);
2478 }
2479
2480 static int __guc_action_register_context_v70(struct intel_guc *guc,
2481 struct guc_ctxt_registration_info *info,
2482 bool loop)
2483 {
2484 u32 action[] = {
2485 INTEL_GUC_ACTION_REGISTER_CONTEXT,
2486 info->flags,
2487 info->context_idx,
2488 info->engine_class,
2489 info->engine_submit_mask,
2490 info->wq_desc_lo,
2491 info->wq_desc_hi,
2492 info->wq_base_lo,
2493 info->wq_base_hi,
2494 info->wq_size,
2495 info->hwlrca_lo,
2496 info->hwlrca_hi,
2497 };
2498
2499 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2500 0, loop);
2501 }
2502
2503 static void prepare_context_registration_info_v69(struct intel_context *ce);
2504 static void prepare_context_registration_info_v70(struct intel_context *ce,
2505 struct guc_ctxt_registration_info *info);
2506
2507 static int
2508 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
2509 {
2510 u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
2511 ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
2512
2513 prepare_context_registration_info_v69(ce);
2514
2515 if (intel_context_is_parent(ce))
2516 return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
2517 offset, loop);
2518 else
2519 return __guc_action_register_context_v69(guc, ce->guc_id.id,
2520 offset, loop);
2521 }
2522
2523 static int
2524 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
2525 {
2526 struct guc_ctxt_registration_info info;
2527
2528 prepare_context_registration_info_v70(ce, &info);
2529
2530 if (intel_context_is_parent(ce))
2531 return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
2532 else
2533 return __guc_action_register_context_v70(guc, &info, loop);
2534 }
2535
2536 static int register_context(struct intel_context *ce, bool loop)
2537 {
2538 struct intel_guc *guc = ce_to_guc(ce);
2539 int ret;
2540
2541 GEM_BUG_ON(intel_context_is_child(ce));
2542 trace_intel_context_register(ce);
2543
2544 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
2545 ret = register_context_v70(guc, ce, loop);
2546 else
2547 ret = register_context_v69(guc, ce, loop);
2548
2549 if (likely(!ret)) {
2550 unsigned long flags;
2551
2552 spin_lock_irqsave(&ce->guc_state.lock, flags);
2553 set_context_registered(ce);
2554 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2555
2556 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
2557 guc_context_policy_init_v70(ce, loop);
2558 }
2559
2560 return ret;
2561 }
2562
2563 static int __guc_action_deregister_context(struct intel_guc *guc,
2564 u32 guc_id)
2565 {
2566 u32 action[] = {
2567 INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
2568 guc_id,
2569 };
2570
2571 return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2572 G2H_LEN_DW_DEREGISTER_CONTEXT,
2573 true);
2574 }
2575
2576 static int deregister_context(struct intel_context *ce, u32 guc_id)
2577 {
2578 struct intel_guc *guc = ce_to_guc(ce);
2579
2580 GEM_BUG_ON(intel_context_is_child(ce));
2581 trace_intel_context_deregister(ce);
2582
2583 return __guc_action_deregister_context(guc, guc_id);
2584 }
2585
2586 static inline void clear_children_join_go_memory(struct intel_context *ce)
2587 {
2588 struct parent_scratch *ps = __get_parent_scratch(ce);
2589 int i;
2590
2591 ps->go.semaphore = 0;
2592 for (i = 0; i < ce->parallel.number_children + 1; ++i)
2593 ps->join[i].semaphore = 0;
2594 }
2595
2596 static inline u32 get_children_go_value(struct intel_context *ce)
2597 {
2598 return __get_parent_scratch(ce)->go.semaphore;
2599 }
2600
2601 static inline u32 get_children_join_value(struct intel_context *ce,
2602 u8 child_index)
2603 {
2604 return __get_parent_scratch(ce)->join[child_index].semaphore;
2605 }
2606
2607 struct context_policy {
2608 u32 count;
2609 struct guc_update_context_policy h2g;
2610 };
2611
2612 static u32 __guc_context_policy_action_size(struct context_policy *policy)
2613 {
2614 size_t bytes = sizeof(policy->h2g.header) +
2615 (sizeof(policy->h2g.klv[0]) * policy->count);
2616
2617 return bytes / sizeof(u32);
2618 }
2619
2620 static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
2621 {
2622 policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
2623 policy->h2g.header.ctx_id = guc_id;
2624 policy->count = 0;
2625 }
2626
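/*
 * The helpers generated below each append a single KLV (key/length/value)
 * entry to the context policy H2G message, one helper per policy field.
 */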
2627 #define MAKE_CONTEXT_POLICY_ADD(func, id) \
2628 static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \
2629 { \
2630 GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
2631 policy->h2g.klv[policy->count].kl = \
2632 FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
2633 FIELD_PREP(GUC_KLV_0_LEN, 1); \
2634 policy->h2g.klv[policy->count].value = data; \
2635 policy->count++; \
2636 }
2637
2638 MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
2639 MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
2640 MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
2641 MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
2642 MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY)
2643
2644 #undef MAKE_CONTEXT_POLICY_ADD
2645
2646 static int __guc_context_set_context_policies(struct intel_guc *guc,
2647 struct context_policy *policy,
2648 bool loop)
2649 {
2650 return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g,
2651 __guc_context_policy_action_size(policy),
2652 0, loop);
2653 }
2654
2655 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
2656 {
2657 struct intel_engine_cs *engine = ce->engine;
2658 struct intel_guc *guc = gt_to_guc(engine->gt);
2659 struct context_policy policy;
2660 u32 execution_quantum;
2661 u32 preemption_timeout;
2662 u32 slpc_ctx_freq_req = 0;
2663 unsigned long flags;
2664 int ret;
2665
2666 /* NB: For both of these, zero means disabled. */
2667 GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
2668 execution_quantum));
2669 GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
2670 preemption_timeout));
2671 execution_quantum = engine->props.timeslice_duration_ms * 1000;
2672 preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2673
2674 if (ce->flags & BIT(CONTEXT_LOW_LATENCY))
2675 slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
2676
2677 __guc_context_policy_start_klv(&policy, ce->guc_id.id);
2678
2679 __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
2680 __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
2681 __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
2682 __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req);
2683
2684 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2685 __guc_context_policy_add_preempt_to_idle(&policy, 1);
2686
2687 ret = __guc_context_set_context_policies(guc, &policy, loop);
2688
2689 spin_lock_irqsave(&ce->guc_state.lock, flags);
2690 if (ret != 0)
2691 set_context_policy_required(ce);
2692 else
2693 clr_context_policy_required(ce);
2694 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2695
2696 return ret;
2697 }
2698
2699 static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
2700 struct guc_lrc_desc_v69 *desc)
2701 {
2702 desc->policy_flags = 0;
2703
2704 if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2705 desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
2706
2707 /* NB: For both of these, zero means disabled. */
2708 GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
2709 desc->execution_quantum));
2710 GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
2711 desc->preemption_timeout));
2712 desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
2713 desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2714 }
2715
2716 static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
2717 {
2718 /*
2719 * this matches the mapping we do in map_i915_prio_to_guc_prio()
2720 * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
2721 */
2722 switch (prio) {
2723 default:
2724 MISSING_CASE(prio);
2725 fallthrough;
2726 case GUC_CLIENT_PRIORITY_KMD_NORMAL:
2727 return GEN12_CTX_PRIORITY_NORMAL;
2728 case GUC_CLIENT_PRIORITY_NORMAL:
2729 return GEN12_CTX_PRIORITY_LOW;
2730 case GUC_CLIENT_PRIORITY_HIGH:
2731 case GUC_CLIENT_PRIORITY_KMD_HIGH:
2732 return GEN12_CTX_PRIORITY_HIGH;
2733 }
2734 }
2735
2736 static void prepare_context_registration_info_v69(struct intel_context *ce)
2737 {
2738 struct intel_engine_cs *engine = ce->engine;
2739 struct intel_guc *guc = gt_to_guc(engine->gt);
2740 u32 ctx_id = ce->guc_id.id;
2741 struct guc_lrc_desc_v69 *desc;
2742 struct intel_context *child;
2743
2744 GEM_BUG_ON(!engine->mask);
2745
2746 /*
2747 	 * Ensure the LRC + CT vmas are in the same region, as the write barrier
2748 	 * is done based on the CT vma region.
2749 */
2750 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2751 i915_gem_object_is_lmem(ce->ring->vma->obj));
2752
2753 desc = __get_lrc_desc_v69(guc, ctx_id);
2754 GEM_BUG_ON(!desc);
2755 desc->engine_class = engine_class_to_guc_class(engine->class);
2756 desc->engine_submit_mask = engine->logical_mask;
2757 desc->hw_context_desc = ce->lrc.lrca;
2758 desc->priority = ce->guc_state.prio;
2759 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2760 guc_context_policy_init_v69(engine, desc);
2761
2762 /*
2763 * If context is a parent, we need to register a process descriptor
2764 * describing a work queue and register all child contexts.
2765 */
2766 if (intel_context_is_parent(ce)) {
2767 struct guc_process_desc_v69 *pdesc;
2768
2769 ce->parallel.guc.wqi_tail = 0;
2770 ce->parallel.guc.wqi_head = 0;
2771
2772 desc->process_desc = i915_ggtt_offset(ce->state) +
2773 __get_parent_scratch_offset(ce);
2774 desc->wq_addr = i915_ggtt_offset(ce->state) +
2775 __get_wq_offset(ce);
2776 desc->wq_size = WQ_SIZE;
2777
2778 pdesc = __get_process_desc_v69(ce);
2779 memset(pdesc, 0, sizeof(*(pdesc)));
2780 pdesc->stage_id = ce->guc_id.id;
2781 pdesc->wq_base_addr = desc->wq_addr;
2782 pdesc->wq_size_bytes = desc->wq_size;
2783 pdesc->wq_status = WQ_STATUS_ACTIVE;
2784
2785 ce->parallel.guc.wq_head = &pdesc->head;
2786 ce->parallel.guc.wq_tail = &pdesc->tail;
2787 ce->parallel.guc.wq_status = &pdesc->wq_status;
2788
2789 for_each_child(ce, child) {
2790 desc = __get_lrc_desc_v69(guc, child->guc_id.id);
2791
2792 desc->engine_class =
2793 engine_class_to_guc_class(engine->class);
2794 desc->hw_context_desc = child->lrc.lrca;
2795 desc->priority = ce->guc_state.prio;
2796 desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2797 guc_context_policy_init_v69(engine, desc);
2798 }
2799
2800 clear_children_join_go_memory(ce);
2801 }
2802 }
2803
2804 static void prepare_context_registration_info_v70(struct intel_context *ce,
2805 struct guc_ctxt_registration_info *info)
2806 {
2807 struct intel_engine_cs *engine = ce->engine;
2808 struct intel_guc *guc = gt_to_guc(engine->gt);
2809 u32 ctx_id = ce->guc_id.id;
2810
2811 GEM_BUG_ON(!engine->mask);
2812
2813 /*
2814 	 * Ensure the LRC + CT vmas are in the same region, as the write barrier
2815 	 * is done based on the CT vma region.
2816 */
2817 GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2818 i915_gem_object_is_lmem(ce->ring->vma->obj));
2819
2820 memset(info, 0, sizeof(*info));
2821 info->context_idx = ctx_id;
2822 info->engine_class = engine_class_to_guc_class(engine->class);
2823 info->engine_submit_mask = engine->logical_mask;
2824 /*
2825 * NB: GuC interface supports 64 bit LRCA even though i915/HW
2826 * only supports 32 bit currently.
2827 */
2828 info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
2829 info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
2830 if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
2831 info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
2832 info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
2833
2834 /*
2835 * If context is a parent, we need to register a process descriptor
2836 * describing a work queue and register all child contexts.
2837 */
2838 if (intel_context_is_parent(ce)) {
2839 struct guc_sched_wq_desc *wq_desc;
2840 u64 wq_desc_offset, wq_base_offset;
2841
2842 ce->parallel.guc.wqi_tail = 0;
2843 ce->parallel.guc.wqi_head = 0;
2844
2845 wq_desc_offset = i915_ggtt_offset(ce->state) +
2846 __get_parent_scratch_offset(ce);
2847 wq_base_offset = i915_ggtt_offset(ce->state) +
2848 __get_wq_offset(ce);
2849 info->wq_desc_lo = lower_32_bits(wq_desc_offset);
2850 info->wq_desc_hi = upper_32_bits(wq_desc_offset);
2851 info->wq_base_lo = lower_32_bits(wq_base_offset);
2852 info->wq_base_hi = upper_32_bits(wq_base_offset);
2853 info->wq_size = WQ_SIZE;
2854
2855 wq_desc = __get_wq_desc_v70(ce);
2856 memset(wq_desc, 0, sizeof(*wq_desc));
2857 wq_desc->wq_status = WQ_STATUS_ACTIVE;
2858
2859 ce->parallel.guc.wq_head = &wq_desc->head;
2860 ce->parallel.guc.wq_tail = &wq_desc->tail;
2861 ce->parallel.guc.wq_status = &wq_desc->wq_status;
2862
2863 clear_children_join_go_memory(ce);
2864 }
2865 }
2866
2867 static int try_context_registration(struct intel_context *ce, bool loop)
2868 {
2869 struct intel_engine_cs *engine = ce->engine;
2870 struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
2871 struct intel_guc *guc = gt_to_guc(engine->gt);
2872 intel_wakeref_t wakeref;
2873 u32 ctx_id = ce->guc_id.id;
2874 bool context_registered;
2875 int ret = 0;
2876
2877 GEM_BUG_ON(!sched_state_is_init(ce));
2878
2879 context_registered = ctx_id_mapped(guc, ctx_id);
2880
2881 clr_ctx_id_mapping(guc, ctx_id);
2882 set_ctx_id_mapping(guc, ctx_id, ce);
2883
2884 /*
2885 * The context_lookup xarray is used to determine if the hardware
2886 * context is currently registered. There are two cases in which it
2887 	 * could be registered: either the guc_id has been stolen from another
2888 	 * context, or the lrc descriptor address of this context has changed. In
2889 * either case the context needs to be deregistered with the GuC before
2890 * registering this context.
2891 */
2892 if (context_registered) {
2893 bool disabled;
2894 unsigned long flags;
2895
2896 trace_intel_context_steal_guc_id(ce);
2897 GEM_BUG_ON(!loop);
2898
2899 /* Seal race with Reset */
2900 spin_lock_irqsave(&ce->guc_state.lock, flags);
2901 disabled = submission_disabled(guc);
2902 if (likely(!disabled)) {
2903 set_context_wait_for_deregister_to_register(ce);
2904 intel_context_get(ce);
2905 }
2906 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2907 if (unlikely(disabled)) {
2908 clr_ctx_id_mapping(guc, ctx_id);
2909 return 0; /* Will get registered later */
2910 }
2911
2912 /*
2913 * If stealing the guc_id, this ce has the same guc_id as the
2914 * context whose guc_id was stolen.
2915 */
2916 with_intel_runtime_pm(runtime_pm, wakeref)
2917 ret = deregister_context(ce, ce->guc_id.id);
2918 if (unlikely(ret == -ENODEV))
2919 ret = 0; /* Will get registered later */
2920 } else {
2921 with_intel_runtime_pm(runtime_pm, wakeref)
2922 ret = register_context(ce, loop);
2923 if (unlikely(ret == -EBUSY)) {
2924 clr_ctx_id_mapping(guc, ctx_id);
2925 } else if (unlikely(ret == -ENODEV)) {
2926 clr_ctx_id_mapping(guc, ctx_id);
2927 ret = 0; /* Will get registered later */
2928 }
2929 }
2930
2931 return ret;
2932 }
2933
2934 static int __guc_context_pre_pin(struct intel_context *ce,
2935 struct intel_engine_cs *engine,
2936 struct i915_gem_ww_ctx *ww,
2937 void **vaddr)
2938 {
2939 return lrc_pre_pin(ce, engine, ww, vaddr);
2940 }
2941
2942 static int __guc_context_pin(struct intel_context *ce,
2943 struct intel_engine_cs *engine,
2944 void *vaddr)
2945 {
2946 if (i915_ggtt_offset(ce->state) !=
2947 (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
2948 set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2949
2950 /*
2951 * GuC context gets pinned in guc_request_alloc. See that function for
2952 	 * an explanation of why.
2953 */
2954
2955 return lrc_pin(ce, engine, vaddr);
2956 }
2957
2958 static int guc_context_pre_pin(struct intel_context *ce,
2959 struct i915_gem_ww_ctx *ww,
2960 void **vaddr)
2961 {
2962 return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
2963 }
2964
2965 static int guc_context_pin(struct intel_context *ce, void *vaddr)
2966 {
2967 int ret = __guc_context_pin(ce, ce->engine, vaddr);
2968
2969 if (likely(!ret && !intel_context_is_barrier(ce)))
2970 intel_engine_pm_get(ce->engine);
2971
2972 return ret;
2973 }
2974
2975 static void guc_context_unpin(struct intel_context *ce)
2976 {
2977 struct intel_guc *guc = ce_to_guc(ce);
2978
2979 __guc_context_update_stats(ce);
2980 unpin_guc_id(guc, ce);
2981 lrc_unpin(ce);
2982
2983 if (likely(!intel_context_is_barrier(ce)))
2984 intel_engine_pm_put_async(ce->engine);
2985 }
2986
2987 static void guc_context_post_unpin(struct intel_context *ce)
2988 {
2989 lrc_post_unpin(ce);
2990 }
2991
2992 static void __guc_context_sched_enable(struct intel_guc *guc,
2993 struct intel_context *ce)
2994 {
2995 u32 action[] = {
2996 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2997 ce->guc_id.id,
2998 GUC_CONTEXT_ENABLE
2999 };
3000
3001 trace_intel_context_sched_enable(ce);
3002
3003 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
3004 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
3005 }
3006
3007 static void __guc_context_sched_disable(struct intel_guc *guc,
3008 struct intel_context *ce,
3009 u16 guc_id)
3010 {
3011 u32 action[] = {
3012 INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
3013 guc_id, /* ce->guc_id.id not stable */
3014 GUC_CONTEXT_DISABLE
3015 };
3016
3017 GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID);
3018
3019 GEM_BUG_ON(intel_context_is_child(ce));
3020 trace_intel_context_sched_disable(ce);
3021
3022 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
3023 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
3024 }
3025
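/*
 * The blocked fence tracks an in-flight schedule disable; completing it
 * releases waiters such as guc_context_cancel_request().
 */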
3026 static void guc_blocked_fence_complete(struct intel_context *ce)
3027 {
3028 lockdep_assert_held(&ce->guc_state.lock);
3029
3030 if (!i915_sw_fence_done(&ce->guc_state.blocked))
3031 i915_sw_fence_complete(&ce->guc_state.blocked);
3032 }
3033
3034 static void guc_blocked_fence_reinit(struct intel_context *ce)
3035 {
3036 lockdep_assert_held(&ce->guc_state.lock);
3037 GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
3038
3039 /*
3040 * This fence is always complete unless a pending schedule disable is
3041 * outstanding. We arm the fence here and complete it when we receive
3042 * the pending schedule disable complete message.
3043 */
3044 i915_sw_fence_fini(&ce->guc_state.blocked);
3045 i915_sw_fence_reinit(&ce->guc_state.blocked);
3046 i915_sw_fence_await(&ce->guc_state.blocked);
3047 i915_sw_fence_commit(&ce->guc_state.blocked);
3048 }
3049
3050 static u16 prep_context_pending_disable(struct intel_context *ce)
3051 {
3052 lockdep_assert_held(&ce->guc_state.lock);
3053
3054 set_context_pending_disable(ce);
3055 clr_context_enabled(ce);
3056 guc_blocked_fence_reinit(ce);
3057 intel_context_get(ce);
3058
3059 return ce->guc_id.id;
3060 }
3061
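/*
 * Block further submission on the context by disabling scheduling in the
 * GuC. Returns the blocked fence, which signals once the schedule disable
 * completes (or immediately if scheduling was not enabled to begin with).
 */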
3062 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
3063 {
3064 struct intel_guc *guc = ce_to_guc(ce);
3065 unsigned long flags;
3066 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
3067 intel_wakeref_t wakeref;
3068 u16 guc_id;
3069 bool enabled;
3070
3071 GEM_BUG_ON(intel_context_is_child(ce));
3072
3073 spin_lock_irqsave(&ce->guc_state.lock, flags);
3074
3075 incr_context_blocked(ce);
3076
3077 enabled = context_enabled(ce);
3078 if (unlikely(!enabled || submission_disabled(guc))) {
3079 if (enabled)
3080 clr_context_enabled(ce);
3081 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3082 return &ce->guc_state.blocked;
3083 }
3084
3085 /*
3086 * We add +2 here as the schedule disable complete CTB handler calls
3087 * intel_context_sched_disable_unpin (-2 to pin_count).
3088 */
3089 atomic_add(2, &ce->pin_count);
3090
3091 guc_id = prep_context_pending_disable(ce);
3092
3093 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3094
3095 with_intel_runtime_pm(runtime_pm, wakeref)
3096 __guc_context_sched_disable(guc, ce, guc_id);
3097
3098 return &ce->guc_state.blocked;
3099 }
3100
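/*
 * A context cannot be unblocked while it is blocked more than once, a
 * schedule disable is pending, or it has been banned.
 */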
3101 #define SCHED_STATE_MULTI_BLOCKED_MASK \
3102 (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
3103 #define SCHED_STATE_NO_UNBLOCK \
3104 (SCHED_STATE_MULTI_BLOCKED_MASK | \
3105 SCHED_STATE_PENDING_DISABLE | \
3106 SCHED_STATE_BANNED)
3107
3108 static bool context_cant_unblock(struct intel_context *ce)
3109 {
3110 lockdep_assert_held(&ce->guc_state.lock);
3111
3112 return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
3113 context_guc_id_invalid(ce) ||
3114 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
3115 !intel_context_is_pinned(ce);
3116 }
3117
3118 static void guc_context_unblock(struct intel_context *ce)
3119 {
3120 struct intel_guc *guc = ce_to_guc(ce);
3121 unsigned long flags;
3122 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
3123 intel_wakeref_t wakeref;
3124 bool enable;
3125
3126 GEM_BUG_ON(context_enabled(ce));
3127 GEM_BUG_ON(intel_context_is_child(ce));
3128
3129 spin_lock_irqsave(&ce->guc_state.lock, flags);
3130
3131 if (unlikely(submission_disabled(guc) ||
3132 context_cant_unblock(ce))) {
3133 enable = false;
3134 } else {
3135 enable = true;
3136 set_context_pending_enable(ce);
3137 set_context_enabled(ce);
3138 intel_context_get(ce);
3139 }
3140
3141 decr_context_blocked(ce);
3142
3143 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3144
3145 if (enable) {
3146 with_intel_runtime_pm(runtime_pm, wakeref)
3147 __guc_context_sched_enable(guc, ce);
3148 }
3149 }
3150
3151 static void guc_context_cancel_request(struct intel_context *ce,
3152 struct i915_request *rq)
3153 {
3154 struct intel_context *block_context =
3155 request_to_scheduling_context(rq);
3156
3157 if (i915_sw_fence_signaled(&rq->submit)) {
3158 struct i915_sw_fence *fence;
3159
3160 intel_context_get(ce);
3161 fence = guc_context_block(block_context);
3162 i915_sw_fence_wait(fence);
3163 if (!i915_request_completed(rq)) {
3164 __i915_request_skip(rq);
3165 guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
3166 true);
3167 }
3168
3169 guc_context_unblock(block_context);
3170 intel_context_put(ce);
3171 }
3172 }
3173
3174 static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
3175 u16 guc_id,
3176 u32 preemption_timeout)
3177 {
3178 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
3179 struct context_policy policy;
3180
3181 __guc_context_policy_start_klv(&policy, guc_id);
3182 __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
3183 __guc_context_set_context_policies(guc, &policy, true);
3184 } else {
3185 u32 action[] = {
3186 INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
3187 guc_id,
3188 preemption_timeout
3189 };
3190
3191 intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
3192 }
3193 }
3194
3195 static void
3196 guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
3197 unsigned int preempt_timeout_ms)
3198 {
3199 struct intel_guc *guc = ce_to_guc(ce);
3200 struct intel_runtime_pm *runtime_pm =
3201 &ce->engine->gt->i915->runtime_pm;
3202 intel_wakeref_t wakeref;
3203 unsigned long flags;
3204
3205 GEM_BUG_ON(intel_context_is_child(ce));
3206
3207 guc_flush_submissions(guc);
3208
3209 spin_lock_irqsave(&ce->guc_state.lock, flags);
3210 set_context_banned(ce);
3211
3212 if (submission_disabled(guc) ||
3213 (!context_enabled(ce) && !context_pending_disable(ce))) {
3214 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3215
3216 guc_cancel_context_requests(ce);
3217 intel_engine_signal_breadcrumbs(ce->engine);
3218 } else if (!context_pending_disable(ce)) {
3219 u16 guc_id;
3220
3221 /*
3222 * We add +2 here as the schedule disable complete CTB handler
3223 * calls intel_context_sched_disable_unpin (-2 to pin_count).
3224 */
3225 atomic_add(2, &ce->pin_count);
3226
3227 guc_id = prep_context_pending_disable(ce);
3228 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3229
3230 /*
3231 * In addition to disabling scheduling, set the preemption
3232 * timeout to the minimum value (1 us) so the banned context
3233 * gets kicked off the HW ASAP.
3234 */
3235 with_intel_runtime_pm(runtime_pm, wakeref) {
3236 __guc_context_set_preemption_timeout(guc, guc_id,
3237 preempt_timeout_ms);
3238 __guc_context_sched_disable(guc, ce, guc_id);
3239 }
3240 } else {
3241 if (!context_guc_id_invalid(ce))
3242 with_intel_runtime_pm(runtime_pm, wakeref)
3243 __guc_context_set_preemption_timeout(guc,
3244 ce->guc_id.id,
3245 preempt_timeout_ms);
3246 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3247 }
3248 }
3249
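/*
 * Send the schedule disable H2G for a context. Called with ce->guc_state.lock
 * held; the lock is dropped before sending so the busy-loop send is not done
 * under the spinlock.
 */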
3250 static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
3251 unsigned long flags)
3252 __releases(ce->guc_state.lock)
3253 {
3254 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
3255 intel_wakeref_t wakeref;
3256 u16 guc_id;
3257
3258 lockdep_assert_held(&ce->guc_state.lock);
3259 guc_id = prep_context_pending_disable(ce);
3260
3261 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3262
3263 with_intel_runtime_pm(runtime_pm, wakeref)
3264 __guc_context_sched_disable(guc, ce, guc_id);
3265 }
3266
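/*
 * Decide whether a schedule disable H2G can be skipped: if submission is
 * disabled, or the guc_id is invalid or no longer mapped, there is nothing
 * for the GuC to disable, so just clear the enabled state locally. Otherwise
 * the H2G is only required while scheduling is still enabled on this context.
 */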
3267 static bool bypass_sched_disable(struct intel_guc *guc,
3268 struct intel_context *ce)
3269 {
3270 lockdep_assert_held(&ce->guc_state.lock);
3271 GEM_BUG_ON(intel_context_is_child(ce));
3272
3273 if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
3274 !ctx_id_mapped(guc, ce->guc_id.id)) {
3275 clr_context_enabled(ce);
3276 return true;
3277 }
3278
3279 return !context_enabled(ce);
3280 }
3281
3282 static void __delay_sched_disable(struct work_struct *wrk)
3283 {
3284 struct intel_context *ce =
3285 container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);
3286 struct intel_guc *guc = ce_to_guc(ce);
3287 unsigned long flags;
3288
3289 spin_lock_irqsave(&ce->guc_state.lock, flags);
3290
3291 if (bypass_sched_disable(guc, ce)) {
3292 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3293 intel_context_sched_disable_unpin(ce);
3294 } else {
3295 do_sched_disable(guc, ce, flags);
3296 }
3297 }
3298
3299 static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
3300 {
3301 /*
3302 * Parent contexts are perma-pinned; if we are unpinning, do the schedule
3303 * disable immediately.
3304 */
3305 if (intel_context_is_parent(ce))
3306 return true;
3307
3308 /*
3309 * If we are beyond the threshold for avail guc_ids, do schedule disable immediately.
3310 */
3311 return guc->submission_state.guc_ids_in_use >
3312 guc->submission_state.sched_disable_gucid_threshold;
3313 }
3314
3315 static void guc_context_sched_disable(struct intel_context *ce)
3316 {
3317 struct intel_guc *guc = ce_to_guc(ce);
3318 u64 delay = guc->submission_state.sched_disable_delay_ms;
3319 unsigned long flags;
3320
3321 spin_lock_irqsave(&ce->guc_state.lock, flags);
3322
3323 if (bypass_sched_disable(guc, ce)) {
3324 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3325 intel_context_sched_disable_unpin(ce);
3326 } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
3327 delay) {
3328 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3329 mod_delayed_work(system_unbound_wq,
3330 &ce->guc_state.sched_disable_delay_work,
3331 msecs_to_jiffies(delay));
3332 } else {
3333 do_sched_disable(guc, ce, flags);
3334 }
3335 }
3336
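/*
 * Closing a context: if a delayed schedule disable is still pending, cancel
 * it and run the disable synchronously now, then flag the close as done so
 * that guc_request_alloc() can wait for it (see the comment there).
 */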
3337 static void guc_context_close(struct intel_context *ce)
3338 {
3339 unsigned long flags;
3340
3341 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
3342 cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))
3343 __delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work);
3344
3345 spin_lock_irqsave(&ce->guc_state.lock, flags);
3346 set_context_close_done(ce);
3347 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3348 }
3349
3350 static inline int guc_lrc_desc_unpin(struct intel_context *ce)
3351 {
3352 struct intel_guc *guc = ce_to_guc(ce);
3353 struct intel_gt *gt = guc_to_gt(guc);
3354 unsigned long flags;
3355 bool disabled;
3356 int ret;
3357
3358 GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
3359 GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
3360 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
3361 GEM_BUG_ON(context_enabled(ce));
3362
3363 /* Seal race with Reset */
3364 spin_lock_irqsave(&ce->guc_state.lock, flags);
3365 disabled = submission_disabled(guc);
3366 if (likely(!disabled)) {
3367 /*
3368 * Take a gt-pm ref and change context state to be destroyed.
3369 * NOTE: a G2H IRQ that comes after will put this gt-pm ref back
3370 */
3371 __intel_gt_pm_get(gt);
3372 set_context_destroyed(ce);
3373 clr_context_registered(ce);
3374 }
3375 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3376
3377 if (unlikely(disabled)) {
3378 release_guc_id(guc, ce);
3379 __guc_context_destroy(ce);
3380 return 0;
3381 }
3382
3383 /*
3384 * GuC is active, let's destroy this context; at this point we can still be racing
3385 * with suspend, so we undo everything if the H2G fails in deregister_context so
3386 * that GuC reset will find this context during clean up.
3387 */
3388 ret = deregister_context(ce, ce->guc_id.id);
3389 if (ret) {
3390 spin_lock(&ce->guc_state.lock);
3391 set_context_registered(ce);
3392 clr_context_destroyed(ce);
3393 spin_unlock(&ce->guc_state.lock);
3394 /*
3395 * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
3396 * the wakeref immediately, but per the function's usage spec we call it after unlock.
3397 */
3398 intel_wakeref_put_async(&gt->wakeref);
3399 }
3400
3401 return ret;
3402 }
3403
3404 static void __guc_context_destroy(struct intel_context *ce)
3405 {
3406 GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
3407 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
3408 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
3409 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
3410
3411 lrc_fini(ce);
3412 intel_context_fini(ce);
3413
3414 if (intel_engine_is_virtual(ce->engine)) {
3415 struct guc_virtual_engine *ve =
3416 container_of(ce, typeof(*ve), context);
3417
3418 if (ve->base.breadcrumbs)
3419 intel_breadcrumbs_put(ve->base.breadcrumbs);
3420
3421 kfree(ve);
3422 } else {
3423 intel_context_free(ce);
3424 }
3425 }
3426
3427 static void guc_flush_destroyed_contexts(struct intel_guc *guc)
3428 {
3429 struct intel_context *ce;
3430 unsigned long flags;
3431
3432 GEM_BUG_ON(!submission_disabled(guc) &&
3433 guc_submission_initialized(guc));
3434
3435 while (!list_empty(&guc->submission_state.destroyed_contexts)) {
3436 spin_lock_irqsave(&guc->submission_state.lock, flags);
3437 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3438 struct intel_context,
3439 destroyed_link);
3440 if (ce)
3441 list_del_init(&ce->destroyed_link);
3442 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3443
3444 if (!ce)
3445 break;
3446
3447 release_guc_id(guc, ce);
3448 __guc_context_destroy(ce);
3449 }
3450 }
3451
3452 static void deregister_destroyed_contexts(struct intel_guc *guc)
3453 {
3454 struct intel_context *ce;
3455 unsigned long flags;
3456
3457 while (!list_empty(&guc->submission_state.destroyed_contexts)) {
3458 spin_lock_irqsave(&guc->submission_state.lock, flags);
3459 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3460 struct intel_context,
3461 destroyed_link);
3462 if (ce)
3463 list_del_init(&ce->destroyed_link);
3464 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3465
3466 if (!ce)
3467 break;
3468
3469 if (guc_lrc_desc_unpin(ce)) {
3470 /*
3471 * This means the GuC's CT link got severed mid-way, which could happen
3472 * in suspend-resume corner cases. In this case, put the
3473 * context back into the destroyed_contexts list which will
3474 * get picked up on the next context deregistration event or
3475 * purged in a GuC sanitization event (reset/unload/wedged/...).
3476 */
3477 spin_lock_irqsave(&guc->submission_state.lock, flags);
3478 list_add_tail(&ce->destroyed_link,
3479 &guc->submission_state.destroyed_contexts);
3480 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3481 /* Bail now since the list might never be emptied if h2gs fail */
3482 break;
3483 }
3484
3485 }
3486 }
3487
3488 static void destroyed_worker_func(struct work_struct *w)
3489 {
3490 struct intel_guc *guc = container_of(w, struct intel_guc,
3491 submission_state.destroyed_worker);
3492 struct intel_gt *gt = guc_to_gt(guc);
3493 intel_wakeref_t wakeref;
3494
3495 /*
3496 * In rare cases we can get here via async context-free fence-signals that
3497 * come very late in the suspend flow or very early in the resume flow. In these
3498 * cases, GuC won't be ready, but just skipping it here is fine as these
3499 * pending-destroy contexts get destroyed entirely at GuC reset time at the
3500 * end of suspend, or this worker can be picked up later on the next
3501 * context destruction trigger after resume completes.
3502 */
3503 if (!intel_guc_is_ready(guc))
3504 return;
3505
3506 with_intel_gt_pm(gt, wakeref)
3507 deregister_destroyed_contexts(guc);
3508 }
3509
3510 static void guc_context_destroy(struct kref *kref)
3511 {
3512 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3513 struct intel_guc *guc = ce_to_guc(ce);
3514 unsigned long flags;
3515 bool destroy;
3516
3517 /*
3518 * If the guc_id is invalid this context has been stolen and we can free
3519 * it immediately. Also can be freed immediately if the context is not
3520 * registered with the GuC or the GuC is in the middle of a reset.
3521 */
3522 spin_lock_irqsave(&guc->submission_state.lock, flags);
3523 destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
3524 !ctx_id_mapped(guc, ce->guc_id.id);
3525 if (likely(!destroy)) {
3526 if (!list_empty(&ce->guc_id.link))
3527 list_del_init(&ce->guc_id.link);
3528 list_add_tail(&ce->destroyed_link,
3529 &guc->submission_state.destroyed_contexts);
3530 } else {
3531 __release_guc_id(guc, ce);
3532 }
3533 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3534 if (unlikely(destroy)) {
3535 __guc_context_destroy(ce);
3536 return;
3537 }
3538
3539 /*
3540 * We use a worker to issue the H2G to deregister the context as we can
3541 * take the GT PM for the first time which isn't allowed from an atomic
3542 * context.
3543 */
3544 queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
3545 }
3546
3547 static int guc_context_alloc(struct intel_context *ce)
3548 {
3549 return lrc_alloc(ce, ce->engine);
3550 }
3551
3552 static void __guc_context_set_prio(struct intel_guc *guc,
3553 struct intel_context *ce)
3554 {
3555 if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
3556 struct context_policy policy;
3557
3558 __guc_context_policy_start_klv(&policy, ce->guc_id.id);
3559 __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
3560 __guc_context_set_context_policies(guc, &policy, true);
3561 } else {
3562 u32 action[] = {
3563 INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
3564 ce->guc_id.id,
3565 ce->guc_state.prio,
3566 };
3567
3568 guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
3569 }
3570 }
3571
3572 static void guc_context_set_prio(struct intel_guc *guc,
3573 struct intel_context *ce,
3574 u8 prio)
3575 {
3576 GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
3577 prio > GUC_CLIENT_PRIORITY_NORMAL);
3578 lockdep_assert_held(&ce->guc_state.lock);
3579
3580 if (ce->guc_state.prio == prio || submission_disabled(guc) ||
3581 !context_registered(ce)) {
3582 ce->guc_state.prio = prio;
3583 return;
3584 }
3585
3586 ce->guc_state.prio = prio;
3587 __guc_context_set_prio(guc, ce);
3588
3589 trace_intel_context_set_prio(ce);
3590 }
3591
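/*
 * Map an i915 scheduler priority onto one of the four GuC client priority
 * levels. Note the inverted ordering on the GuC side: a numerically lower
 * GuC priority value means a higher priority (see new_guc_prio_higher()).
 */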
3592 static inline u8 map_i915_prio_to_guc_prio(int prio)
3593 {
3594 if (prio == I915_PRIORITY_NORMAL)
3595 return GUC_CLIENT_PRIORITY_KMD_NORMAL;
3596 else if (prio < I915_PRIORITY_NORMAL)
3597 return GUC_CLIENT_PRIORITY_NORMAL;
3598 else if (prio < I915_PRIORITY_DISPLAY)
3599 return GUC_CLIENT_PRIORITY_HIGH;
3600 else
3601 return GUC_CLIENT_PRIORITY_KMD_HIGH;
3602 }
3603
3604 static inline void add_context_inflight_prio(struct intel_context *ce,
3605 u8 guc_prio)
3606 {
3607 lockdep_assert_held(&ce->guc_state.lock);
3608 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3609
3610 ++ce->guc_state.prio_count[guc_prio];
3611
3612 /* Overflow protection */
3613 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3614 }
3615
3616 static inline void sub_context_inflight_prio(struct intel_context *ce,
3617 u8 guc_prio)
3618 {
3619 lockdep_assert_held(&ce->guc_state.lock);
3620 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3621
3622 /* Underflow protection */
3623 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3624
3625 --ce->guc_state.prio_count[guc_prio];
3626 }
3627
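/*
 * Set the context priority to the highest priority of any in-flight request:
 * prio_count[] is indexed by GuC priority, with index 0 being KMD_HIGH, so
 * the first non-zero bucket found wins.
 */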
3628 static inline void update_context_prio(struct intel_context *ce)
3629 {
3630 struct intel_guc *guc = &ce->engine->gt->uc.guc;
3631 int i;
3632
3633 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
3634 BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
3635
3636 lockdep_assert_held(&ce->guc_state.lock);
3637
3638 for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
3639 if (ce->guc_state.prio_count[i]) {
3640 guc_context_set_prio(guc, ce, i);
3641 break;
3642 }
3643 }
3644 }
3645
3646 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
3647 {
3648 /* Lower value is higher priority */
3649 return new_guc_prio < old_guc_prio;
3650 }
3651
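/*
 * Track a new request on its scheduling context: move it onto the context's
 * request list and fold its GuC priority into the in-flight priority counts,
 * raising the context priority if this request demands it.
 */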
3652 static void add_to_context(struct i915_request *rq)
3653 {
3654 struct intel_context *ce = request_to_scheduling_context(rq);
3655 u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
3656
3657 GEM_BUG_ON(intel_context_is_child(ce));
3658 GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
3659
3660 spin_lock(&ce->guc_state.lock);
3661 list_move_tail(&rq->sched.link, &ce->guc_state.requests);
3662
3663 if (rq->guc_prio == GUC_PRIO_INIT) {
3664 rq->guc_prio = new_guc_prio;
3665 add_context_inflight_prio(ce, rq->guc_prio);
3666 } else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
3667 sub_context_inflight_prio(ce, rq->guc_prio);
3668 rq->guc_prio = new_guc_prio;
3669 add_context_inflight_prio(ce, rq->guc_prio);
3670 }
3671 update_context_prio(ce);
3672
3673 spin_unlock(&ce->guc_state.lock);
3674 }
3675
3676 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
3677 {
3678 lockdep_assert_held(&ce->guc_state.lock);
3679
3680 if (rq->guc_prio != GUC_PRIO_INIT &&
3681 rq->guc_prio != GUC_PRIO_FINI) {
3682 sub_context_inflight_prio(ce, rq->guc_prio);
3683 update_context_prio(ce);
3684 }
3685 rq->guc_prio = GUC_PRIO_FINI;
3686 }
3687
3688 static void remove_from_context(struct i915_request *rq)
3689 {
3690 struct intel_context *ce = request_to_scheduling_context(rq);
3691
3692 GEM_BUG_ON(intel_context_is_child(ce));
3693
3694 spin_lock_irq(&ce->guc_state.lock);
3695
3696 list_del_init(&rq->sched.link);
3697 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
3698
3699 /* Prevent further __await_execution() registering a cb, then flush */
3700 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
3701
3702 guc_prio_fini(rq, ce);
3703
3704 spin_unlock_irq(&ce->guc_state.lock);
3705
3706 atomic_dec(&ce->guc_id.ref);
3707 i915_request_notify_execute_cb_imm(rq);
3708 }
3709
3710 static const struct intel_context_ops guc_context_ops = {
3711 .flags = COPS_RUNTIME_CYCLES,
3712 .alloc = guc_context_alloc,
3713
3714 .close = guc_context_close,
3715
3716 .pre_pin = guc_context_pre_pin,
3717 .pin = guc_context_pin,
3718 .unpin = guc_context_unpin,
3719 .post_unpin = guc_context_post_unpin,
3720
3721 .revoke = guc_context_revoke,
3722
3723 .cancel_request = guc_context_cancel_request,
3724
3725 .enter = intel_context_enter_engine,
3726 .exit = intel_context_exit_engine,
3727
3728 .sched_disable = guc_context_sched_disable,
3729
3730 .update_stats = guc_context_update_stats,
3731
3732 .reset = lrc_reset,
3733 .destroy = guc_context_destroy,
3734
3735 .create_virtual = guc_create_virtual,
3736 .create_parallel = guc_create_parallel,
3737 };
3738
3739 static void submit_work_cb(struct irq_work *wrk)
3740 {
3741 struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
3742
3743 might_lock(&rq->engine->sched_engine->lock);
3744 i915_sw_fence_complete(&rq->submit);
3745 }
3746
3747 static void __guc_signal_context_fence(struct intel_context *ce)
3748 {
3749 struct i915_request *rq, *rn;
3750
3751 lockdep_assert_held(&ce->guc_state.lock);
3752
3753 if (!list_empty(&ce->guc_state.fences))
3754 trace_intel_context_fence_release(ce);
3755
3756 /*
3757 * Use an IRQ to ensure locking order of sched_engine->lock ->
3758 * ce->guc_state.lock is preserved.
3759 */
3760 list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
3761 guc_fence_link) {
3762 list_del(&rq->guc_fence_link);
3763 irq_work_queue(&rq->submit_work);
3764 }
3765
3766 INIT_LIST_HEAD(&ce->guc_state.fences);
3767 }
3768
3769 static void guc_signal_context_fence(struct intel_context *ce)
3770 {
3771 unsigned long flags;
3772
3773 GEM_BUG_ON(intel_context_is_child(ce));
3774
3775 spin_lock_irqsave(&ce->guc_state.lock, flags);
3776 clr_context_wait_for_deregister_to_register(ce);
3777 __guc_signal_context_fence(ce);
3778 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3779 }
3780
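/*
 * (Re-)registration with the GuC is needed when a fresh guc_id was just
 * assigned, the LRC contents changed (CONTEXT_LRCA_DIRTY), or the guc_id
 * currently has no mapping - unless submission is disabled altogether.
 */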
3781 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
3782 {
3783 return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
3784 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
3785 !submission_disabled(ce_to_guc(ce));
3786 }
3787
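/*
 * One-time GuC-specific context init: seed the GuC priority from the owning
 * GEM context's scheduling priority, set up the delayed schedule-disable
 * worker, then mark the context as initialised for GuC submission.
 */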
3788 static void guc_context_init(struct intel_context *ce)
3789 {
3790 const struct i915_gem_context *ctx;
3791 int prio = I915_CONTEXT_DEFAULT_PRIORITY;
3792
3793 rcu_read_lock();
3794 ctx = rcu_dereference(ce->gem_context);
3795 if (ctx)
3796 prio = ctx->sched.priority;
3797 rcu_read_unlock();
3798
3799 ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
3800
3801 INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work,
3802 __delay_sched_disable);
3803
3804 set_bit(CONTEXT_GUC_INIT, &ce->flags);
3805 }
3806
3807 static int guc_request_alloc(struct i915_request *rq)
3808 {
3809 struct intel_context *ce = request_to_scheduling_context(rq);
3810 struct intel_guc *guc = ce_to_guc(ce);
3811 unsigned long flags;
3812 int ret;
3813
3814 GEM_BUG_ON(!intel_context_is_pinned(rq->context));
3815
3816 /*
3817 * Flush enough space to reduce the likelihood of waiting after
3818 * we start building the request - in which case we will just
3819 * have to repeat work.
3820 */
3821 rq->reserved_space += GUC_REQUEST_SIZE;
3822
3823 /*
3824 * Note that after this point, we have committed to using
3825 * this request as it is being used to both track the
3826 * state of engine initialisation and liveness of the
3827 * golden renderstate above. Think twice before you try
3828 * to cancel/unwind this request now.
3829 */
3830
3831 /* Unconditionally invalidate GPU caches and TLBs. */
3832 ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
3833 if (ret)
3834 return ret;
3835
3836 rq->reserved_space -= GUC_REQUEST_SIZE;
3837
3838 if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
3839 guc_context_init(ce);
3840
3841 /*
3842 * If the context gets closed while the execbuf is ongoing, the context
3843 * close code will race with the below code to cancel the delayed work.
3844 * If the context close wins the race and cancels the work, it will
3845 * immediately call the sched disable (see guc_context_close), so there
3846 * is a chance we can get past this check while the sched_disable code
3847 * is being executed. To make sure that code completes before we check
3848 * the status further down, we wait for the close process to complete.
3849 * Else, this code path could send a request down thinking that the
3850 * context is still in a schedule-enable mode while the GuC ends up
3851 * dropping the request completely because the disable went from the
3852 * context_close path straight to the GuC just prior. In the event the CT is
3853 * full, we could potentially need to wait up to 1.5 seconds.
3854 */
3855 if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work))
3856 intel_context_sched_disable_unpin(ce);
3857 else if (intel_context_is_closed(ce))
3858 if (wait_for(context_close_done(ce), 1500))
3859 guc_warn(guc, "timed out waiting on context sched close before realloc\n");
3860 /*
3861 * Call pin_guc_id here rather than in the pinning step as with
3862 * dma_resv, contexts can be repeatedly pinned / unpinned thrashing the
3863 * guc_id and creating horrible race conditions. This is especially bad
3864 * when guc_id are being stolen due to over subscription. By the time
3865 * this function is reached, it is guaranteed that the guc_id will be
3866 * persistent until the generated request is retired, thus sealing these
3867 * race conditions. It is still safe to fail here if guc_ids are
3868 * exhausted and return -EAGAIN to the user indicating that they can try
3869 * again in the future.
3870 *
3871 * There is no need for a lock here as the timeline mutex ensures at
3872 * most one context can be executing this code path at once. The
3873 * guc_id_ref is incremented once for every request in flight and
3874 * decremented on each retire. When it is zero, a lock around the
3875 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
3876 */
3877 if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
3878 goto out;
3879
3880 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
3881 if (unlikely(ret < 0))
3882 return ret;
3883 if (context_needs_register(ce, !!ret)) {
3884 ret = try_context_registration(ce, true);
3885 if (unlikely(ret)) { /* unwind */
3886 if (ret == -EPIPE) {
3887 disable_submission(guc);
3888 goto out; /* GPU will be reset */
3889 }
3890 atomic_dec(&ce->guc_id.ref);
3891 unpin_guc_id(guc, ce);
3892 return ret;
3893 }
3894 }
3895
3896 clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
3897
3898 out:
3899 /*
3900 * We block all requests on this context if a G2H is pending for a
3901 * schedule disable or context deregistration as the GuC will fail a
3902 * schedule enable or context registration if either G2H is pending
3903 * respectively. Once a G2H returns, the fence is released that is
3904 * blocking these requests (see guc_signal_context_fence).
3905 */
3906 spin_lock_irqsave(&ce->guc_state.lock, flags);
3907 if (context_wait_for_deregister_to_register(ce) ||
3908 context_pending_disable(ce)) {
3909 init_irq_work(&rq->submit_work, submit_work_cb);
3910 i915_sw_fence_await(&rq->submit);
3911
3912 list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
3913 }
3914 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3915
3916 return 0;
3917 }
3918
3919 static int guc_virtual_context_pre_pin(struct intel_context *ce,
3920 struct i915_gem_ww_ctx *ww,
3921 void **vaddr)
3922 {
3923 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3924
3925 return __guc_context_pre_pin(ce, engine, ww, vaddr);
3926 }
3927
3928 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
3929 {
3930 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3931 int ret = __guc_context_pin(ce, engine, vaddr);
3932 intel_engine_mask_t tmp, mask = ce->engine->mask;
3933
3934 if (likely(!ret))
3935 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3936 intel_engine_pm_get(engine);
3937
3938 return ret;
3939 }
3940
3941 static void guc_virtual_context_unpin(struct intel_context *ce)
3942 {
3943 intel_engine_mask_t tmp, mask = ce->engine->mask;
3944 struct intel_engine_cs *engine;
3945 struct intel_guc *guc = ce_to_guc(ce);
3946
3947 GEM_BUG_ON(context_enabled(ce));
3948 GEM_BUG_ON(intel_context_is_barrier(ce));
3949
3950 unpin_guc_id(guc, ce);
3951 lrc_unpin(ce);
3952
3953 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3954 intel_engine_pm_put_async(engine);
3955 }
3956
3957 static void guc_virtual_context_enter(struct intel_context *ce)
3958 {
3959 intel_engine_mask_t tmp, mask = ce->engine->mask;
3960 struct intel_engine_cs *engine;
3961
3962 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3963 intel_engine_pm_get(engine);
3964
3965 intel_timeline_enter(ce->timeline);
3966 }
3967
3968 static void guc_virtual_context_exit(struct intel_context *ce)
3969 {
3970 intel_engine_mask_t tmp, mask = ce->engine->mask;
3971 struct intel_engine_cs *engine;
3972
3973 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3974 intel_engine_pm_put(engine);
3975
3976 intel_timeline_exit(ce->timeline);
3977 }
3978
3979 static int guc_virtual_context_alloc(struct intel_context *ce)
3980 {
3981 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3982
3983 return lrc_alloc(ce, engine);
3984 }
3985
3986 static const struct intel_context_ops virtual_guc_context_ops = {
3987 .flags = COPS_RUNTIME_CYCLES,
3988 .alloc = guc_virtual_context_alloc,
3989
3990 .close = guc_context_close,
3991
3992 .pre_pin = guc_virtual_context_pre_pin,
3993 .pin = guc_virtual_context_pin,
3994 .unpin = guc_virtual_context_unpin,
3995 .post_unpin = guc_context_post_unpin,
3996
3997 .revoke = guc_context_revoke,
3998
3999 .cancel_request = guc_context_cancel_request,
4000
4001 .enter = guc_virtual_context_enter,
4002 .exit = guc_virtual_context_exit,
4003
4004 .sched_disable = guc_context_sched_disable,
4005 .update_stats = guc_context_update_stats,
4006
4007 .destroy = guc_context_destroy,
4008
4009 .get_sibling = guc_virtual_get_sibling,
4010 };
4011
4012 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
4013 {
4014 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
4015 struct intel_guc *guc = ce_to_guc(ce);
4016 int ret;
4017
4018 GEM_BUG_ON(!intel_context_is_parent(ce));
4019 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4020
4021 ret = pin_guc_id(guc, ce);
4022 if (unlikely(ret < 0))
4023 return ret;
4024
4025 return __guc_context_pin(ce, engine, vaddr);
4026 }
4027
4028 static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
4029 {
4030 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
4031
4032 GEM_BUG_ON(!intel_context_is_child(ce));
4033 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4034
4035 __intel_context_pin(ce->parallel.parent);
4036 return __guc_context_pin(ce, engine, vaddr);
4037 }
4038
4039 static void guc_parent_context_unpin(struct intel_context *ce)
4040 {
4041 struct intel_guc *guc = ce_to_guc(ce);
4042
4043 GEM_BUG_ON(context_enabled(ce));
4044 GEM_BUG_ON(intel_context_is_barrier(ce));
4045 GEM_BUG_ON(!intel_context_is_parent(ce));
4046 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4047
4048 unpin_guc_id(guc, ce);
4049 lrc_unpin(ce);
4050 }
4051
4052 static void guc_child_context_unpin(struct intel_context *ce)
4053 {
4054 GEM_BUG_ON(context_enabled(ce));
4055 GEM_BUG_ON(intel_context_is_barrier(ce));
4056 GEM_BUG_ON(!intel_context_is_child(ce));
4057 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4058
4059 lrc_unpin(ce);
4060 }
4061
4062 static void guc_child_context_post_unpin(struct intel_context *ce)
4063 {
4064 GEM_BUG_ON(!intel_context_is_child(ce));
4065 GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
4066 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4067
4068 lrc_post_unpin(ce);
4069 intel_context_unpin(ce->parallel.parent);
4070 }
4071
4072 static void guc_child_context_destroy(struct kref *kref)
4073 {
4074 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
4075
4076 __guc_context_destroy(ce);
4077 }
4078
4079 static const struct intel_context_ops virtual_parent_context_ops = {
4080 .alloc = guc_virtual_context_alloc,
4081
4082 .close = guc_context_close,
4083
4084 .pre_pin = guc_context_pre_pin,
4085 .pin = guc_parent_context_pin,
4086 .unpin = guc_parent_context_unpin,
4087 .post_unpin = guc_context_post_unpin,
4088
4089 .revoke = guc_context_revoke,
4090
4091 .cancel_request = guc_context_cancel_request,
4092
4093 .enter = guc_virtual_context_enter,
4094 .exit = guc_virtual_context_exit,
4095
4096 .sched_disable = guc_context_sched_disable,
4097
4098 .destroy = guc_context_destroy,
4099
4100 .get_sibling = guc_virtual_get_sibling,
4101 };
4102
4103 static const struct intel_context_ops virtual_child_context_ops = {
4104 .alloc = guc_virtual_context_alloc,
4105
4106 .pre_pin = guc_context_pre_pin,
4107 .pin = guc_child_context_pin,
4108 .unpin = guc_child_context_unpin,
4109 .post_unpin = guc_child_context_post_unpin,
4110
4111 .cancel_request = guc_context_cancel_request,
4112
4113 .enter = guc_virtual_context_enter,
4114 .exit = guc_virtual_context_exit,
4115
4116 .destroy = guc_child_context_destroy,
4117
4118 .get_sibling = guc_virtual_get_sibling,
4119 };
4120
4121 /*
4122 * The below override of the breadcrumbs is enabled when the user configures a
4123 * context for parallel submission (multi-lrc, parent-child).
4124 *
4125 * The overridden breadcrumbs implements an algorithm which allows the GuC to
4126 * safely preempt all the hw contexts configured for parallel submission
4127 * between each BB. The contract between the i915 and GuC is that if the parent
4128 * context can be preempted, all the children can be preempted, and the GuC will
4129 * always try to preempt the parent before the children. A handshake between the
4130 * parent / children breadcrumbs ensures the i915 holds up its end of the deal
4131 * creating a window to preempt between each set of BBs.
4132 */
4133 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
4134 u64 offset, u32 len,
4135 const unsigned int flags);
4136 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
4137 u64 offset, u32 len,
4138 const unsigned int flags);
4139 static u32 *
4140 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
4141 u32 *cs);
4142 static u32 *
4143 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
4144 u32 *cs);
4145
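/*
 * Build a parallel (multi-lrc) context: create one virtual engine context per
 * slot of 'width', bind the first as the parent and the rest as children, and
 * install the non-preemptible-mid-batch BB start / fini breadcrumb emitters
 * declared above on all of them.
 */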
4146 static struct intel_context *
4147 guc_create_parallel(struct intel_engine_cs **engines,
4148 unsigned int num_siblings,
4149 unsigned int width)
4150 {
4151 struct intel_engine_cs **siblings = NULL;
4152 struct intel_context *parent = NULL, *ce, *err;
4153 int i, j;
4154
4155 siblings = kmalloc_array(num_siblings,
4156 sizeof(*siblings),
4157 GFP_KERNEL);
4158 if (!siblings)
4159 return ERR_PTR(-ENOMEM);
4160
4161 for (i = 0; i < width; ++i) {
4162 for (j = 0; j < num_siblings; ++j)
4163 siblings[j] = engines[i * num_siblings + j];
4164
4165 ce = intel_engine_create_virtual(siblings, num_siblings,
4166 FORCE_VIRTUAL);
4167 if (IS_ERR(ce)) {
4168 err = ERR_CAST(ce);
4169 goto unwind;
4170 }
4171
4172 if (i == 0) {
4173 parent = ce;
4174 parent->ops = &virtual_parent_context_ops;
4175 } else {
4176 ce->ops = &virtual_child_context_ops;
4177 intel_context_bind_parent_child(parent, ce);
4178 }
4179 }
4180
4181 parent->parallel.fence_context = dma_fence_context_alloc(1);
4182
4183 parent->engine->emit_bb_start =
4184 emit_bb_start_parent_no_preempt_mid_batch;
4185 parent->engine->emit_fini_breadcrumb =
4186 emit_fini_breadcrumb_parent_no_preempt_mid_batch;
4187 parent->engine->emit_fini_breadcrumb_dw =
4188 12 + 4 * parent->parallel.number_children;
4189 for_each_child(parent, ce) {
4190 ce->engine->emit_bb_start =
4191 emit_bb_start_child_no_preempt_mid_batch;
4192 ce->engine->emit_fini_breadcrumb =
4193 emit_fini_breadcrumb_child_no_preempt_mid_batch;
4194 ce->engine->emit_fini_breadcrumb_dw = 16;
4195 }
4196
4197 kfree(siblings);
4198 return parent;
4199
4200 unwind:
4201 if (parent)
4202 intel_context_put(parent);
4203 kfree(siblings);
4204 return err;
4205 }
4206
4207 static bool
4208 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
4209 {
4210 struct intel_engine_cs *sibling;
4211 intel_engine_mask_t tmp, mask = b->engine_mask;
4212 bool result = false;
4213
4214 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
4215 result |= intel_engine_irq_enable(sibling);
4216
4217 return result;
4218 }
4219
4220 static void
4221 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
4222 {
4223 struct intel_engine_cs *sibling;
4224 intel_engine_mask_t tmp, mask = b->engine_mask;
4225
4226 for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
4227 intel_engine_irq_disable(sibling);
4228 }
4229
4230 static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
4231 {
4232 int i;
4233
4234 /*
4235 * In GuC submission mode we do not know which physical engine a request
4236 * will be scheduled on; this creates a problem because the breadcrumb
4237 * interrupt is per physical engine. To work around this we attach
4238 * requests and direct all breadcrumb interrupts to the first instance
4239 * of an engine per class. In addition all breadcrumb interrupts are
4240 * enabled / disabled across an engine class in unison.
4241 */
4242 for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
4243 struct intel_engine_cs *sibling =
4244 engine->gt->engine_class[engine->class][i];
4245
4246 if (sibling) {
4247 if (engine->breadcrumbs != sibling->breadcrumbs) {
4248 intel_breadcrumbs_put(engine->breadcrumbs);
4249 engine->breadcrumbs =
4250 intel_breadcrumbs_get(sibling->breadcrumbs);
4251 }
4252 break;
4253 }
4254 }
4255
4256 if (engine->breadcrumbs) {
4257 engine->breadcrumbs->engine_mask |= engine->mask;
4258 engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
4259 engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
4260 }
4261 }
4262
4263 static void guc_bump_inflight_request_prio(struct i915_request *rq,
4264 int prio)
4265 {
4266 struct intel_context *ce = request_to_scheduling_context(rq);
4267 u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
4268
4269 /* Short circuit function */
4270 if (prio < I915_PRIORITY_NORMAL ||
4271 rq->guc_prio == GUC_PRIO_FINI ||
4272 (rq->guc_prio != GUC_PRIO_INIT &&
4273 !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
4274 return;
4275
4276 spin_lock(&ce->guc_state.lock);
4277 if (rq->guc_prio != GUC_PRIO_FINI) {
4278 if (rq->guc_prio != GUC_PRIO_INIT)
4279 sub_context_inflight_prio(ce, rq->guc_prio);
4280 rq->guc_prio = new_guc_prio;
4281 add_context_inflight_prio(ce, rq->guc_prio);
4282 update_context_prio(ce);
4283 }
4284 spin_unlock(&ce->guc_state.lock);
4285 }
4286
4287 static void guc_retire_inflight_request_prio(struct i915_request *rq)
4288 {
4289 struct intel_context *ce = request_to_scheduling_context(rq);
4290
4291 spin_lock(&ce->guc_state.lock);
4292 guc_prio_fini(rq, ce);
4293 spin_unlock(&ce->guc_state.lock);
4294 }
4295
4296 static void sanitize_hwsp(struct intel_engine_cs *engine)
4297 {
4298 struct intel_timeline *tl;
4299
4300 list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
4301 intel_timeline_reset_seqno(tl);
4302 }
4303
4304 static void guc_sanitize(struct intel_engine_cs *engine)
4305 {
4306 /*
4307 * Poison residual state on resume, in case the suspend didn't!
4308 *
4309 * We have to assume that across suspend/resume (or other loss
4310 * of control) that the contents of our pinned buffers has been
4311 * lost, replaced by garbage. Since this doesn't always happen,
4312 * let's poison such state so that we more quickly spot when
4313 * we falsely assume it has been preserved.
4314 */
4315 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
4316 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
4317
4318 /*
4319 * The kernel_context HWSP is stored in the status_page. As above,
4320 * that may be lost on resume/initialisation, and so we need to
4321 * reset the value in the HWSP.
4322 */
4323 sanitize_hwsp(engine);
4324
4325 /* And scrub the dirty cachelines for the HWSP */
4326 drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
4327
4328 intel_engine_reset_pinned_contexts(engine);
4329 }
4330
4331 static void setup_hwsp(struct intel_engine_cs *engine)
4332 {
4333 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
4334
4335 ENGINE_WRITE_FW(engine,
4336 RING_HWS_PGA,
4337 i915_ggtt_offset(engine->status_page.vma));
4338 }
4339
4340 static void start_engine(struct intel_engine_cs *engine)
4341 {
4342 ENGINE_WRITE_FW(engine,
4343 RING_MODE_GEN7,
4344 _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
4345
4346 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
4347 ENGINE_POSTING_READ(engine, RING_MI_MODE);
4348 }
4349
4350 static int guc_resume(struct intel_engine_cs *engine)
4351 {
4352 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
4353
4354 intel_mocs_init_engine(engine);
4355
4356 intel_breadcrumbs_reset(engine->breadcrumbs);
4357
4358 setup_hwsp(engine);
4359 start_engine(engine);
4360
4361 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
4362 xehp_enable_ccs_engines(engine);
4363
4364 return 0;
4365 }
4366
4367 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
4368 {
4369 return !sched_engine->tasklet.callback;
4370 }
4371
4372 static void guc_set_default_submission(struct intel_engine_cs *engine)
4373 {
4374 engine->submit_request = guc_submit_request;
4375 }
4376
4377 static inline int guc_kernel_context_pin(struct intel_guc *guc,
4378 struct intel_context *ce)
4379 {
4380 int ret;
4381
4382 /*
4383 * Note: we purposefully do not check the returns below because
4384 * the registration can only fail if a reset is just starting.
4385 * This is called at the end of reset, so presumably another reset
4386 * isn't happening and even if it did, this code would be run again.
4387 */
4388
4389 if (context_guc_id_invalid(ce)) {
4390 ret = pin_guc_id(guc, ce);
4391
4392 if (ret < 0)
4393 return ret;
4394 }
4395
4396 if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
4397 guc_context_init(ce);
4398
4399 ret = try_context_registration(ce, true);
4400 if (ret)
4401 unpin_guc_id(guc, ce);
4402
4403 return ret;
4404 }
4405
4406 static inline int guc_init_submission(struct intel_guc *guc)
4407 {
4408 struct intel_gt *gt = guc_to_gt(guc);
4409 struct intel_engine_cs *engine;
4410 enum intel_engine_id id;
4411
4412 /* make sure all descriptors are clean... */
4413 xa_destroy(&guc->context_lookup);
4414
4415 /*
4416 * A reset might have occurred while we had a pending stalled request,
4417 * so make sure we clean that up.
4418 */
4419 guc->stalled_request = NULL;
4420 guc->submission_stall_reason = STALL_NONE;
4421
4422 /*
4423 * Some contexts might have been pinned before we enabled GuC
4424 * submission, so we need to add them to the GuC bookkeeping.
4425 * Also, after a reset of the GuC we want to make sure that the
4426 * information shared with GuC is properly reset. The kernel LRCs are
4427 * not attached to the gem_context, so they need to be added separately.
4428 */
4429 for_each_engine(engine, gt, id) {
4430 struct intel_context *ce;
4431
4432 list_for_each_entry(ce, &engine->pinned_contexts_list,
4433 pinned_contexts_link) {
4434 int ret = guc_kernel_context_pin(guc, ce);
4435
4436 if (ret) {
4437 /* No point in trying to clean up as i915 will wedge on failure */
4438 return ret;
4439 }
4440 }
4441 }
4442
4443 return 0;
4444 }
4445
4446 static void guc_release(struct intel_engine_cs *engine)
4447 {
4448 engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
4449
4450 intel_engine_cleanup_common(engine);
4451 lrc_fini_wa_ctx(engine);
4452 }
4453
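/*
 * A request submitted to a virtual engine may execute on any of its physical
 * siblings, so bump the activity serial on every engine backing the mask.
 */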
4454 static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
4455 {
4456 struct intel_engine_cs *e;
4457 intel_engine_mask_t tmp, mask = engine->mask;
4458
4459 for_each_engine_masked(e, engine->gt, mask, tmp)
4460 e->serial++;
4461 }
4462
4463 static void guc_default_vfuncs(struct intel_engine_cs *engine)
4464 {
4465 /* Default vfuncs which can be overridden by each engine. */
4466
4467 engine->resume = guc_resume;
4468
4469 engine->cops = &guc_context_ops;
4470 engine->request_alloc = guc_request_alloc;
4471 engine->add_active_request = add_to_context;
4472 engine->remove_active_request = remove_from_context;
4473
4474 engine->sched_engine->schedule = i915_schedule;
4475
4476 engine->reset.prepare = guc_engine_reset_prepare;
4477 engine->reset.rewind = guc_rewind_nop;
4478 engine->reset.cancel = guc_reset_nop;
4479 engine->reset.finish = guc_reset_nop;
4480
4481 engine->emit_flush = gen8_emit_flush_xcs;
4482 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
4483 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
4484 if (GRAPHICS_VER(engine->i915) >= 12) {
4485 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
4486 engine->emit_flush = gen12_emit_flush_xcs;
4487 }
4488 engine->set_default_submission = guc_set_default_submission;
4489 engine->busyness = guc_engine_busyness;
4490
4491 engine->flags |= I915_ENGINE_SUPPORTS_STATS;
4492 engine->flags |= I915_ENGINE_HAS_PREEMPTION;
4493 engine->flags |= I915_ENGINE_HAS_TIMESLICES;
4494
4495 /* Wa_14014475959:dg2 */
4496 if (engine->class == COMPUTE_CLASS)
4497 if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
4498 IS_DG2(engine->i915))
4499 engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
4500
4501 /* Wa_16019325821 */
4502 /* Wa_14019159160 */
4503 if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
4504 IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
4505 engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
4506
4507 /*
4508 * TODO: GuC supports timeslicing and semaphores as well, but they're
4509 * handled by the firmware so some minor tweaks are required before
4510 * enabling.
4511 *
4512 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
4513 */
4514
4515 engine->emit_bb_start = gen8_emit_bb_start;
4516 if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
4517 engine->emit_bb_start = xehp_emit_bb_start;
4518 }
4519
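/*
 * Render/compute engines need generation-specific flush and fini-breadcrumb
 * emitters, so override the XCS defaults picked in guc_default_vfuncs().
 */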
4520 static void rcs_submission_override(struct intel_engine_cs *engine)
4521 {
4522 switch (GRAPHICS_VER(engine->i915)) {
4523 case 12:
4524 engine->emit_flush = gen12_emit_flush_rcs;
4525 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
4526 break;
4527 case 11:
4528 engine->emit_flush = gen11_emit_flush_rcs;
4529 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
4530 break;
4531 default:
4532 engine->emit_flush = gen8_emit_flush_rcs;
4533 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
4534 break;
4535 }
4536 }
4537
4538 static inline void guc_default_irqs(struct intel_engine_cs *engine)
4539 {
4540 engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
4541 intel_engine_set_irq_handler(engine, cs_irq_handler);
4542 }
4543
4544 static void guc_sched_engine_destroy(struct kref *kref)
4545 {
4546 struct i915_sched_engine *sched_engine =
4547 container_of(kref, typeof(*sched_engine), ref);
4548 struct intel_guc *guc = sched_engine->private_data;
4549
4550 guc->sched_engine = NULL;
4551 tasklet_kill(&sched_engine->tasklet); /* flush the callback */
4552 kfree(sched_engine);
4553 }
4554
4555 int intel_guc_submission_setup(struct intel_engine_cs *engine)
4556 {
4557 struct drm_i915_private *i915 = engine->i915;
4558 struct intel_guc *guc = gt_to_guc(engine->gt);
4559
4560 /*
4561 * The setup relies on several assumptions (e.g. irqs always enabled)
4562 * that are only valid on gen11+
4563 */
4564 GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
4565
4566 if (!guc->sched_engine) {
4567 guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
4568 if (!guc->sched_engine)
4569 return -ENOMEM;
4570
4571 guc->sched_engine->schedule = i915_schedule;
4572 guc->sched_engine->disabled = guc_sched_engine_disabled;
4573 guc->sched_engine->private_data = guc;
4574 guc->sched_engine->destroy = guc_sched_engine_destroy;
4575 guc->sched_engine->bump_inflight_request_prio =
4576 guc_bump_inflight_request_prio;
4577 guc->sched_engine->retire_inflight_request_prio =
4578 guc_retire_inflight_request_prio;
4579 tasklet_setup(&guc->sched_engine->tasklet,
4580 guc_submission_tasklet);
4581 }
4582 i915_sched_engine_put(engine->sched_engine);
4583 engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
4584
4585 guc_default_vfuncs(engine);
4586 guc_default_irqs(engine);
4587 guc_init_breadcrumbs(engine);
4588
4589 if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
4590 rcs_submission_override(engine);
4591
4592 lrc_init_wa_ctx(engine);
4593
4594 /* Finally, take ownership and responsibility for cleanup! */
4595 engine->sanitize = guc_sanitize;
4596 engine->release = guc_release;
4597
4598 return 0;
4599 }
4600
4601 struct scheduling_policy {
4602 /* internal data */
4603 u32 max_words, num_words;
4604 u32 count;
4605 /* API data */
4606 struct guc_update_scheduling_policy h2g;
4607 };
4608
4609 static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy)
4610 {
4611 u32 *start = (void *)&policy->h2g;
4612 u32 *end = policy->h2g.data + policy->num_words;
4613 size_t delta = end - start;
4614
4615 return delta;
4616 }
4617
4618 static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy)
4619 {
4620 policy->h2g.header.action = INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
4621 policy->max_words = ARRAY_SIZE(policy->h2g.data);
4622 policy->num_words = 0;
4623 policy->count = 0;
4624
4625 return policy;
4626 }
4627
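/*
 * Append one KLV (key/length/value) entry to the policy H2G buffer: a header
 * dword encoding the key and payload length followed by 'len' data dwords.
 */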
4628 static void __guc_scheduling_policy_add_klv(struct scheduling_policy *policy,
4629 u32 action, u32 *data, u32 len)
4630 {
4631 u32 *klv_ptr = policy->h2g.data + policy->num_words;
4632
4633 GEM_BUG_ON((policy->num_words + 1 + len) > policy->max_words);
4634 *(klv_ptr++) = FIELD_PREP(GUC_KLV_0_KEY, action) |
4635 FIELD_PREP(GUC_KLV_0_LEN, len);
4636 memcpy(klv_ptr, data, sizeof(u32) * len);
4637 policy->num_words += 1 + len;
4638 policy->count++;
4639 }
4640
4641 static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
4642 struct scheduling_policy *policy)
4643 {
4644 int ret;
4645
4646 ret = intel_guc_send(guc, (u32 *)&policy->h2g,
4647 __guc_scheduling_policy_action_size(policy));
4648 if (ret < 0) {
4649 guc_probe_error(guc, "Failed to configure global scheduling policies: %pe!\n",
4650 ERR_PTR(ret));
4651 return ret;
4652 }
4653
4654 if (ret != policy->count) {
4655 guc_warn(guc, "global scheduler policy processed %d of %d KLVs!",
4656 ret, policy->count);
4657 if (ret > policy->count)
4658 return -EPROTO;
4659 }
4660
4661 return 0;
4662 }
4663
4664 static int guc_init_global_schedule_policy(struct intel_guc *guc)
4665 {
4666 struct scheduling_policy policy;
4667 struct intel_gt *gt = guc_to_gt(guc);
4668 intel_wakeref_t wakeref;
4669 int ret;
4670
4671 if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
4672 return 0;
4673
4674 __guc_scheduling_policy_start_klv(&policy);
4675
4676 with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
4677 u32 yield[] = {
4678 GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION,
4679 GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO,
4680 };
4681
4682 __guc_scheduling_policy_add_klv(&policy,
4683 GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD,
4684 yield, ARRAY_SIZE(yield));
4685
4686 ret = __guc_action_set_scheduling_policies(guc, &policy);
4687 }
4688
4689 return ret;
4690 }
4691
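/*
 * On Gen12+ semaphore interrupts can be steered to the GuC while it owns
 * scheduling; writing 0 routes them back to the host. Earlier gens have no
 * such steering, so this is a no-op there.
 */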
4692 static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
4693 {
4694 struct intel_gt *gt = guc_to_gt(guc);
4695 u32 val;
4696
4697 if (GRAPHICS_VER(gt->i915) < 12)
4698 return;
4699
4700 if (to_guc)
4701 val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL;
4702 else
4703 val = 0;
4704
4705 intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
4706 }
4707
4708 int intel_guc_submission_enable(struct intel_guc *guc)
4709 {
4710 int ret;
4711
4712 /* Semaphore interrupt enable and route to GuC */
4713 guc_route_semaphores(guc, true);
4714
4715 ret = guc_init_submission(guc);
4716 if (ret)
4717 goto fail_sem;
4718
4719 ret = guc_init_engine_stats(guc);
4720 if (ret)
4721 goto fail_sem;
4722
4723 ret = guc_init_global_schedule_policy(guc);
4724 if (ret)
4725 goto fail_stats;
4726
4727 return 0;
4728
4729 fail_stats:
4730 guc_fini_engine_stats(guc);
4731 fail_sem:
4732 guc_route_semaphores(guc, false);
4733 return ret;
4734 }
4735
4736 /* Note: By the time we're here, GuC may have already been reset */
4737 void intel_guc_submission_disable(struct intel_guc *guc)
4738 {
4739 guc_cancel_busyness_worker(guc);
4740
4741 /* Semaphore interrupt disable and route to host */
4742 guc_route_semaphores(guc, false);
4743 }
4744
4745 static bool __guc_submission_supported(struct intel_guc *guc)
4746 {
4747 /* GuC submission is unavailable for pre-Gen11 */
4748 return intel_guc_is_supported(guc) &&
4749 GRAPHICS_VER(guc_to_i915(guc)) >= 11;
4750 }
4751
4752 static bool __guc_submission_selected(struct intel_guc *guc)
4753 {
4754 struct drm_i915_private *i915 = guc_to_i915(guc);
4755
4756 if (!intel_guc_submission_is_supported(guc))
4757 return false;
4758
4759 return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
4760 }
4761
4762 int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc)
4763 {
4764 return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc);
4765 }
4766
4767 /*
4768 * This default value of 33 milliseconds (+1 millisecond rounded up) ensures 30fps or higher
4769 * workloads are able to enjoy the latency reduction when delaying the schedule-disable
4770 * operation. This matches the 30fps game-render + encode (real world) workload this
4771 * knob was tested against.
4772 */
4773 #define SCHED_DISABLE_DELAY_MS 34
4774
4775 /*
4776 * A threshold of 75% is a reasonable starting point considering that real world apps
4777 * generally don't get anywhere near this.
4778 */
4779 #define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc) \
4780 (((intel_guc_sched_disable_gucid_threshold_max(guc)) * 3) / 4)
4781
4782 void intel_guc_submission_init_early(struct intel_guc *guc)
4783 {
4784 xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
4785
4786 spin_lock_init(&guc->submission_state.lock);
4787 INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
4788 ida_init(&guc->submission_state.guc_ids);
4789 INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
4790 INIT_WORK(&guc->submission_state.destroyed_worker,
4791 destroyed_worker_func);
4792 INIT_WORK(&guc->submission_state.reset_fail_worker,
4793 reset_fail_worker_func);
4794
4795 spin_lock_init(&guc->timestamp.lock);
4796 INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
4797
4798 guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS;
4799 guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
4800 guc->submission_state.sched_disable_gucid_threshold =
4801 NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc);
4802 guc->submission_supported = __guc_submission_supported(guc);
4803 guc->submission_selected = __guc_submission_selected(guc);
4804 }
4805
4806 static inline struct intel_context *
4807 g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
4808 {
4809 struct intel_context *ce;
4810
4811 if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
4812 guc_err(guc, "Invalid ctx_id %u\n", ctx_id);
4813 return NULL;
4814 }
4815
4816 ce = __get_context(guc, ctx_id);
4817 if (unlikely(!ce)) {
4818 guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id);
4819 return NULL;
4820 }
4821
4822 if (unlikely(intel_context_is_child(ce))) {
4823 guc_err(guc, "Context is child, ctx_id %u\n", ctx_id);
4824 return NULL;
4825 }
4826
4827 return ce;
4828 }
4829
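/*
 * A TLB invalidation done G2H carries the seqno allocated in
 * guc_send_invalidate_tlb(); look up the waiter for that seqno and wake it,
 * logging stale/unknown seqnos at debug level.
 */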
4830 static void wait_wake_outstanding_tlb_g2h(struct intel_guc *guc, u32 seqno)
4831 {
4832 struct intel_guc_tlb_wait *wait;
4833 unsigned long flags;
4834
4835 xa_lock_irqsave(&guc->tlb_lookup, flags);
4836 wait = xa_load(&guc->tlb_lookup, seqno);
4837
4838 if (wait)
4839 wake_up(&wait->wq);
4840 else
4841 guc_dbg(guc,
4842 "Stale TLB invalidation response with seqno %d\n", seqno);
4843
4844 xa_unlock_irqrestore(&guc->tlb_lookup, flags);
4845 }
4846
4847 int intel_guc_tlb_invalidation_done(struct intel_guc *guc,
4848 const u32 *payload, u32 len)
4849 {
4850 if (len < 1)
4851 return -EPROTO;
4852
4853 wait_wake_outstanding_tlb_g2h(guc, payload[0]);
4854 return 0;
4855 }
4856
4857 static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
4858 {
4859 /*
4860 * This is equivalent to wait_woken() with the exception that
4861 * we do not wake up early if the kthread task has been completed.
4862 * As we are called from page reclaim in any task context,
4863 * we may be invoked from stopped kthreads, but we *must*
4864 * complete the wait from the HW.
4865 */
4866 do {
4867 set_current_state(TASK_UNINTERRUPTIBLE);
4868 if (wq_entry->flags & WQ_FLAG_WOKEN)
4869 break;
4870
4871 timeout = schedule_timeout(timeout);
4872 } while (timeout);
4873
4874 /* See wait_woken() and woken_wake_function() */
4875 __set_current_state(TASK_RUNNING);
4876 smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN);
4877
4878 return timeout;
4879 }
4880
4881 static bool intel_gt_is_enabled(const struct intel_gt *gt)
4882 {
4883 /* Check if GT is wedged or suspended */
4884 if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
4885 return false;
4886 return true;
4887 }
4888
4889 static int guc_send_invalidate_tlb(struct intel_guc *guc,
4890 enum intel_guc_tlb_invalidation_type type)
4891 {
4892 struct intel_guc_tlb_wait _wq, *wq = &_wq;
4893 struct intel_gt *gt = guc_to_gt(guc);
4894 DEFINE_WAIT_FUNC(wait, woken_wake_function);
4895 int err;
4896 u32 seqno;
4897 u32 action[] = {
4898 INTEL_GUC_ACTION_TLB_INVALIDATION,
4899 0,
4900 REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
4901 REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
4902 INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
4903 INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
4904 };
4905 u32 size = ARRAY_SIZE(action);
4906
4907 /*
4908 * Early check that the GT is enabled. TLB invalidation should not be
4909 * attempted if the GT is disabled due to suspend/wedge.
4910 */
4911 if (!intel_gt_is_enabled(gt))
4912 return -EINVAL;
4913
4914 init_waitqueue_head(&_wq.wq);
4915
4916 if (xa_alloc_cyclic_irq(&guc->tlb_lookup, &seqno, wq,
4917 xa_limit_32b, &guc->next_seqno,
4918 GFP_ATOMIC | __GFP_NOWARN) < 0) {
4919 /* Under severe memory pressure? Serialise TLB allocations */
4920 xa_lock_irq(&guc->tlb_lookup);
4921 wq = xa_load(&guc->tlb_lookup, guc->serial_slot);
4922 wait_event_lock_irq(wq->wq,
4923 !READ_ONCE(wq->busy),
4924 guc->tlb_lookup.xa_lock);
4925 /*
4926 * Update wq->busy under lock to ensure only one waiter can
4927 * issue the TLB invalidation command using the serial slot at a
4928 * time. The condition is set to true before releasing the lock
4929 * so that other callers continue to wait until woken up again.
4930 */
4931 wq->busy = true;
4932 xa_unlock_irq(&guc->tlb_lookup);
4933
4934 seqno = guc->serial_slot;
4935 }
4936
4937 action[1] = seqno;
4938
4939 add_wait_queue(&wq->wq, &wait);
4940
4941 /* This is a critical reclaim path and thus we must loop here. */
4942 err = intel_guc_send_busy_loop(guc, action, size, G2H_LEN_DW_INVALIDATE_TLB, true);
4943 if (err)
4944 goto out;
4945
4946 /*
4947 * Late check that the GT is still enabled. It is not an error for the TLB
4948 * invalidation to time out if the GT is disabled during the process
4949 * due to suspend/wedge. In fact, the TLB invalidation is cancelled
4950 * in this case.
4951 */
4952 if (!must_wait_woken(&wait, intel_guc_ct_max_queue_time_jiffies()) &&
4953 intel_gt_is_enabled(gt)) {
4954 guc_err(guc,
4955 "TLB invalidation response timed out for seqno %u\n", seqno);
4956 err = -ETIME;
4957 }
4958 out:
4959 remove_wait_queue(&wq->wq, &wait);
4960 if (seqno != guc->serial_slot)
4961 xa_erase_irq(&guc->tlb_lookup, seqno);
4962
4963 return err;
4964 }
4965
4966 /* Send a H2G command to invalidate the TLBs at engine level and beyond. */
4967 int intel_guc_invalidate_tlb_engines(struct intel_guc *guc)
4968 {
4969 return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_ENGINES);
4970 }
4971
4972 /* Send a H2G command to invalidate the GuC's internal TLB. */
4973 int intel_guc_invalidate_tlb_guc(struct intel_guc *guc)
4974 {
4975 return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
4976 }
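
/*
 * Editor's note: an illustrative, hypothetical caller (not part of the
 * driver). TLB invalidation may be issued from reclaim, and a failure often
 * just means the GT is disabled (suspend/wedge), so callers typically log
 * rather than escalate.
 */
static inline void example_invalidate_engine_tlbs(struct intel_guc *guc)
{
	int err = intel_guc_invalidate_tlb_engines(guc);

	if (err)
		guc_dbg(guc, "Engine TLB invalidation not performed: %d\n", err);
}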
4977
4978 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
4979 const u32 *msg,
4980 u32 len)
4981 {
4982 struct intel_context *ce;
4983 u32 ctx_id;
4984
4985 if (unlikely(len < 1)) {
4986 guc_err(guc, "Invalid length %u\n", len);
4987 return -EPROTO;
4988 }
4989 ctx_id = msg[0];
4990
4991 ce = g2h_context_lookup(guc, ctx_id);
4992 if (unlikely(!ce))
4993 return -EPROTO;
4994
4995 trace_intel_context_deregister_done(ce);
4996
4997 #ifdef CONFIG_DRM_I915_SELFTEST
4998 if (unlikely(ce->drop_deregister)) {
4999 ce->drop_deregister = false;
5000 return 0;
5001 }
5002 #endif
5003
5004 if (context_wait_for_deregister_to_register(ce)) {
5005 struct intel_runtime_pm *runtime_pm =
5006 &ce->engine->gt->i915->runtime_pm;
5007 intel_wakeref_t wakeref;
5008
5009 /*
5010 * The previous owner of this guc_id has been deregistered, so it is
5011 * now safe to register this context.
5012 */
5013 with_intel_runtime_pm(runtime_pm, wakeref)
5014 register_context(ce, true);
5015 guc_signal_context_fence(ce);
5016 intel_context_put(ce);
5017 } else if (context_destroyed(ce)) {
5018 /* Context has been destroyed */
5019 intel_gt_pm_put_async_untracked(guc_to_gt(guc));
5020 release_guc_id(guc, ce);
5021 __guc_context_destroy(ce);
5022 }
5023
5024 decr_outstanding_submission_g2h(guc);
5025
5026 return 0;
5027 }
5028
5029 int intel_guc_sched_done_process_msg(struct intel_guc *guc,
5030 const u32 *msg,
5031 u32 len)
5032 {
5033 struct intel_context *ce;
5034 unsigned long flags;
5035 u32 ctx_id;
5036
5037 if (unlikely(len < 2)) {
5038 guc_err(guc, "Invalid length %u\n", len);
5039 return -EPROTO;
5040 }
5041 ctx_id = msg[0];
5042
5043 ce = g2h_context_lookup(guc, ctx_id);
5044 if (unlikely(!ce))
5045 return -EPROTO;
5046
5047 if (unlikely(context_destroyed(ce) ||
5048 (!context_pending_enable(ce) &&
5049 !context_pending_disable(ce)))) {
5050 guc_err(guc, "Bad context sched_state 0x%x, ctx_id %u\n",
5051 ce->guc_state.sched_state, ctx_id);
5052 return -EPROTO;
5053 }
5054
5055 trace_intel_context_sched_done(ce);
5056
5057 if (context_pending_enable(ce)) {
5058 #ifdef CONFIG_DRM_I915_SELFTEST
5059 if (unlikely(ce->drop_schedule_enable)) {
5060 ce->drop_schedule_enable = false;
5061 return 0;
5062 }
5063 #endif
5064
5065 spin_lock_irqsave(&ce->guc_state.lock, flags);
5066 clr_context_pending_enable(ce);
5067 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
5068 } else if (context_pending_disable(ce)) {
5069 bool banned;
5070
5071 #ifdef CONFIG_DRM_I915_SELFTEST
5072 if (unlikely(ce->drop_schedule_disable)) {
5073 ce->drop_schedule_disable = false;
5074 return 0;
5075 }
5076 #endif
5077
5078 /*
5079 * Unpin must be done before __guc_signal_context_fence,
5080 * otherwise a race exists where requests are submitted and
5081 * retired before this unpin completes, resulting in the
5082 * pin_count going to zero while the context is still
5083 * enabled.
5084 */
5085 intel_context_sched_disable_unpin(ce);
5086
5087 spin_lock_irqsave(&ce->guc_state.lock, flags);
5088 banned = context_banned(ce);
5089 clr_context_banned(ce);
5090 clr_context_pending_disable(ce);
5091 __guc_signal_context_fence(ce);
5092 guc_blocked_fence_complete(ce);
5093 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
5094
5095 if (banned) {
5096 guc_cancel_context_requests(ce);
5097 intel_engine_signal_breadcrumbs(ce->engine);
5098 }
5099 }
5100
5101 decr_outstanding_submission_g2h(guc);
5102 intel_context_put(ce);
5103
5104 return 0;
5105 }
5106
5107 static void capture_error_state(struct intel_guc *guc,
5108 struct intel_context *ce)
5109 {
5110 struct intel_gt *gt = guc_to_gt(guc);
5111 struct drm_i915_private *i915 = gt->i915;
5112 intel_wakeref_t wakeref;
5113 intel_engine_mask_t engine_mask;
5114
5115 if (intel_engine_is_virtual(ce->engine)) {
5116 struct intel_engine_cs *e;
5117 intel_engine_mask_t tmp, virtual_mask = ce->engine->mask;
5118
5119 engine_mask = 0;
5120 for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) {
5121 bool match = intel_guc_capture_is_matching_engine(gt, ce, e);
5122
5123 if (match) {
5124 intel_engine_set_hung_context(e, ce);
5125 engine_mask |= e->mask;
5126 i915_increase_reset_engine_count(&i915->gpu_error,
5127 e);
5128 }
5129 }
5130
5131 if (!engine_mask) {
5132 guc_warn(guc, "No matching physical engine capture for virtual engine context 0x%04X / %s",
5133 ce->guc_id.id, ce->engine->name);
5134 engine_mask = ~0U;
5135 }
5136 } else {
5137 intel_engine_set_hung_context(ce->engine, ce);
5138 engine_mask = ce->engine->mask;
5139 i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
5140 }
5141
5142 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
5143 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
5144 }
5145
5146 static void guc_context_replay(struct intel_context *ce)
5147 {
5148 struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
5149
5150 __guc_reset_context(ce, ce->engine->mask);
5151 tasklet_hi_schedule(&sched_engine->tasklet);
5152 }
5153
5154 static void guc_handle_context_reset(struct intel_guc *guc,
5155 struct intel_context *ce)
5156 {
5157 bool capture = intel_context_is_schedulable(ce);
5158
5159 trace_intel_context_reset(ce);
5160
5161 guc_dbg(guc, "%s context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
5162 capture ? "Got" : "Ignoring",
5163 ce->guc_id.id, ce->engine->name,
5164 str_yes_no(intel_context_is_exiting(ce)),
5165 str_yes_no(intel_context_is_banned(ce)));
5166
5167 if (capture) {
5168 capture_error_state(guc, ce);
5169 guc_context_replay(ce);
5170 }
5171 }
5172
5173 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
5174 const u32 *msg, u32 len)
5175 {
5176 struct intel_context *ce;
5177 unsigned long flags;
5178 int ctx_id;
5179
5180 if (unlikely(len != 1)) {
5181 guc_err(guc, "Invalid length %u", len);
5182 return -EPROTO;
5183 }
5184
5185 ctx_id = msg[0];
5186
5187 /*
5188 * The context lookup uses the xarray, but lookups only require the RCU lock,
5189 * not the full spinlock. So take the lock explicitly here and hold it until
5190 * a reference to the context has been taken, to ensure it can't be destroyed
5191 * asynchronously before the reset is done.
5192 */
5193 xa_lock_irqsave(&guc->context_lookup, flags);
5194 ce = g2h_context_lookup(guc, ctx_id);
5195 if (ce)
5196 intel_context_get(ce);
5197 xa_unlock_irqrestore(&guc->context_lookup, flags);
5198
5199 if (unlikely(!ce))
5200 return -EPROTO;
5201
5202 guc_handle_context_reset(guc, ce);
5203 intel_context_put(ce);
5204
5205 return 0;
5206 }
5207
5208 int intel_guc_error_capture_process_msg(struct intel_guc *guc,
5209 const u32 *msg, u32 len)
5210 {
5211 u32 status;
5212
5213 if (unlikely(len != 1)) {
5214 guc_dbg(guc, "Invalid length %u", len);
5215 return -EPROTO;
5216 }
5217
5218 status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
5219 if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
5220 guc_warn(guc, "No space for error capture");
5221
5222 intel_guc_capture_process(guc);
5223
5224 return 0;
5225 }
5226
5227 struct intel_engine_cs *
5228 intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
5229 {
5230 struct intel_gt *gt = guc_to_gt(guc);
5231 u8 engine_class = guc_class_to_engine_class(guc_class);
5232
5233 /* Class index is checked in class converter */
5234 GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
5235
5236 return gt->engine_class[engine_class][instance];
5237 }
5238
5239 static void reset_fail_worker_func(struct work_struct *w)
5240 {
5241 struct intel_guc *guc = container_of(w, struct intel_guc,
5242 submission_state.reset_fail_worker);
5243 struct intel_gt *gt = guc_to_gt(guc);
5244 intel_engine_mask_t reset_fail_mask;
5245 unsigned long flags;
5246
5247 spin_lock_irqsave(&guc->submission_state.lock, flags);
5248 reset_fail_mask = guc->submission_state.reset_fail_mask;
5249 guc->submission_state.reset_fail_mask = 0;
5250 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
5251
5252 if (likely(reset_fail_mask)) {
5253 struct intel_engine_cs *engine;
5254 enum intel_engine_id id;
5255
5256 /*
5257 * GuC is toast at this point - it dead loops after sending the failed
5258 * reset notification. So we need to manually determine the guilty context.
5259 * Note that it should be reliable to do this here because the GuC is
5260 * toast and will not be scheduling behind the KMD's back.
5261 */
5262 for_each_engine_masked(engine, gt, reset_fail_mask, id)
5263 intel_guc_find_hung_context(engine);
5264
5265 intel_gt_handle_error(gt, reset_fail_mask,
5266 I915_ERROR_CAPTURE,
5267 "GuC failed to reset engine mask=0x%x",
5268 reset_fail_mask);
5269 }
5270 }
5271
5272 int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
5273 const u32 *msg, u32 len)
5274 {
5275 struct intel_engine_cs *engine;
5276 u8 guc_class, instance;
5277 u32 reason;
5278 unsigned long flags;
5279
5280 if (unlikely(len != 3)) {
5281 guc_err(guc, "Invalid length %u", len);
5282 return -EPROTO;
5283 }
5284
5285 guc_class = msg[0];
5286 instance = msg[1];
5287 reason = msg[2];
5288
5289 engine = intel_guc_lookup_engine(guc, guc_class, instance);
5290 if (unlikely(!engine)) {
5291 guc_err(guc, "Invalid engine %d:%d", guc_class, instance);
5292 return -EPROTO;
5293 }
5294
5295 /*
5296 * This is an unexpected failure of a hardware feature. So, log a real
5297 * error message, not just the informational one that comes with the reset.
5298 */
5299 guc_err(guc, "Engine reset failed on %d:%d (%s) because 0x%08X",
5300 guc_class, instance, engine->name, reason);
5301
5302 spin_lock_irqsave(&guc->submission_state.lock, flags);
5303 guc->submission_state.reset_fail_mask |= engine->mask;
5304 spin_unlock_irqrestore(&guc->submission_state.lock, flags);
5305
5306 /*
5307 * A GT reset flushes this worker queue (G2H handler) so we must use
5308 * another worker to trigger a GT reset.
5309 */
5310 queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
5311
5312 return 0;
5313 }
5314
5315 void intel_guc_find_hung_context(struct intel_engine_cs *engine)
5316 {
5317 struct intel_guc *guc = gt_to_guc(engine->gt);
5318 struct intel_context *ce;
5319 struct i915_request *rq;
5320 unsigned long index;
5321 unsigned long flags;
5322
5323 /* Reset called during driver load? GuC not yet initialised! */
5324 if (unlikely(!guc_submission_initialized(guc)))
5325 return;
5326
5327 xa_lock_irqsave(&guc->context_lookup, flags);
5328 xa_for_each(&guc->context_lookup, index, ce) {
5329 bool found;
5330
5331 if (!kref_get_unless_zero(&ce->ref))
5332 continue;
5333
5334 xa_unlock(&guc->context_lookup);
5335
5336 if (!intel_context_is_pinned(ce))
5337 goto next;
5338
5339 if (intel_engine_is_virtual(ce->engine)) {
5340 if (!(ce->engine->mask & engine->mask))
5341 goto next;
5342 } else {
5343 if (ce->engine != engine)
5344 goto next;
5345 }
5346
5347 found = false;
5348 spin_lock(&ce->guc_state.lock);
5349 list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
5350 if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
5351 continue;
5352
5353 found = true;
5354 break;
5355 }
5356 spin_unlock(&ce->guc_state.lock);
5357
5358 if (found) {
5359 intel_engine_set_hung_context(engine, ce);
5360
5361 /* Can only cope with one hang at a time... */
5362 intel_context_put(ce);
5363 xa_lock(&guc->context_lookup);
5364 goto done;
5365 }
5366
5367 next:
5368 intel_context_put(ce);
5369 xa_lock(&guc->context_lookup);
5370 }
5371 done:
5372 xa_unlock_irqrestore(&guc->context_lookup, flags);
5373 }
5374
5375 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
5376 struct i915_request *hung_rq,
5377 struct drm_printer *m)
5378 {
5379 struct intel_guc *guc = gt_to_guc(engine->gt);
5380 struct intel_context *ce;
5381 unsigned long index;
5382 unsigned long flags;
5383
5384 /* Reset called during driver load? GuC not yet initialised! */
5385 if (unlikely(!guc_submission_initialized(guc)))
5386 return;
5387
5388 xa_lock_irqsave(&guc->context_lookup, flags);
5389 xa_for_each(&guc->context_lookup, index, ce) {
5390 if (!kref_get_unless_zero(&ce->ref))
5391 continue;
5392
5393 xa_unlock(&guc->context_lookup);
5394
5395 if (!intel_context_is_pinned(ce))
5396 goto next;
5397
5398 if (intel_engine_is_virtual(ce->engine)) {
5399 if (!(ce->engine->mask & engine->mask))
5400 goto next;
5401 } else {
5402 if (ce->engine != engine)
5403 goto next;
5404 }
5405
5406 spin_lock(&ce->guc_state.lock);
5407 intel_engine_dump_active_requests(&ce->guc_state.requests,
5408 hung_rq, m);
5409 spin_unlock(&ce->guc_state.lock);
5410
5411 next:
5412 intel_context_put(ce);
5413 xa_lock(&guc->context_lookup);
5414 }
5415 xa_unlock_irqrestore(&guc->context_lookup, flags);
5416 }
5417
5418 void intel_guc_submission_print_info(struct intel_guc *guc,
5419 struct drm_printer *p)
5420 {
5421 struct i915_sched_engine *sched_engine = guc->sched_engine;
5422 struct rb_node *rb;
5423 unsigned long flags;
5424
5425 if (!sched_engine)
5426 return;
5427
5428 drm_printf(p, "GuC Submission API Version: %d.%d.%d\n",
5429 guc->submission_version.major, guc->submission_version.minor,
5430 guc->submission_version.patch);
5431 drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
5432 atomic_read(&guc->outstanding_submission_g2h));
5433 drm_printf(p, "GuC tasklet count: %u\n",
5434 atomic_read(&sched_engine->tasklet.count));
5435
5436 spin_lock_irqsave(&sched_engine->lock, flags);
5437 drm_printf(p, "Requests in GuC submit tasklet:\n");
5438 for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
5439 struct i915_priolist *pl = to_priolist(rb);
5440 struct i915_request *rq;
5441
5442 priolist_for_each_request(rq, pl)
5443 drm_printf(p, "guc_id=%u, seqno=%llu\n",
5444 rq->context->guc_id.id,
5445 rq->fence.seqno);
5446 }
5447 spin_unlock_irqrestore(&sched_engine->lock, flags);
5448 drm_printf(p, "\n");
5449 }
5450
5451 static inline void guc_log_context_priority(struct drm_printer *p,
5452 struct intel_context *ce)
5453 {
5454 int i;
5455
5456 drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
5457 drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
5458 for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
5459 i < GUC_CLIENT_PRIORITY_NUM; ++i) {
5460 drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
5461 i, ce->guc_state.prio_count[i]);
5462 }
5463 drm_printf(p, "\n");
5464 }
5465
5466 static inline void guc_log_context(struct drm_printer *p,
5467 struct intel_context *ce)
5468 {
5469 drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
5470 drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
5471 drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
5472 ce->ring->head,
5473 ce->lrc_reg_state[CTX_RING_HEAD]);
5474 drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
5475 ce->ring->tail,
5476 ce->lrc_reg_state[CTX_RING_TAIL]);
5477 drm_printf(p, "\t\tContext Pin Count: %u\n",
5478 atomic_read(&ce->pin_count));
5479 drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
5480 atomic_read(&ce->guc_id.ref));
5481 drm_printf(p, "\t\tSchedule State: 0x%x\n",
5482 ce->guc_state.sched_state);
5483 }
5484
5485 void intel_guc_submission_print_context_info(struct intel_guc *guc,
5486 struct drm_printer *p)
5487 {
5488 struct intel_context *ce;
5489 unsigned long index;
5490 unsigned long flags;
5491
5492 xa_lock_irqsave(&guc->context_lookup, flags);
5493 xa_for_each(&guc->context_lookup, index, ce) {
5494 GEM_BUG_ON(intel_context_is_child(ce));
5495
5496 guc_log_context(p, ce);
5497 guc_log_context_priority(p, ce);
5498
5499 if (intel_context_is_parent(ce)) {
5500 struct intel_context *child;
5501
5502 drm_printf(p, "\t\tNumber children: %u\n",
5503 ce->parallel.number_children);
5504
5505 if (ce->parallel.guc.wq_status) {
5506 drm_printf(p, "\t\tWQI Head: %u\n",
5507 READ_ONCE(*ce->parallel.guc.wq_head));
5508 drm_printf(p, "\t\tWQI Tail: %u\n",
5509 READ_ONCE(*ce->parallel.guc.wq_tail));
5510 drm_printf(p, "\t\tWQI Status: %u\n",
5511 READ_ONCE(*ce->parallel.guc.wq_status));
5512 }
5513
5514 if (ce->engine->emit_bb_start ==
5515 emit_bb_start_parent_no_preempt_mid_batch) {
5516 u8 i;
5517
5518 drm_printf(p, "\t\tChildren Go: %u\n",
5519 get_children_go_value(ce));
5520 for (i = 0; i < ce->parallel.number_children; ++i)
5521 drm_printf(p, "\t\tChildren Join: %u\n",
5522 get_children_join_value(ce, i));
5523 }
5524
5525 for_each_child(ce, child)
5526 guc_log_context(p, child);
5527 }
5528 }
5529 xa_unlock_irqrestore(&guc->context_lookup, flags);
5530 }
5531
5532 static inline u32 get_children_go_addr(struct intel_context *ce)
5533 {
5534 GEM_BUG_ON(!intel_context_is_parent(ce));
5535
5536 return i915_ggtt_offset(ce->state) +
5537 __get_parent_scratch_offset(ce) +
5538 offsetof(struct parent_scratch, go.semaphore);
5539 }
5540
5541 static inline u32 get_children_join_addr(struct intel_context *ce,
5542 u8 child_index)
5543 {
5544 GEM_BUG_ON(!intel_context_is_parent(ce));
5545
5546 return i915_ggtt_offset(ce->state) +
5547 __get_parent_scratch_offset(ce) +
5548 offsetof(struct parent_scratch, join[child_index].semaphore);
5549 }
5550
5551 #define PARENT_GO_BB 1
5552 #define PARENT_GO_FINI_BREADCRUMB 0
5553 #define CHILD_GO_BB 1
5554 #define CHILD_GO_FINI_BREADCRUMB 0
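
/*
 * Editor's note: the values above are the payloads of the GGTT semaphore
 * writes used for the parent/child handshakes emitted below. Each child
 * writes PARENT_GO_* into its join semaphore and polls the shared go
 * semaphore for CHILD_GO_*; the parent polls every join semaphore and then
 * writes CHILD_GO_* to the go semaphore. The batch-buffer start and fini
 * breadcrumb phases use different values (1 vs 0) so a stale value left
 * over from one phase cannot satisfy the wait of the other.
 */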
5555 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
5556 u64 offset, u32 len,
5557 const unsigned int flags)
5558 {
5559 struct intel_context *ce = rq->context;
5560 u32 *cs;
5561 u8 i;
5562
5563 GEM_BUG_ON(!intel_context_is_parent(ce));
5564
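	/*
	 * Ring-space accounting (editor-added comment): 4 dwords of semaphore
	 * wait per child, 2 to disable arbitration, 4 for the GGTT "go" write
	 * and 4 for the batch buffer start, i.e. 10 + 4 * number_children.
	 */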
5565 cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
5566 if (IS_ERR(cs))
5567 return PTR_ERR(cs);
5568
5569 /* Wait on children */
5570 for (i = 0; i < ce->parallel.number_children; ++i) {
5571 *cs++ = (MI_SEMAPHORE_WAIT |
5572 MI_SEMAPHORE_GLOBAL_GTT |
5573 MI_SEMAPHORE_POLL |
5574 MI_SEMAPHORE_SAD_EQ_SDD);
5575 *cs++ = PARENT_GO_BB;
5576 *cs++ = get_children_join_addr(ce, i);
5577 *cs++ = 0;
5578 }
5579
5580 /* Turn off preemption */
5581 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
5582 *cs++ = MI_NOOP;
5583
5584 /* Tell children go */
5585 cs = gen8_emit_ggtt_write(cs,
5586 CHILD_GO_BB,
5587 get_children_go_addr(ce),
5588 0);
5589
5590 /* Jump to batch */
5591 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
5592 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
5593 *cs++ = lower_32_bits(offset);
5594 *cs++ = upper_32_bits(offset);
5595 *cs++ = MI_NOOP;
5596
5597 intel_ring_advance(rq, cs);
5598
5599 return 0;
5600 }
5601
5602 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
5603 u64 offset, u32 len,
5604 const unsigned int flags)
5605 {
5606 struct intel_context *ce = rq->context;
5607 struct intel_context *parent = intel_context_to_parent(ce);
5608 u32 *cs;
5609
5610 GEM_BUG_ON(!intel_context_is_child(ce));
5611
5612 cs = intel_ring_begin(rq, 12);
5613 if (IS_ERR(cs))
5614 return PTR_ERR(cs);
5615
5616 /* Signal parent */
5617 cs = gen8_emit_ggtt_write(cs,
5618 PARENT_GO_BB,
5619 get_children_join_addr(parent,
5620 ce->parallel.child_index),
5621 0);
5622
5623 /* Wait on parent for go */
5624 *cs++ = (MI_SEMAPHORE_WAIT |
5625 MI_SEMAPHORE_GLOBAL_GTT |
5626 MI_SEMAPHORE_POLL |
5627 MI_SEMAPHORE_SAD_EQ_SDD);
5628 *cs++ = CHILD_GO_BB;
5629 *cs++ = get_children_go_addr(parent);
5630 *cs++ = 0;
5631
5632 /* Turn off preemption */
5633 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
5634
5635 /* Jump to batch */
5636 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
5637 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
5638 *cs++ = lower_32_bits(offset);
5639 *cs++ = upper_32_bits(offset);
5640
5641 intel_ring_advance(rq, cs);
5642
5643 return 0;
5644 }
5645
5646 static u32 *
5647 __emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
5648 u32 *cs)
5649 {
5650 struct intel_context *ce = rq->context;
5651 u8 i;
5652
5653 GEM_BUG_ON(!intel_context_is_parent(ce));
5654
5655 /* Wait on children */
5656 for (i = 0; i < ce->parallel.number_children; ++i) {
5657 *cs++ = (MI_SEMAPHORE_WAIT |
5658 MI_SEMAPHORE_GLOBAL_GTT |
5659 MI_SEMAPHORE_POLL |
5660 MI_SEMAPHORE_SAD_EQ_SDD);
5661 *cs++ = PARENT_GO_FINI_BREADCRUMB;
5662 *cs++ = get_children_join_addr(ce, i);
5663 *cs++ = 0;
5664 }
5665
5666 /* Turn on preemption */
5667 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
5668 *cs++ = MI_NOOP;
5669
5670 /* Tell children go */
5671 cs = gen8_emit_ggtt_write(cs,
5672 CHILD_GO_FINI_BREADCRUMB,
5673 get_children_go_addr(ce),
5674 0);
5675
5676 return cs;
5677 }
5678
5679 /*
5680 * If this is true, a submission of multi-lrc requests had an error and the
5681 * requests need to be skipped. The front end (execbuf IOCTL) should've called
5682 * i915_request_skip, which squashes the BB, but we still need to emit the fini
5683 * breadcrumb seqno write. At this point we don't know how many of the
5684 * requests in the multi-lrc submission were generated, so we can't do the
5685 * handshake between the parent and children (e.g. if 4 requests should be
5686 * generated but the 2nd hit an error, only 1 would be seen by the GuC backend).
5687 * Simply skip the handshake, but still emit the breadcrumb seqno, if an error
5688 * has occurred on any of the requests in the submission / relationship.
5689 */
5690 static inline bool skip_handshake(struct i915_request *rq)
5691 {
5692 return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
5693 }
5694
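/*
 * Editor's note: NON_SKIP_LEN below is the dword count of the tail that is
 * emitted even when the handshake is skipped: the gen8_emit_ggtt_write() of
 * the breadcrumb seqno (4 dwords) plus MI_USER_INTERRUPT and MI_NOOP
 * (2 dwords). The GEM_BUG_ONs in the emitters verify this accounting.
 */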
5695 #define NON_SKIP_LEN 6
5696 static u32 *
5697 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
5698 u32 *cs)
5699 {
5700 struct intel_context *ce = rq->context;
5701 __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
5702 __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
5703
5704 GEM_BUG_ON(!intel_context_is_parent(ce));
5705
5706 if (unlikely(skip_handshake(rq))) {
5707 /*
5708 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch;
5709 * the NON_SKIP_LEN comes from the length of the emits below.
5710 */
5711 memset(cs, 0, sizeof(u32) *
5712 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
5713 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
5714 } else {
5715 cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
5716 }
5717
5718 /* Emit fini breadcrumb */
5719 before_fini_breadcrumb_user_interrupt_cs = cs;
5720 cs = gen8_emit_ggtt_write(cs,
5721 rq->fence.seqno,
5722 i915_request_active_timeline(rq)->hwsp_offset,
5723 0);
5724
5725 /* User interrupt */
5726 *cs++ = MI_USER_INTERRUPT;
5727 *cs++ = MI_NOOP;
5728
5729 /* Ensure our math for skip + emit is correct */
5730 GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
5731 cs);
5732 GEM_BUG_ON(start_fini_breadcrumb_cs +
5733 ce->engine->emit_fini_breadcrumb_dw != cs);
5734
5735 rq->tail = intel_ring_offset(rq, cs);
5736
5737 return cs;
5738 }
5739
5740 static u32 *
5741 __emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
5742 u32 *cs)
5743 {
5744 struct intel_context *ce = rq->context;
5745 struct intel_context *parent = intel_context_to_parent(ce);
5746
5747 GEM_BUG_ON(!intel_context_is_child(ce));
5748
5749 /* Turn on preemption */
5750 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
5751 *cs++ = MI_NOOP;
5752
5753 /* Signal parent */
5754 cs = gen8_emit_ggtt_write(cs,
5755 PARENT_GO_FINI_BREADCRUMB,
5756 get_children_join_addr(parent,
5757 ce->parallel.child_index),
5758 0);
5759
5760 /* Wait on parent for go */
5761 *cs++ = (MI_SEMAPHORE_WAIT |
5762 MI_SEMAPHORE_GLOBAL_GTT |
5763 MI_SEMAPHORE_POLL |
5764 MI_SEMAPHORE_SAD_EQ_SDD);
5765 *cs++ = CHILD_GO_FINI_BREADCRUMB;
5766 *cs++ = get_children_go_addr(parent);
5767 *cs++ = 0;
5768
5769 return cs;
5770 }
5771
5772 static u32 *
5773 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
5774 u32 *cs)
5775 {
5776 struct intel_context *ce = rq->context;
5777 __maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
5778 __maybe_unused u32 *start_fini_breadcrumb_cs = cs;
5779
5780 GEM_BUG_ON(!intel_context_is_child(ce));
5781
5782 if (unlikely(skip_handshake(rq))) {
5783 /*
5784 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch;
5785 * the NON_SKIP_LEN comes from the length of the emits below.
5786 */
5787 memset(cs, 0, sizeof(u32) *
5788 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
5789 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
5790 } else {
5791 cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
5792 }
5793
5794 /* Emit fini breadcrumb */
5795 before_fini_breadcrumb_user_interrupt_cs = cs;
5796 cs = gen8_emit_ggtt_write(cs,
5797 rq->fence.seqno,
5798 i915_request_active_timeline(rq)->hwsp_offset,
5799 0);
5800
5801 /* User interrupt */
5802 *cs++ = MI_USER_INTERRUPT;
5803 *cs++ = MI_NOOP;
5804
5805 /* Ensure our math for skip + emit is correct */
5806 GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
5807 cs);
5808 GEM_BUG_ON(start_fini_breadcrumb_cs +
5809 ce->engine->emit_fini_breadcrumb_dw != cs);
5810
5811 rq->tail = intel_ring_offset(rq, cs);
5812
5813 return cs;
5814 }
5815
5816 #undef NON_SKIP_LEN
5817
5818 static struct intel_context *
5819 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
5820 unsigned long flags)
5821 {
5822 struct guc_virtual_engine *ve;
5823 struct intel_guc *guc;
5824 unsigned int n;
5825 int err;
5826
5827 ve = kzalloc(sizeof(*ve), GFP_KERNEL);
5828 if (!ve)
5829 return ERR_PTR(-ENOMEM);
5830
5831 guc = gt_to_guc(siblings[0]->gt);
5832
5833 ve->base.i915 = siblings[0]->i915;
5834 ve->base.gt = siblings[0]->gt;
5835 ve->base.uncore = siblings[0]->uncore;
5836 ve->base.id = -1;
5837
5838 ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
5839 ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
5840 ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
5841 ve->base.saturated = ALL_ENGINES;
5842
5843 snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
5844
5845 ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
5846
5847 ve->base.cops = &virtual_guc_context_ops;
5848 ve->base.request_alloc = guc_request_alloc;
5849 ve->base.bump_serial = virtual_guc_bump_serial;
5850
5851 ve->base.submit_request = guc_submit_request;
5852
5853 ve->base.flags = I915_ENGINE_IS_VIRTUAL;
5854
5855 BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
5856 ve->base.mask = VIRTUAL_ENGINES;
5857
5858 intel_context_init(&ve->context, &ve->base);
5859
5860 for (n = 0; n < count; n++) {
5861 struct intel_engine_cs *sibling = siblings[n];
5862
5863 GEM_BUG_ON(!is_power_of_2(sibling->mask));
5864 if (sibling->mask & ve->base.mask) {
5865 guc_dbg(guc, "duplicate %s entry in load balancer\n",
5866 sibling->name);
5867 err = -EINVAL;
5868 goto err_put;
5869 }
5870
5871 ve->base.mask |= sibling->mask;
5872 ve->base.logical_mask |= sibling->logical_mask;
5873
5874 if (n != 0 && ve->base.class != sibling->class) {
5875 guc_dbg(guc, "invalid mixing of engine class, sibling %d, already %d\n",
5876 sibling->class, ve->base.class);
5877 err = -EINVAL;
5878 goto err_put;
5879 } else if (n == 0) {
5880 ve->base.class = sibling->class;
5881 ve->base.uabi_class = sibling->uabi_class;
5882 snprintf(ve->base.name, sizeof(ve->base.name),
5883 "v%dx%d", ve->base.class, count);
5884 ve->base.context_size = sibling->context_size;
5885
5886 ve->base.add_active_request =
5887 sibling->add_active_request;
5888 ve->base.remove_active_request =
5889 sibling->remove_active_request;
5890 ve->base.emit_bb_start = sibling->emit_bb_start;
5891 ve->base.emit_flush = sibling->emit_flush;
5892 ve->base.emit_init_breadcrumb =
5893 sibling->emit_init_breadcrumb;
5894 ve->base.emit_fini_breadcrumb =
5895 sibling->emit_fini_breadcrumb;
5896 ve->base.emit_fini_breadcrumb_dw =
5897 sibling->emit_fini_breadcrumb_dw;
5898 ve->base.breadcrumbs =
5899 intel_breadcrumbs_get(sibling->breadcrumbs);
5900
5901 ve->base.flags |= sibling->flags;
5902
5903 ve->base.props.timeslice_duration_ms =
5904 sibling->props.timeslice_duration_ms;
5905 ve->base.props.preempt_timeout_ms =
5906 sibling->props.preempt_timeout_ms;
5907 }
5908 }
5909
5910 return &ve->context;
5911
5912 err_put:
5913 intel_context_put(&ve->context);
5914 return ERR_PTR(err);
5915 }
5916
5917 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
5918 {
5919 struct intel_engine_cs *engine;
5920 intel_engine_mask_t tmp, mask = ve->mask;
5921
5922 for_each_engine_masked(engine, ve->gt, mask, tmp)
5923 if (READ_ONCE(engine->props.heartbeat_interval_ms))
5924 return true;
5925
5926 return false;
5927 }
5928
5929 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5930 #include "selftest_guc.c"
5931 #include "selftest_guc_multi_lrc.c"
5932 #include "selftest_guc_hangcheck.c"
5933 #endif
5934