// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gt/gen8_engine_cs.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc.h"
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"

#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: GuC-based command submission
 *
 * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
 * firmware is moving to an updated submission interface and we plan to
 * turn submission back on when that lands. The documentation below (and the
 * related code) matches the old submission model and will be updated as part
 * of the upgrade to the new flow.
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC. Currently, we only use one
 * descriptor. This stage descriptor lets the GuC know about the workqueue and
 * process descriptor. Theoretically, it also lets the GuC know about our HW
 * contexts (context ID, etc...), but we actually employ a kind of submission
 * where the GuC uses the LRCA sent via the work item instead. This is called
 * a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). The firmware writes a success/fail code back to the action
 * register after it processes the request. The kernel driver polls waiting
 * for this update and then proceeds.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into the work item; see
 * guc_add_request().
 *
 */
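
/*
 * A minimal sketch of the scratch-register handshake described above, for
 * illustration only; it assumes the usual SOFT_SCRATCH()/GUC_SEND_INTERRUPT
 * register definitions and omits all error handling (the real MMIO send path
 * is intel_guc_send_mmio()):
 *
 *	intel_uncore_write(uncore, SOFT_SCRATCH(0), action[0]);
 *	for (i = 1; i < len; i++)
 *		intel_uncore_write(uncore, SOFT_SCRATCH(i), action[i]);
 *	intel_uncore_write(uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
 *
 *	ret = intel_wait_for_register(uncore, SOFT_SCRATCH(0),
 *				      INTEL_GUC_MSG_TYPE_MASK,
 *				      INTEL_GUC_MSG_TYPE_RESPONSE <<
 *				      INTEL_GUC_MSG_TYPE_SHIFT,
 *				      10);
 */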

#define GUC_REQUEST_SIZE	64 /* bytes */

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
	struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;

	return &base[id];
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
			      GUC_MAX_STAGE_DESCRIPTORS);

	return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
					      &guc->stage_desc_pool_vaddr);
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures related to work submission (process descriptor, write queue,
 * etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	/* we only use 1 stage desc, so hardcode it to 0 */
	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;

	desc->stage_id = 0;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;

	desc->wq_size = GUC_WQ_SIZE;
}

static void guc_stage_desc_fini(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));
}

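/*
 * For reference, the "Work Items" note above describes what this helper is
 * expected to build: a WQ_TYPE_INORDER work item carrying the ring tail and a
 * context descriptor dword. A rough, illustrative sketch (field and macro
 * names follow the old struct guc_wq_item layout and are not the final
 * interface):
 *
 *	wqi->header = WQ_TYPE_INORDER |
 *		      (wqi_len << WQ_LEN_SHIFT) |
 *		      (target_engine << WQ_TARGET_SHIFT) |
 *		      WQ_NO_WCFLUSH_WAIT;
 *	wqi->context_desc = lower_32_bits(ce->lrc.desc);
 *	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
 *	wqi->fence_id = rq->fence.seqno;
 */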
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	/* Leaving stub as this function will be used in future patches */
}

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
 * pages pinned in the mappable aperture portion of the GGTT are visible to
 * the command streamer. Writes done by the GuC on our behalf do not guarantee
 * such ordering; therefore, to ensure the flush, we issue a POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	if (i915_vma_is_map_and_fenceable(vma))
		intel_uncore_posting_read_fw(vma->vm->gt->uncore,
					     GUC_STATUS);
}

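/*
 * Hand the span of requests [out, end) to the GuC: flush each request's ring
 * writes out of the GGTT first, then queue it via guc_add_request().
 */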
static void guc_submit(struct intel_engine_cs *engine,
		       struct i915_request **out,
		       struct i915_request **end)
{
	struct intel_guc *guc = &engine->gt->uc.guc;

	do {
		struct i915_request *rq = *out++;

		flush_ggtt_writes(rq->ring->vma);
		guc_add_request(guc, rq);
	} while (out != end);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority;
}

static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	/*
	 * Currently we are not tracking the rq->context being inflight
	 * (ce->inflight = rq->engine). It is only used by the execlists
	 * backend at the moment, a similar counting strategy would be
	 * required if we generalise the inflight tracking.
	 */

	__intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put_async(rq->engine->gt);
	i915_request_put(rq);
}

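/*
 * Move ready requests from the priority tree onto the free inflight ports and
 * pass the chosen span to guc_submit(). Must be called with
 * engine->active.lock held.
 */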
static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port;
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->active.lock);

	if (last) {
		if (*++first)
			return;

		last = NULL;
	}

	/*
	 * We write directly into the execlists->inflight queue and don't use
	 * the execlists->pending queue, as we don't have a distinct switch
	 * event.
	 */
	port = first;
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;

		priolist_for_each_request_consume(rq, rn, p) {
			if (last && rq->context != last->context) {
				if (port == last_port)
					goto done;

				*port = schedule_in(last,
						    port - execlists->inflight);
				port++;
			}

			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			submit = true;
			last = rq;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
done:
	execlists->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit) {
		*port = schedule_in(last, port - execlists->inflight);
		*++port = NULL;
		guc_submit(engine, first, port);
	}
	execlists->active = execlists->inflight;
}

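/*
 * Per-engine submission tasklet: drop completed requests from the head of the
 * inflight array, compact what remains, then try to feed more work to the GuC
 * via __guc_dequeue().
 */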
static void guc_submission_tasklet(struct tasklet_struct *t)
{
	struct intel_engine_cs * const engine =
		from_tasklet(engine, t, execlists.tasklet);
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port, *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	for (port = execlists->inflight; (rq = *port); port++) {
		if (!i915_request_completed(rq))
			break;

		schedule_out(rq);
	}
	if (port != execlists->inflight) {
		int idx = port - execlists->inflight;
		int rem = ARRAY_SIZE(execlists->inflight) - idx;
		memmove(execlists->inflight, port, rem * sizeof(*port));
	}

	__guc_dequeue(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
}

static void guc_reset_state(struct intel_context *ce,
			    struct intel_engine_cs *engine,
			    u32 head,
			    bool scrub)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));

	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	if (scrub)
		lrc_init_regs(ce, engine, true);

	/* Rerun the request; its payload has been neutered (if guilty). */
	lrc_update_regs(ce, engine, head);
}

static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	if (!i915_request_started(rq))
		stalled = false;

	__i915_request_reset(rq, stalled);
	guc_reset_state(rq->context, engine, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_cancel(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		i915_request_set_error_once(rq, -EIO);
		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);

		priolist_for_each_request_consume(rq, rn, p) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	ENGINE_TRACE(engine, "depth->%d\n",
		     atomic_read(&execlists->tasklet.count));
}

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	return 0;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	if (guc->stage_desc_pool)
		guc_stage_desc_pool_destroy(guc);
}

static void guc_interrupts_capture(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Don't handle the ctx switch interrupt in GuC submission mode */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
}

static void guc_interrupts_release(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Handle ctx switch interrupts again */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}

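/*
 * GuC contexts reuse the execlists LRC layout, so the context ops below
 * mostly just forward to the common lrc_*() helpers.
 */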
static int guc_context_alloc(struct intel_context *ce)
{
	return lrc_alloc(ce, ce->engine);
}

static int guc_context_pre_pin(struct intel_context *ce,
			       struct i915_gem_ww_ctx *ww,
			       void **vaddr)
{
	return lrc_pre_pin(ce, ce->engine, ww, vaddr);
}

static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
	return lrc_pin(ce, ce->engine, vaddr);
}

static const struct intel_context_ops guc_context_ops = {
	.alloc = guc_context_alloc,

	.pre_pin = guc_context_pre_pin,
	.pin = guc_context_pin,
	.unpin = lrc_unpin,
	.post_unpin = lrc_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = lrc_reset,
	.destroy = lrc_destroy,
};

static int guc_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += GUC_REQUEST_SIZE;

	/*
	 * Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	request->reserved_space -= GUC_REQUEST_SIZE;
	return 0;
}

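/*
 * queue_request() adds the request to the engine's priority tree; the actual
 * hand-off to the GuC happens later from the submission tasklet.
 */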
static inline void queue_request(struct intel_engine_cs *engine,
				 struct i915_request *rq,
				 int prio)
{
	GEM_BUG_ON(!list_empty(&rq->sched.link));
	list_add_tail(&rq->sched.link,
		      i915_sched_lookup_priolist(engine, prio));
	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static void guc_submit_request(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	queue_request(engine, rq, rq_prio(rq));

	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
	GEM_BUG_ON(list_empty(&rq->sched.link));

	tasklet_hi_schedule(&engine->execlists.tasklet);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

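/* Reset the breadcrumb seqno of every timeline using this engine's HWSP. */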
static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void guc_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) the contents of our pinned buffers have been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
}

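/* Program HWSTAM and point RING_HWS_PGA at the status page in the GGTT. */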
static void setup_hwsp(struct intel_engine_cs *engine)
{
	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */

	ENGINE_WRITE_FW(engine,
			RING_HWS_PGA,
			i915_ggtt_offset(engine->status_page.vma));
}

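/* Take the engine out of legacy ringbuffer mode and clear STOP_RING. */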
static void start_engine(struct intel_engine_cs *engine)
{
	ENGINE_WRITE_FW(engine,
			RING_MODE_GEN7,
			_MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
	ENGINE_POSTING_READ(engine, RING_MI_MODE);
}

static int guc_resume(struct intel_engine_cs *engine)
{
	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);

	intel_mocs_init_engine(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	setup_hwsp(engine);
	start_engine(engine);

	return 0;
}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = guc_submit_request;
	engine->schedule = i915_schedule;
	engine->execlists.tasklet.callback = guc_submission_tasklet;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.rewind = guc_reset_rewind;
	engine->reset.cancel = guc_reset_cancel;
	engine->reset.finish = guc_reset_finish;

	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
	engine->flags |= I915_ENGINE_HAS_PREEMPTION;

	/*
	 * TODO: GuC supports timeslicing and semaphores as well, but they're
	 * handled by the firmware so some minor tweaks are required before
	 * enabling.
	 *
	 * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
	 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
	 */

	engine->emit_bb_start = gen8_emit_bb_start;

	/*
	 * For the breadcrumb irq to work we need the interrupts to stay
	 * enabled. However, on all platforms on which we'll have support for
	 * GuC submission we don't allow disabling the interrupts at runtime,
	 * so we're always safe with the current flow.
	 */
	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

static void guc_release(struct intel_engine_cs *engine)
{
	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */

	tasklet_kill(&engine->execlists.tasklet);

	intel_engine_cleanup_common(engine);
	lrc_fini_wa_ctx(engine);
}

static void guc_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */

	engine->resume = guc_resume;

	engine->cops = &guc_context_ops;
	engine->request_alloc = guc_request_alloc;

	engine->emit_flush = gen8_emit_flush_xcs;
	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
	if (INTEL_GEN(engine->i915) >= 12) {
		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
		engine->emit_flush = gen12_emit_flush_xcs;
	}
	engine->set_default_submission = guc_set_default_submission;
}

static void rcs_submission_override(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 12:
		engine->emit_flush = gen12_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
		break;
	case 11:
		engine->emit_flush = gen11_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
		break;
	default:
		engine->emit_flush = gen8_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
		break;
	}
}

static inline void guc_default_irqs(struct intel_engine_cs *engine)
{
	engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
}

int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/*
	 * The setup relies on several assumptions (e.g. irqs always enabled)
	 * that are only valid on gen11+
	 */
	GEM_BUG_ON(INTEL_GEN(i915) < 11);

	tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);

	guc_default_vfuncs(engine);
	guc_default_irqs(engine);

	if (engine->class == RENDER_CLASS)
		rcs_submission_override(engine);

	lrc_init_wa_ctx(engine);

	/* Finally, take ownership and responsibility for cleanup! */
	engine->sanitize = guc_sanitize;
	engine->release = guc_release;

	return 0;
}

void intel_guc_submission_enable(struct intel_guc *guc)
{
	guc_stage_desc_init(guc);

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(guc_to_gt(guc));
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

	/* Note: By the time we're here, GuC may have already been reset */

	guc_interrupts_release(gt);

	guc_stage_desc_fini(guc);
}

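/*
 * GuC submission is used only if the platform/firmware combination supports
 * it and the user has opted in via the enable_guc module parameter.
 */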
static bool __guc_submission_selected(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!intel_guc_submission_is_supported(guc))
		return false;

	return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
	guc->submission_selected = __guc_submission_selected(guc);
}

bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
{
	return engine->set_default_submission == guc_set_default_submission;
}