1 /* $NetBSD: intel_engine_cs.c,v 1.9 2021/12/19 12:40:43 riastradh Exp $ */
2
3 /*
4 * Copyright © 2016 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 */
26
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: intel_engine_cs.c,v 1.9 2021/12/19 12:40:43 riastradh Exp $");
29
30 #include <drm/drm_print.h>
31
32 #include "gem/i915_gem_context.h"
33
34 #include "i915_drv.h"
35
36 #include "intel_context.h"
37 #include "intel_engine.h"
38 #include "intel_engine_pm.h"
39 #include "intel_engine_pool.h"
40 #include "intel_engine_user.h"
41 #include "intel_gt.h"
42 #include "intel_gt_requests.h"
43 #include "intel_lrc.h"
44 #include "intel_reset.h"
45 #include "intel_ring.h"
46
47 /* Haswell does have the CXT_SIZE register, but it does not appear to be
48  * valid. Instead, the docs explain in dwords what is in the context object. The full
49 * size is 70720 bytes, however, the power context and execlist context will
50 * never be saved (power context is stored elsewhere, and execlists don't work
51 * on HSW) - so the final size, including the extra state required for the
52 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
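 * (66944 bytes / 4096 bytes per page = 16.34, hence 17 pages.)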
53 */
54 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
55
56 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
57 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
58 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
59 #define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE)
60 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
61
62 #define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
63
64 #define MAX_MMIO_BASES 3
65 struct engine_info {
66 unsigned int hw_id;
67 u8 class;
68 u8 instance;
69 /* mmio bases table *must* be sorted in reverse gen order */
70 struct engine_mmio_base {
71 u32 gen : 8;
72 u32 base : 24;
73 } mmio_bases[MAX_MMIO_BASES];
74 };
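/*
 * Example: for a table {{.gen = 11}, {.gen = 6}, {.gen = 4}}, a gen9
 * device skips the gen11 entry and matches the gen6 one, because
 * __engine_mmio_base() below picks the first entry whose gen is not
 * newer than the device's.
 */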
75
76 static const struct engine_info intel_engines[] = {
77 [RCS0] = {
78 .hw_id = RCS0_HW,
79 .class = RENDER_CLASS,
80 .instance = 0,
81 .mmio_bases = {
82 { .gen = 1, .base = RENDER_RING_BASE }
83 },
84 },
85 [BCS0] = {
86 .hw_id = BCS0_HW,
87 .class = COPY_ENGINE_CLASS,
88 .instance = 0,
89 .mmio_bases = {
90 { .gen = 6, .base = BLT_RING_BASE }
91 },
92 },
93 [VCS0] = {
94 .hw_id = VCS0_HW,
95 .class = VIDEO_DECODE_CLASS,
96 .instance = 0,
97 .mmio_bases = {
98 { .gen = 11, .base = GEN11_BSD_RING_BASE },
99 { .gen = 6, .base = GEN6_BSD_RING_BASE },
100 { .gen = 4, .base = BSD_RING_BASE }
101 },
102 },
103 [VCS1] = {
104 .hw_id = VCS1_HW,
105 .class = VIDEO_DECODE_CLASS,
106 .instance = 1,
107 .mmio_bases = {
108 { .gen = 11, .base = GEN11_BSD2_RING_BASE },
109 { .gen = 8, .base = GEN8_BSD2_RING_BASE }
110 },
111 },
112 [VCS2] = {
113 .hw_id = VCS2_HW,
114 .class = VIDEO_DECODE_CLASS,
115 .instance = 2,
116 .mmio_bases = {
117 { .gen = 11, .base = GEN11_BSD3_RING_BASE }
118 },
119 },
120 [VCS3] = {
121 .hw_id = VCS3_HW,
122 .class = VIDEO_DECODE_CLASS,
123 .instance = 3,
124 .mmio_bases = {
125 { .gen = 11, .base = GEN11_BSD4_RING_BASE }
126 },
127 },
128 [VECS0] = {
129 .hw_id = VECS0_HW,
130 .class = VIDEO_ENHANCEMENT_CLASS,
131 .instance = 0,
132 .mmio_bases = {
133 { .gen = 11, .base = GEN11_VEBOX_RING_BASE },
134 { .gen = 7, .base = VEBOX_RING_BASE }
135 },
136 },
137 [VECS1] = {
138 .hw_id = VECS1_HW,
139 .class = VIDEO_ENHANCEMENT_CLASS,
140 .instance = 1,
141 .mmio_bases = {
142 { .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
143 },
144 },
145 };
146
147 /**
148 * intel_engine_context_size() - return the size of the context for an engine
149 * @gt: the gt
150 * @class: engine class
151 *
152 * Each engine class may require a different amount of space for a context
153 * image.
154 *
155 * Return: size (in bytes) of an engine class specific context image
156 *
157 * Note: this size includes the HWSP, which is part of the context image
158 * in LRC mode, but does not include the "shared data page" used with
159 * GuC submission. The caller should account for this if using the GuC.
160 */
161 u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
162 {
163 struct intel_uncore *uncore = gt->uncore;
164 u32 cxt_size;
165
166 BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
167
168 switch (class) {
169 case RENDER_CLASS:
170 switch (INTEL_GEN(gt->i915)) {
171 default:
172 MISSING_CASE(INTEL_GEN(gt->i915));
173 return DEFAULT_LR_CONTEXT_RENDER_SIZE;
174 case 12:
175 case 11:
176 return GEN11_LR_CONTEXT_RENDER_SIZE;
177 case 10:
178 return GEN10_LR_CONTEXT_RENDER_SIZE;
179 case 9:
180 return GEN9_LR_CONTEXT_RENDER_SIZE;
181 case 8:
182 return GEN8_LR_CONTEXT_RENDER_SIZE;
183 case 7:
184 if (IS_HASWELL(gt->i915))
185 return HSW_CXT_TOTAL_SIZE;
186
187 cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
188 return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
189 PAGE_SIZE);
190 case 6:
191 cxt_size = intel_uncore_read(uncore, CXT_SIZE);
192 return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
193 PAGE_SIZE);
194 case 5:
195 case 4:
196 /*
197 * There is a discrepancy here between the size reported
198 * by the register and the size of the context layout
199 			 * in the docs. Both are described as authoritative!
200 *
201 * The discrepancy is on the order of a few cachelines,
202 * but the total is under one page (4k), which is our
203 * minimum allocation anyway so it should all come
204 * out in the wash.
205 */
206 cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
207 DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
208 INTEL_GEN(gt->i915),
209 cxt_size * 64,
210 cxt_size - 1);
211 return round_up(cxt_size * 64, PAGE_SIZE);
212 case 3:
213 case 2:
214 /* For the special day when i810 gets merged. */
215 case 1:
216 return 0;
217 }
218 break;
219 default:
220 MISSING_CASE(class);
221 /* fall through */
222 case VIDEO_DECODE_CLASS:
223 case VIDEO_ENHANCEMENT_CLASS:
224 case COPY_ENGINE_CLASS:
225 if (INTEL_GEN(gt->i915) < 8)
226 return 0;
227 return GEN8_LR_CONTEXT_OTHER_SIZE;
228 }
229 }
230
231 static u32 __engine_mmio_base(struct drm_i915_private *i915,
232 const struct engine_mmio_base *bases)
233 {
234 int i;
235
236 for (i = 0; i < MAX_MMIO_BASES; i++)
237 if (INTEL_GEN(i915) >= bases[i].gen)
238 break;
239
240 GEM_BUG_ON(i == MAX_MMIO_BASES);
241 GEM_BUG_ON(!bases[i].base);
242
243 return bases[i].base;
244 }
245
246 static void __sprint_engine_name(struct intel_engine_cs *engine)
247 {
248 /*
249 * Before we know what the uABI name for this engine will be,
250 * we still would like to keep track of this engine in the debug logs.
251 * We throw in a ' here as a reminder that this isn't its final name.
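 * For example, instance 1 of the video decode class shows up as "vcs'1".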
252 */
253 GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
254 intel_engine_class_repr(engine->class),
255 engine->instance) >= sizeof(engine->name));
256 }
257
258 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
259 {
260 /*
261 * Though they added more rings on g4x/ilk, they did not add
262 * per-engine HWSTAM until gen6.
263 */
264 if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
265 return;
266
267 if (INTEL_GEN(engine->i915) >= 3)
268 ENGINE_WRITE(engine, RING_HWSTAM, mask);
269 else
270 ENGINE_WRITE16(engine, RING_HWSTAM, mask);
271 }
272
273 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
274 {
275 /* Mask off all writes into the unknown HWSP */
276 intel_engine_set_hwsp_writemask(engine, ~0u);
277 }
278
279 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
280 {
281 const struct engine_info *info = &intel_engines[id];
282 struct intel_engine_cs *engine;
283
284 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
285 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
286
287 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
288 return -EINVAL;
289
290 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
291 return -EINVAL;
292
293 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
294 return -EINVAL;
295
296 if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
297 return -EINVAL;
298
299 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
300 if (!engine)
301 return -ENOMEM;
302
303 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
304
305 engine->id = id;
306 engine->legacy_idx = INVALID_ENGINE;
307 engine->mask = BIT(id);
308 engine->i915 = gt->i915;
309 engine->gt = gt;
310 engine->uncore = gt->uncore;
311 engine->hw_id = engine->guc_id = info->hw_id;
312 engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
313
314 engine->class = info->class;
315 engine->instance = info->instance;
316 __sprint_engine_name(engine);
317
318 engine->props.heartbeat_interval_ms =
319 CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
320 engine->props.preempt_timeout_ms =
321 CONFIG_DRM_I915_PREEMPT_TIMEOUT;
322 engine->props.stop_timeout_ms =
323 CONFIG_DRM_I915_STOP_TIMEOUT;
324 engine->props.timeslice_duration_ms =
325 CONFIG_DRM_I915_TIMESLICE_DURATION;
326
327 engine->context_size = intel_engine_context_size(gt, engine->class);
328 if (WARN_ON(engine->context_size > BIT(20)))
329 engine->context_size = 0;
330 if (engine->context_size)
331 DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
332
333 /* Nothing to do here, execute in order of dependencies */
334 engine->schedule = NULL;
335
336 ewma__engine_latency_init(&engine->latency);
337 seqlock_init(&engine->stats.lock);
338
339 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
340
341 /* Scrub mmio state on takeover */
342 intel_engine_sanitize_mmio(engine);
343
344 gt->engine_class[info->class][info->instance] = engine;
345 gt->engine[id] = engine;
346
347 gt->i915->engine[id] = engine;
348
349 return 0;
350 }
351
352 static void __setup_engine_capabilities(struct intel_engine_cs *engine)
353 {
354 struct drm_i915_private *i915 = engine->i915;
355
356 if (engine->class == VIDEO_DECODE_CLASS) {
357 /*
358 * HEVC support is present on first engine instance
359 * before Gen11 and on all instances afterwards.
360 */
361 if (INTEL_GEN(i915) >= 11 ||
362 (INTEL_GEN(i915) >= 9 && engine->instance == 0))
363 engine->uabi_capabilities |=
364 I915_VIDEO_CLASS_CAPABILITY_HEVC;
365
366 /*
367 * SFC block is present only on even logical engine
368 * instances.
369 */
370 if ((INTEL_GEN(i915) >= 11 &&
371 RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
372 (INTEL_GEN(i915) >= 9 && engine->instance == 0))
373 engine->uabi_capabilities |=
374 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
375 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
376 if (INTEL_GEN(i915) >= 9)
377 engine->uabi_capabilities |=
378 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
379 }
380 }
381
382 static void intel_setup_engine_capabilities(struct intel_gt *gt)
383 {
384 struct intel_engine_cs *engine;
385 enum intel_engine_id id;
386
387 for_each_engine(engine, gt, id)
388 __setup_engine_capabilities(engine);
389 }
390
391 /**
392 * intel_engines_release() - free the resources allocated for Command Streamers
393 * @gt: pointer to struct intel_gt
394 */
395 void intel_engines_release(struct intel_gt *gt)
396 {
397 struct intel_engine_cs *engine;
398 enum intel_engine_id id;
399
400 /* Decouple the backend; but keep the layout for late GPU resets */
401 for_each_engine(engine, gt, id) {
402 if (!engine->release)
403 continue;
404
405 engine->release(engine);
406 engine->release = NULL;
407
408 memset(&engine->reset, 0, sizeof(engine->reset));
409
410 gt->i915->engine[id] = NULL;
411 }
412 }
413
414 void intel_engines_free(struct intel_gt *gt)
415 {
416 struct intel_engine_cs *engine;
417 enum intel_engine_id id;
418
419 for_each_engine(engine, gt, id) {
420 seqlock_destroy(&engine->stats.lock);
421 kfree(engine);
422 gt->engine[id] = NULL;
423 }
424 }
425
426 /**
427 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
428 * @gt: pointer to struct intel_gt
429 *
430 * Return: non-zero if the initialization failed.
431 */
432 int intel_engines_init_mmio(struct intel_gt *gt)
433 {
434 struct drm_i915_private *i915 = gt->i915;
435 struct intel_device_info *device_info = mkwrite_device_info(i915);
436 const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
437 unsigned int mask = 0;
438 unsigned int i;
439 int err;
440
441 WARN_ON(engine_mask == 0);
442 WARN_ON(engine_mask &
443 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
444
445 if (i915_inject_probe_failure(i915))
446 return -ENODEV;
447
448 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
449 if (!HAS_ENGINE(i915, i))
450 continue;
451
452 err = intel_engine_setup(gt, i);
453 if (err)
454 goto cleanup;
455
456 mask |= BIT(i);
457 }
458
459 /*
460 	 * Catch failures to update the intel_engines table when new
461 	 * engines are added to the driver: warn about and disable the
462 	 * forgotten engines.
463 */
464 if (WARN_ON(mask != engine_mask))
465 device_info->engine_mask = mask;
466
467 RUNTIME_INFO(i915)->num_engines = hweight32(mask);
468
469 intel_gt_check_and_clear_faults(gt);
470
471 intel_setup_engine_capabilities(gt);
472
473 return 0;
474
475 cleanup:
476 intel_engines_free(gt);
477 return err;
478 }
479
480 void intel_engine_init_execlists(struct intel_engine_cs *engine)
481 {
482 struct intel_engine_execlists * const execlists = &engine->execlists;
483
484 execlists->port_mask = 1;
485 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
486 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
487
488 memset(execlists->pending, 0, sizeof(execlists->pending));
489 execlists->active =
490 memset(execlists->inflight, 0, sizeof(execlists->inflight));
491
492 execlists->queue_priority_hint = INT_MIN;
493 i915_sched_init(execlists);
494 }
495
496 static void cleanup_status_page(struct intel_engine_cs *engine)
497 {
498 struct i915_vma *vma;
499
500 /* Prevent writes into HWSP after returning the page to the system */
501 intel_engine_set_hwsp_writemask(engine, ~0u);
502
503 vma = fetch_and_zero(&engine->status_page.vma);
504 if (!vma)
505 return;
506
507 if (!HWS_NEEDS_PHYSICAL(engine->i915))
508 i915_vma_unpin(vma);
509
510 i915_gem_object_unpin_map(vma->obj);
511 i915_gem_object_put(vma->obj);
512 }
513
514 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
515 struct i915_vma *vma)
516 {
517 unsigned int flags;
518
519 flags = PIN_GLOBAL;
520 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
521 /*
522 * On g33, we cannot place HWS above 256MiB, so
523 * restrict its pinning to the low mappable arena.
524 * Though this restriction is not documented for
525 * gen4, gen5, or byt, they also behave similarly
526 * and hang if the HWS is placed at the top of the
527 * GTT. To generalise, it appears that all !llc
528 * platforms have issues with us placing the HWS
529 * above the mappable region (even though we never
530 * actually map it).
531 */
532 flags |= PIN_MAPPABLE;
533 else
534 flags |= PIN_HIGH;
535
536 return i915_vma_pin(vma, 0, 0, flags);
537 }
538
539 static int init_status_page(struct intel_engine_cs *engine)
540 {
541 struct drm_i915_gem_object *obj;
542 struct i915_vma *vma;
543 void *vaddr;
544 int ret;
545
546 /*
547 * Though the HWS register does support 36bit addresses, historically
548 * we have had hangs and corruption reported due to wild writes if
549 * the HWS is placed above 4G. We only allow objects to be allocated
550 * in GFP_DMA32 for i965, and no earlier physical address users had
551 * access to more than 4G.
552 */
553 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
554 if (IS_ERR(obj)) {
555 DRM_ERROR("Failed to allocate status page\n");
556 return PTR_ERR(obj);
557 }
558
559 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
560
561 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
562 if (IS_ERR(vma)) {
563 ret = PTR_ERR(vma);
564 goto err;
565 }
566
567 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
568 if (IS_ERR(vaddr)) {
569 ret = PTR_ERR(vaddr);
570 goto err;
571 }
572
573 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
574 engine->status_page.vma = vma;
575
576 if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
577 ret = pin_ggtt_status_page(engine, vma);
578 if (ret)
579 goto err_unpin;
580 }
581
582 return 0;
583
584 err_unpin:
585 i915_gem_object_unpin_map(obj);
586 err:
587 i915_gem_object_put(obj);
588 return ret;
589 }
590
591 static int engine_setup_common(struct intel_engine_cs *engine)
592 {
593 int err;
594
595 init_llist_head(&engine->barrier_tasks);
596
597 err = init_status_page(engine);
598 if (err)
599 return err;
600
601 intel_engine_init_active(engine, ENGINE_PHYSICAL);
602 intel_engine_init_breadcrumbs(engine);
603 intel_engine_init_execlists(engine);
604 intel_engine_init_cmd_parser(engine);
605 intel_engine_init__pm(engine);
606 intel_engine_init_retire(engine);
607
608 intel_engine_pool_init(&engine->pool);
609
610 /* Use the whole device by default */
611 engine->sseu =
612 intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
613
614 intel_engine_init_workarounds(engine);
615 intel_engine_init_whitelist(engine);
616 intel_engine_init_ctx_wa(engine);
617
618 return 0;
619 }
620
621 struct measure_breadcrumb {
622 struct i915_request rq;
623 struct intel_timeline timeline;
624 struct intel_ring ring;
625 u32 cs[1024];
626 };
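/*
 * A dummy frame: measure_breadcrumb_dw() emits the engine's fini
 * breadcrumb into frame->cs without ever submitting it; only the
 * resulting dword count is kept (engine->emit_fini_breadcrumb_dw).
 */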
627
628 static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
629 {
630 struct measure_breadcrumb *frame;
631 int dw = -ENOMEM;
632
633 GEM_BUG_ON(!engine->gt->scratch);
634
635 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
636 if (!frame)
637 return -ENOMEM;
638
639 if (intel_timeline_init(&frame->timeline,
640 engine->gt,
641 engine->status_page.vma))
642 goto out_frame;
643
644 mutex_lock(&frame->timeline.mutex);
645
646 frame->ring.vaddr = frame->cs;
647 frame->ring.size = sizeof(frame->cs);
648 frame->ring.effective_size = frame->ring.size;
649 intel_ring_update_space(&frame->ring);
650
651 frame->rq.i915 = engine->i915;
652 frame->rq.engine = engine;
653 frame->rq.ring = &frame->ring;
654 rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
655
656 dw = intel_timeline_pin(&frame->timeline);
657 if (dw < 0)
658 goto out_timeline;
659
660 spin_lock_irq(&engine->active.lock);
661 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
662 spin_unlock_irq(&engine->active.lock);
663
664 GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
665
666 intel_timeline_unpin(&frame->timeline);
667
668 out_timeline:
669 mutex_unlock(&frame->timeline.mutex);
670 intel_timeline_fini(&frame->timeline);
671 out_frame:
672 kfree(frame);
673 return dw;
674 }
675
676 void
677 intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
678 {
679 INIT_LIST_HEAD(&engine->active.requests);
680 INIT_LIST_HEAD(&engine->active.hold);
681
682 spin_lock_init(&engine->active.lock);
683 lockdep_set_subclass(&engine->active.lock, subclass);
684
685 /*
686 * Due to an interesting quirk in lockdep's internal debug tracking,
687 * after setting a subclass we must ensure the lock is used. Otherwise,
688 * nr_unused_locks is incremented once too often.
689 */
690 #ifdef CONFIG_DEBUG_LOCK_ALLOC
691 local_irq_disable();
692 lock_map_acquire(&engine->active.lock.dep_map);
693 lock_map_release(&engine->active.lock.dep_map);
694 local_irq_enable();
695 #endif
696 }
697
698 static struct intel_context *
699 create_kernel_context(struct intel_engine_cs *engine)
700 {
701 static struct lock_class_key kernel;
702 struct intel_context *ce;
703 int err;
704
705 ce = intel_context_create(engine);
706 if (IS_ERR(ce))
707 return ce;
708
709 __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
710
711 err = intel_context_pin(ce); /* perma-pin so it is always available */
712 if (err) {
713 intel_context_put(ce);
714 return ERR_PTR(err);
715 }
716
717 /*
718 * Give our perma-pinned kernel timelines a separate lockdep class,
719 * so that we can use them from within the normal user timelines
720 * should we need to inject GPU operations during their request
721 * construction.
722 */
723 lockdep_set_class(&ce->timeline->mutex, &kernel);
724
725 return ce;
726 }
727
728 /**
729 * engine_init_common - initialize engine state which might require hw access
730 * @engine: Engine to initialize.
731 *
732 * Initializes @engine structure members shared between legacy and execlists
733 * submission modes which do require hardware access.
734 *
735 * Typically done at later stages of submission mode specific engine setup.
736 *
737 * Returns zero on success or an error code on failure.
738 */
739 static int engine_init_common(struct intel_engine_cs *engine)
740 {
741 struct intel_context *ce;
742 int ret;
743
744 engine->set_default_submission(engine);
745
746 ret = measure_breadcrumb_dw(engine);
747 if (ret < 0)
748 return ret;
749
750 engine->emit_fini_breadcrumb_dw = ret;
751
752 /*
753 * We may need to do things with the shrinker which
754 * require us to immediately switch back to the default
755 * context. This can cause a problem as pinning the
756 * default context also requires GTT space which may not
757 * be available. To avoid this we always pin the default
758 * context.
759 */
760 ce = create_kernel_context(engine);
761 if (IS_ERR(ce))
762 return PTR_ERR(ce);
763
764 engine->kernel_context = ce;
765
766 return 0;
767 }
768
769 int intel_engines_init(struct intel_gt *gt)
770 {
771 int (*setup)(struct intel_engine_cs *engine);
772 struct intel_engine_cs *engine;
773 enum intel_engine_id id;
774 int err;
775
776 if (HAS_EXECLISTS(gt->i915))
777 setup = intel_execlists_submission_setup;
778 else
779 setup = intel_ring_submission_setup;
780
781 for_each_engine(engine, gt, id) {
782 err = engine_setup_common(engine);
783 if (err)
784 return err;
785
786 err = setup(engine);
787 if (err)
788 return err;
789
790 err = engine_init_common(engine);
791 if (err)
792 return err;
793
794 intel_engine_add_user(engine);
795 }
796
797 return 0;
798 }
799
800 /**
801 * intel_engine_cleanup_common - cleans up the engine state created by
802 * the common initializers.
803 * @engine: Engine to cleanup.
804 *
805 * This cleans up everything created by the common helpers.
806 */
807 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
808 {
809 GEM_BUG_ON(!list_empty(&engine->active.requests));
810 tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
811
812 cleanup_status_page(engine);
813
814 intel_engine_fini_retire(engine);
815 intel_engine_fini__pm(engine);
816 intel_engine_pool_fini(&engine->pool);
817 intel_engine_fini_breadcrumbs(engine);
818 intel_engine_cleanup_cmd_parser(engine);
819
820 if (engine->default_state)
821 i915_gem_object_put(engine->default_state);
822
823 if (engine->kernel_context) {
824 intel_context_unpin(engine->kernel_context);
825 intel_context_put(engine->kernel_context);
826 }
827 GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
828
829 intel_wa_list_free(&engine->ctx_wa_list);
830 intel_wa_list_free(&engine->wa_list);
831 intel_wa_list_free(&engine->whitelist);
832
833 spin_lock_destroy(&engine->active.lock);
834 }
835
836 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
837 {
838 struct drm_i915_private *i915 = engine->i915;
839
840 u64 acthd;
841
842 if (INTEL_GEN(i915) >= 8)
843 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
844 else if (INTEL_GEN(i915) >= 4)
845 acthd = ENGINE_READ(engine, RING_ACTHD);
846 else
847 acthd = ENGINE_READ(engine, ACTHD);
848
849 return acthd;
850 }
851
852 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
853 {
854 u64 bbaddr;
855
856 if (INTEL_GEN(engine->i915) >= 8)
857 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
858 else
859 bbaddr = ENGINE_READ(engine, RING_BBADDR);
860
861 return bbaddr;
862 }
863
864 static unsigned long stop_timeout(const struct intel_engine_cs *engine)
865 {
866 if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
867 return 0;
868
869 /*
870 * If we are doing a normal GPU reset, we can take our time and allow
871 * the engine to quiesce. We've stopped submission to the engine, and
872 * if we wait long enough an innocent context should complete and
873 * leave the engine idle. So they should not be caught unaware by
874 * the forthcoming GPU reset (which usually follows the stop_cs)!
875 */
876 return READ_ONCE(engine->props.stop_timeout_ms);
877 }
878
879 int intel_engine_stop_cs(struct intel_engine_cs *engine)
880 {
881 struct intel_uncore *uncore = engine->uncore;
882 const u32 base = engine->mmio_base;
883 const i915_reg_t mode = RING_MI_MODE(base);
884 int err;
885
886 if (INTEL_GEN(engine->i915) < 3)
887 return -ENODEV;
888
889 ENGINE_TRACE(engine, "\n");
890
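	/*
	 * RING_MI_MODE is a masked register: the high 16 bits select
	 * which of the low 16 bits are written, so this sets STOP_RING
	 * without disturbing the other mode bits.
	 */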
891 intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
892
893 err = 0;
894 if (__intel_wait_for_register_fw(uncore,
895 mode, MODE_IDLE, MODE_IDLE,
896 1000, stop_timeout(engine),
897 NULL)) {
898 ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
899 err = -ETIMEDOUT;
900 }
901
902 	/* A final mmio read, to hopefully flush any pending GPU writes to memory */
903 intel_uncore_posting_read_fw(uncore, mode);
904
905 return err;
906 }
907
908 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
909 {
910 ENGINE_TRACE(engine, "\n");
911
912 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
913 }
914
915 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
916 {
917 switch (type) {
918 case I915_CACHE_NONE: return " uncached";
919 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
920 case I915_CACHE_L3_LLC: return " L3+LLC";
921 case I915_CACHE_WT: return " WT";
922 default: return "";
923 }
924 }
925
926 static u32
927 read_subslice_reg(const struct intel_engine_cs *engine,
928 int slice, int subslice, i915_reg_t reg)
929 {
930 struct drm_i915_private *i915 = engine->i915;
931 struct intel_uncore *uncore = engine->uncore;
932 u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
933 enum forcewake_domains fw_domains;
934
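	/*
	 * GEN8_MCR_SELECTOR steers reads of multicast registers to a
	 * single slice/subslice; the previous steering is restored below
	 * once the register has been read.
	 */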
935 if (INTEL_GEN(i915) >= 11) {
936 mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
937 mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
938 } else {
939 mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
940 mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
941 }
942
943 fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
944 FW_REG_READ);
945 fw_domains |= intel_uncore_forcewake_for_reg(uncore,
946 GEN8_MCR_SELECTOR,
947 FW_REG_READ | FW_REG_WRITE);
948
949 spin_lock_irq(&uncore->lock);
950 intel_uncore_forcewake_get__locked(uncore, fw_domains);
951
952 old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
953
954 mcr &= ~mcr_mask;
955 mcr |= mcr_ss;
956 intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
957
958 val = intel_uncore_read_fw(uncore, reg);
959
960 mcr &= ~mcr_mask;
961 mcr |= old_mcr & mcr_mask;
962
963 intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
964
965 intel_uncore_forcewake_put__locked(uncore, fw_domains);
966 spin_unlock_irq(&uncore->lock);
967
968 return val;
969 }
970
971 /* NB: please notice the memset */
972 void intel_engine_get_instdone(const struct intel_engine_cs *engine,
973 struct intel_instdone *instdone)
974 {
975 struct drm_i915_private *i915 = engine->i915;
976 const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
977 struct intel_uncore *uncore = engine->uncore;
978 u32 mmio_base = engine->mmio_base;
979 int slice;
980 int subslice;
981
982 memset(instdone, 0, sizeof(*instdone));
983
984 switch (INTEL_GEN(i915)) {
985 default:
986 instdone->instdone =
987 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
988
989 if (engine->id != RCS0)
990 break;
991
992 instdone->slice_common =
993 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
994 for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
995 instdone->sampler[slice][subslice] =
996 read_subslice_reg(engine, slice, subslice,
997 GEN7_SAMPLER_INSTDONE);
998 instdone->row[slice][subslice] =
999 read_subslice_reg(engine, slice, subslice,
1000 GEN7_ROW_INSTDONE);
1001 }
1002 break;
1003 case 7:
1004 instdone->instdone =
1005 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1006
1007 if (engine->id != RCS0)
1008 break;
1009
1010 instdone->slice_common =
1011 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1012 instdone->sampler[0][0] =
1013 intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
1014 instdone->row[0][0] =
1015 intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
1016
1017 break;
1018 case 6:
1019 case 5:
1020 case 4:
1021 instdone->instdone =
1022 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1023 if (engine->id == RCS0)
1024 /* HACK: Using the wrong struct member */
1025 instdone->slice_common =
1026 intel_uncore_read(uncore, GEN4_INSTDONE1);
1027 break;
1028 case 3:
1029 case 2:
1030 instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
1031 break;
1032 }
1033 }
1034
1035 static bool ring_is_idle(struct intel_engine_cs *engine)
1036 {
1037 bool idle = true;
1038
1039 if (I915_SELFTEST_ONLY(!engine->mmio_base))
1040 return true;
1041
1042 if (!intel_engine_pm_get_if_awake(engine))
1043 return true;
1044
1045 /* First check that no commands are left in the ring */
1046 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1047 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1048 idle = false;
1049
1050 /* No bit for gen2, so assume the CS parser is idle */
1051 if (INTEL_GEN(engine->i915) > 2 &&
1052 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1053 idle = false;
1054
1055 intel_engine_pm_put(engine);
1056
1057 return idle;
1058 }
1059
1060 void intel_engine_flush_submission(struct intel_engine_cs *engine)
1061 {
1062 struct tasklet_struct *t = &engine->execlists.tasklet;
1063
1064 if (__tasklet_is_scheduled(t)) {
1065 #ifdef __NetBSD__
1066 int s = splsoftserial();
1067 #else
1068 local_bh_disable();
1069 #endif
1070 if (tasklet_trylock(t)) {
1071 /* Must wait for any GPU reset in progress. */
1072 if (__tasklet_is_enabled(t))
1073 t->func(t->data);
1074 tasklet_unlock(t);
1075 }
1076 #ifdef __NetBSD__
1077 splx(s);
1078 #else
1079 local_bh_enable();
1080 #endif
1081 }
1082
1083 /* Otherwise flush the tasklet if it was running on another cpu */
1084 tasklet_unlock_wait(t);
1085 }
1086
1087 /**
1088 * intel_engine_is_idle() - Report if the engine has finished processing all work
1089 * @engine: the intel_engine_cs
1090 *
1091 * Return true if there are no requests pending, nothing left to be submitted
1092 * to hardware, and the engine is idle.
1093 */
1094 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1095 {
1096 /* More white lies, if wedged, hw state is inconsistent */
1097 if (intel_gt_is_wedged(engine->gt))
1098 return true;
1099
1100 if (!intel_engine_pm_is_awake(engine))
1101 return true;
1102
1103 /* Waiting to drain ELSP? */
1104 if (execlists_active(&engine->execlists)) {
1105 #ifdef __NetBSD__
1106 xc_barrier(XC_HIGHPRI);
1107 #else
1108 synchronize_hardirq(engine->i915->drm.pdev->irq);
1109 #endif
1110
1111 intel_engine_flush_submission(engine);
1112
1113 if (execlists_active(&engine->execlists))
1114 return false;
1115 }
1116
1117 /* ELSP is empty, but there are ready requests? E.g. after reset */
1118 if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
1119 return false;
1120
1121 /* Ring stopped? */
1122 return ring_is_idle(engine);
1123 }
1124
1125 bool intel_engines_are_idle(struct intel_gt *gt)
1126 {
1127 struct intel_engine_cs *engine;
1128 enum intel_engine_id id;
1129
1130 /*
1131 * If the driver is wedged, HW state may be very inconsistent and
1132 * report that it is still busy, even though we have stopped using it.
1133 */
1134 if (intel_gt_is_wedged(gt))
1135 return true;
1136
1137 /* Already parked (and passed an idleness test); must still be idle */
1138 if (!READ_ONCE(gt->awake))
1139 return true;
1140
1141 for_each_engine(engine, gt, id) {
1142 if (!intel_engine_is_idle(engine))
1143 return false;
1144 }
1145
1146 return true;
1147 }
1148
1149 void intel_engines_reset_default_submission(struct intel_gt *gt)
1150 {
1151 struct intel_engine_cs *engine;
1152 enum intel_engine_id id;
1153
1154 for_each_engine(engine, gt, id)
1155 engine->set_default_submission(engine);
1156 }
1157
1158 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1159 {
1160 switch (INTEL_GEN(engine->i915)) {
1161 case 2:
1162 return false; /* uses physical not virtual addresses */
1163 case 3:
1164 /* maybe only uses physical not virtual addresses */
1165 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1166 case 4:
1167 return !IS_I965G(engine->i915); /* who knows! */
1168 case 6:
1169 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1170 default:
1171 return true;
1172 }
1173 }
1174
1175 static int print_sched_attr(struct drm_i915_private *i915,
1176 const struct i915_sched_attr *attr,
1177 char *buf, int x, int len)
1178 {
1179 if (attr->priority == I915_PRIORITY_INVALID)
1180 return x;
1181
1182 x += snprintf(buf + x, len - x,
1183 " prio=%d", attr->priority);
1184
1185 return x;
1186 }
1187
1188 static void print_request(struct drm_printer *m,
1189 struct i915_request *rq,
1190 const char *prefix)
1191 {
1192 const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
1193 char buf[80] = "";
1194 int x = 0;
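	/* Markers: '!' completed, '*' started; '+' signaled, '-' signaling enabled. */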
1195
1196 x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
1197
1198 drm_printf(m, "%s %"PRIx64":%"PRIx64"%s%s %s @ %dms: %s\n",
1199 prefix,
1200 (uint64_t)rq->fence.context, (uint64_t)rq->fence.seqno,
1201 i915_request_completed(rq) ? "!" :
1202 i915_request_started(rq) ? "*" :
1203 "",
1204 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1205 &rq->fence.flags) ? "+" :
1206 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
1207 &rq->fence.flags) ? "-" :
1208 "",
1209 buf,
1210 jiffies_to_msecs(jiffies - rq->emitted_jiffies),
1211 name);
1212 }
1213
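/*
 * Renamed, presumably to avoid clashing with an existing hexdump symbol
 * in the NetBSD kernel. As with hexdump(1), repeated rows are collapsed
 * into a single "*" line.
 */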
1214 #define hexdump intel_hexdump
1215
1216 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
1217 {
1218 const size_t rowsize = 8 * sizeof(u32);
1219 const void *prev = NULL;
1220 bool skip = false;
1221 size_t pos;
1222
1223 for (pos = 0; pos < len; pos += rowsize) {
1224 char line[128];
1225
1226 if (prev && !memcmp(prev, buf + pos, rowsize)) {
1227 if (!skip) {
1228 drm_printf(m, "*\n");
1229 skip = true;
1230 }
1231 continue;
1232 }
1233
1234 WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
1235 rowsize, sizeof(u32),
1236 line, sizeof(line),
1237 false) >= sizeof(line));
1238 drm_printf(m, "[%04zx] %s\n", pos, line);
1239
1240 prev = buf + pos;
1241 skip = false;
1242 }
1243 }
1244
1245 static struct intel_timeline *get_timeline(struct i915_request *rq)
1246 {
1247 struct intel_timeline *tl;
1248
1249 /*
1250 * Even though we are holding the engine->active.lock here, there
1251 * is no control over the submission queue per-se and we are
1252 * inspecting the active state at a random point in time, with an
1253 * unknown queue. Play safe and make sure the timeline remains valid.
1254 * (Only being used for pretty printing, one extra kref shouldn't
1255 * cause a camel stampede!)
1256 */
1257 rcu_read_lock();
1258 tl = rcu_dereference(rq->timeline);
1259 if (!kref_get_unless_zero(&tl->kref))
1260 tl = NULL;
1261 rcu_read_unlock();
1262
1263 return tl;
1264 }
1265
1266 static const char *repr_timer(const struct timer_list *t)
1267 {
1268 #ifdef __NetBSD__
1269 if (!callout_active(__UNCONST(&t->tl_callout)))
1270 return "inactive";
1271
1272 if (callout_pending(__UNCONST(&t->tl_callout)))
1273 return "pending";
1274 #else
1275 if (!READ_ONCE(t->expires))
1276 return "inactive";
1277
1278 if (timer_pending(t))
1279 return "active";
1280 #endif
1281
1282 return "expired";
1283 }
1284
1285 static void intel_engine_print_registers(struct intel_engine_cs *engine,
1286 struct drm_printer *m)
1287 {
1288 struct drm_i915_private *dev_priv = engine->i915;
1289 struct intel_engine_execlists * const execlists = &engine->execlists;
1290 u64 addr;
1291
1292 if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
1293 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
1294 drm_printf(m, "\tRING_START: 0x%08x\n",
1295 ENGINE_READ(engine, RING_START));
1296 drm_printf(m, "\tRING_HEAD: 0x%08x\n",
1297 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
1298 drm_printf(m, "\tRING_TAIL: 0x%08x\n",
1299 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
1300 drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
1301 ENGINE_READ(engine, RING_CTL),
1302 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
1303 if (INTEL_GEN(engine->i915) > 2) {
1304 drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
1305 ENGINE_READ(engine, RING_MI_MODE),
1306 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
1307 }
1308
1309 if (INTEL_GEN(dev_priv) >= 6) {
1310 drm_printf(m, "\tRING_IMR: %08x\n",
1311 ENGINE_READ(engine, RING_IMR));
1312 }
1313
1314 addr = intel_engine_get_active_head(engine);
1315 drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
1316 upper_32_bits(addr), lower_32_bits(addr));
1317 addr = intel_engine_get_last_batch_head(engine);
1318 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
1319 upper_32_bits(addr), lower_32_bits(addr));
1320 if (INTEL_GEN(dev_priv) >= 8)
1321 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
1322 else if (INTEL_GEN(dev_priv) >= 4)
1323 addr = ENGINE_READ(engine, RING_DMA_FADD);
1324 else
1325 addr = ENGINE_READ(engine, DMA_FADD_I8XX);
1326 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
1327 upper_32_bits(addr), lower_32_bits(addr));
1328 if (INTEL_GEN(dev_priv) >= 4) {
1329 drm_printf(m, "\tIPEIR: 0x%08x\n",
1330 ENGINE_READ(engine, RING_IPEIR));
1331 drm_printf(m, "\tIPEHR: 0x%08x\n",
1332 ENGINE_READ(engine, RING_IPEHR));
1333 } else {
1334 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
1335 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
1336 }
1337
1338 if (HAS_EXECLISTS(dev_priv)) {
1339 struct i915_request * const *port, *rq;
1340 const u32 *hws =
1341 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
1342 const u8 num_entries = execlists->csb_size;
1343 unsigned int idx;
1344 u8 read, write;
1345
1346 drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
1347 #ifdef __NetBSD__ /* XXX sigh */
1348 "<abstraction violation>",
1349 "<abstraction violation>",
1350 #else
1351 yesno(test_bit(TASKLET_STATE_SCHED,
1352 &engine->execlists.tasklet.state)),
1353 enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
1354 #endif
1355 repr_timer(&engine->execlists.preempt),
1356 repr_timer(&engine->execlists.timer));
1357
1358 read = execlists->csb_head;
1359 write = READ_ONCE(*execlists->csb_write);
1360
1361 drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
1362 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
1363 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
1364 read, write, num_entries);
1365
1366 if (read >= num_entries)
1367 read = 0;
1368 if (write >= num_entries)
1369 write = 0;
1370 if (read > write)
1371 write += num_entries;
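		/*
		 * E.g. read=5, write=2, entries=6: write becomes 8 and
		 * the loop below prints CSB slots 0, 1 and 2.
		 */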
1372 while (read < write) {
1373 idx = ++read % num_entries;
1374 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
1375 idx, hws[idx * 2], hws[idx * 2 + 1]);
1376 }
1377
1378 #ifdef __NetBSD__
1379 int s = execlists_active_lock_bh(execlists);
1380 #else
1381 execlists_active_lock_bh(execlists);
1382 #endif
1383 rcu_read_lock();
1384 for (port = execlists->active; (rq = *port); port++) {
1385 char hdr[80];
1386 int len;
1387
1388 len = snprintf(hdr, sizeof(hdr),
1389 "\t\tActive[%d]: ",
1390 (int)(port - execlists->active));
1391 if (!i915_request_signaled(rq)) {
1392 struct intel_timeline *tl = get_timeline(rq);
1393
1394 len += snprintf(hdr + len, sizeof(hdr) - len,
1395 "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
1396 i915_ggtt_offset(rq->ring->vma),
1397 tl ? tl->hwsp_offset : 0,
1398 hwsp_seqno(rq));
1399
1400 if (tl)
1401 intel_timeline_put(tl);
1402 }
1403 snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1404 print_request(m, rq, hdr);
1405 }
1406 for (port = execlists->pending; (rq = *port); port++) {
1407 struct intel_timeline *tl = get_timeline(rq);
1408 char hdr[80];
1409
1410 snprintf(hdr, sizeof(hdr),
1411 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
1412 (int)(port - execlists->pending),
1413 i915_ggtt_offset(rq->ring->vma),
1414 tl ? tl->hwsp_offset : 0,
1415 hwsp_seqno(rq));
1416 print_request(m, rq, hdr);
1417
1418 if (tl)
1419 intel_timeline_put(tl);
1420 }
1421 rcu_read_unlock();
1422 #ifdef __NetBSD__
1423 execlists_active_unlock_bh(execlists, s);
1424 #else
1425 execlists_active_unlock_bh(execlists);
1426 #endif
1427 } else if (INTEL_GEN(dev_priv) > 6) {
1428 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
1429 ENGINE_READ(engine, RING_PP_DIR_BASE));
1430 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
1431 ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
1432 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
1433 ENGINE_READ(engine, RING_PP_DIR_DCLV));
1434 }
1435 }
1436
1437 static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
1438 {
1439 void *ring;
1440 int size;
1441
1442 drm_printf(m,
1443 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
1444 rq->head, rq->postfix, rq->tail,
1445 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
1446 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
1447
1448 size = rq->tail - rq->head;
1449 if (rq->tail < rq->head)
1450 size += rq->ring->size;
1451
1452 ring = kmalloc(size, GFP_ATOMIC);
1453 if (ring) {
1454 const void *vaddr = rq->ring->vaddr;
1455 unsigned int head = rq->head;
1456 unsigned int len = 0;
1457
1458 if (rq->tail < head) {
1459 len = rq->ring->size - head;
1460 memcpy(ring, vaddr + head, len);
1461 head = 0;
1462 }
1463 memcpy(ring + len, vaddr + head, size - len);
1464
1465 hexdump(m, ring, size);
1466 kfree(ring);
1467 }
1468 }
1469
1470 static unsigned long list_count(struct list_head *list)
1471 {
1472 struct list_head *pos;
1473 unsigned long count = 0;
1474
1475 list_for_each(pos, list)
1476 count++;
1477
1478 return count;
1479 }
1480
1481 void intel_engine_dump(struct intel_engine_cs *engine,
1482 struct drm_printer *m,
1483 const char *header, ...)
1484 {
1485 struct i915_gpu_error * const error = &engine->i915->gpu_error;
1486 struct i915_request *rq;
1487 intel_wakeref_t wakeref;
1488 unsigned long flags;
1489
1490 if (header) {
1491 va_list ap;
1492
1493 va_start(ap, header);
1494 drm_vprintf(m, header, &ap);
1495 va_end(ap);
1496 }
1497
1498 if (intel_gt_is_wedged(engine->gt))
1499 drm_printf(m, "*** WEDGED ***\n");
1500
1501 drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
1502 drm_printf(m, "\tBarriers?: %s\n",
1503 yesno(!llist_empty(&engine->barrier_tasks)));
1504 drm_printf(m, "\tLatency: %luus\n",
1505 ewma__engine_latency_read(&engine->latency));
1506
1507 rcu_read_lock();
1508 rq = READ_ONCE(engine->heartbeat.systole);
1509 if (rq)
1510 drm_printf(m, "\tHeartbeat: %d ms ago\n",
1511 jiffies_to_msecs(jiffies - rq->emitted_jiffies));
1512 rcu_read_unlock();
1513 drm_printf(m, "\tReset count: %d (global %d)\n",
1514 i915_reset_engine_count(error, engine),
1515 i915_reset_count(error));
1516
1517 drm_printf(m, "\tRequests:\n");
1518
1519 spin_lock_irqsave(&engine->active.lock, flags);
1520 rq = intel_engine_find_active_request(engine);
1521 if (rq) {
1522 struct intel_timeline *tl = get_timeline(rq);
1523
1524 print_request(m, rq, "\t\tactive ");
1525
1526 drm_printf(m, "\t\tring->start: 0x%08x\n",
1527 i915_ggtt_offset(rq->ring->vma));
1528 drm_printf(m, "\t\tring->head: 0x%08x\n",
1529 rq->ring->head);
1530 drm_printf(m, "\t\tring->tail: 0x%08x\n",
1531 rq->ring->tail);
1532 drm_printf(m, "\t\tring->emit: 0x%08x\n",
1533 rq->ring->emit);
1534 drm_printf(m, "\t\tring->space: 0x%08x\n",
1535 rq->ring->space);
1536
1537 if (tl) {
1538 drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
1539 tl->hwsp_offset);
1540 intel_timeline_put(tl);
1541 }
1542
1543 print_request_ring(m, rq);
1544
1545 if (rq->context->lrc_reg_state) {
1546 drm_printf(m, "Logical Ring Context:\n");
1547 hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
1548 }
1549 }
1550 drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
1551 spin_unlock_irqrestore(&engine->active.lock, flags);
1552
1553 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
1554 wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
1555 if (wakeref) {
1556 intel_engine_print_registers(engine, m);
1557 intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1558 } else {
1559 drm_printf(m, "\tDevice is asleep; skipping register dump\n");
1560 }
1561
1562 intel_execlists_show_requests(engine, m, print_request, 8);
1563
1564 drm_printf(m, "HWSP:\n");
1565 hexdump(m, engine->status_page.addr, PAGE_SIZE);
1566
1567 drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
1568
1569 intel_engine_print_breadcrumbs(engine, m);
1570 }
1571
1572 /**
1573 * intel_enable_engine_stats() - Enable engine busy tracking on engine
1574 * @engine: engine to enable stats collection
1575 *
1576 * Start collecting the engine busyness data for @engine.
1577 *
1578 * Returns 0 on success or a negative error code.
1579 */
1580 int intel_enable_engine_stats(struct intel_engine_cs *engine)
1581 {
1582 struct intel_engine_execlists *execlists = &engine->execlists;
1583 unsigned long flags;
1584 int err = 0;
1585
1586 if (!intel_engine_supports_stats(engine))
1587 return -ENODEV;
1588
1589 #ifdef __NetBSD__
1590 int s = execlists_active_lock_bh(execlists);
1591 #else
1592 execlists_active_lock_bh(execlists);
1593 #endif
1594 write_seqlock_irqsave(&engine->stats.lock, flags);
1595
1596 if (unlikely(engine->stats.enabled == ~0)) {
1597 err = -EBUSY;
1598 goto unlock;
1599 }
1600
1601 if (engine->stats.enabled++ == 0) {
1602 struct i915_request * const *port;
1603 struct i915_request *rq;
1604
1605 engine->stats.enabled_at = ktime_get();
1606
1607 /* XXX submission method oblivious? */
1608 for (port = execlists->active; (rq = *port); port++)
1609 engine->stats.active++;
1610
1611 for (port = execlists->pending; (rq = *port); port++) {
1612 /* Exclude any contexts already counted in active */
1613 if (!intel_context_inflight_count(rq->context))
1614 engine->stats.active++;
1615 }
1616
1617 if (engine->stats.active)
1618 engine->stats.start = engine->stats.enabled_at;
1619 }
1620
1621 unlock:
1622 write_sequnlock_irqrestore(&engine->stats.lock, flags);
1623 #ifdef __NetBSD__
1624 execlists_active_unlock_bh(execlists, s);
1625 #else
1626 execlists_active_unlock_bh(execlists);
1627 #endif
1628
1629 return err;
1630 }
1631
1632 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
1633 {
1634 ktime_t total = engine->stats.total;
1635
1636 /*
1637 * If the engine is executing something at the moment
1638 * add it to the total.
1639 */
1640 if (engine->stats.active)
1641 total = ktime_add(total,
1642 ktime_sub(ktime_get(), engine->stats.start));
1643
1644 return total;
1645 }
1646
1647 /**
1648 * intel_engine_get_busy_time() - Return current accumulated engine busyness
1649 * @engine: engine to report on
1650 *
1651 * Returns accumulated time @engine was busy since engine stats were enabled.
1652 */
1653 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
1654 {
1655 unsigned int seq;
1656 ktime_t total;
1657
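	/* Seqlock read loop: retry if the stats were updated concurrently. */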
1658 do {
1659 seq = read_seqbegin(&engine->stats.lock);
1660 total = __intel_engine_get_busy_time(engine);
1661 } while (read_seqretry(&engine->stats.lock, seq));
1662
1663 return total;
1664 }
1665
1666 /**
1667 * intel_disable_engine_stats() - Disable engine busy tracking on engine
1668 * @engine: engine to disable stats collection
1669 *
1670 * Stops collecting the engine busyness data for @engine.
1671 */
1672 void intel_disable_engine_stats(struct intel_engine_cs *engine)
1673 {
1674 unsigned long flags;
1675
1676 if (!intel_engine_supports_stats(engine))
1677 return;
1678
1679 write_seqlock_irqsave(&engine->stats.lock, flags);
1680 WARN_ON_ONCE(engine->stats.enabled == 0);
1681 if (--engine->stats.enabled == 0) {
1682 engine->stats.total = __intel_engine_get_busy_time(engine);
1683 engine->stats.active = 0;
1684 }
1685 write_sequnlock_irqrestore(&engine->stats.lock, flags);
1686 }
1687
1688 static bool match_ring(struct i915_request *rq)
1689 {
1690 u32 ring = ENGINE_READ(rq->engine, RING_START);
1691
1692 return ring == i915_ggtt_offset(rq->ring->vma);
1693 }
1694
1695 struct i915_request *
1696 intel_engine_find_active_request(struct intel_engine_cs *engine)
1697 {
1698 struct i915_request *request, *active = NULL;
1699
1700 /*
1701 	 * We are called by error capture, reset and engine-state dumping
1702 	 * code at random points in time. In particular, note that none of these is
1703 * crucially ordered with an interrupt. After a hang, the GPU is dead
1704 * and we assume that no more writes can happen (we waited long enough
1705 * for all writes that were in transaction to be flushed) - adding an
1706 * extra delay for a recent interrupt is pointless. Hence, we do
1707 * not need an engine->irq_seqno_barrier() before the seqno reads.
1708 * At all other times, we must assume the GPU is still running, but
1709 * we only care about the snapshot of this moment.
1710 */
1711 lockdep_assert_held(&engine->active.lock);
1712 list_for_each_entry(request, &engine->active.requests, sched.link) {
1713 if (i915_request_completed(request))
1714 continue;
1715
1716 if (!i915_request_started(request))
1717 continue;
1718
1719 /* More than one preemptible request may match! */
1720 if (!match_ring(request))
1721 continue;
1722
1723 active = request;
1724 break;
1725 }
1726
1727 return active;
1728 }
1729
1730 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1731 #include "mock_engine.c"
1732 #include "selftest_engine.c"
1733 #include "selftest_engine_cs.c"
1734 #endif
1735