1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2016 Intel Corporation
4 */
5
6 #include <linux/string_helpers.h>
7
8 #include <drm/drm_print.h>
9
10 #include "gem/i915_gem_context.h"
11 #include "gem/i915_gem_internal.h"
12 #include "gt/intel_gt_print.h"
13 #include "gt/intel_gt_regs.h"
14
15 #include "i915_cmd_parser.h"
16 #include "i915_drv.h"
17 #include "i915_irq.h"
18 #include "i915_reg.h"
19 #include "intel_breadcrumbs.h"
20 #include "intel_context.h"
21 #include "intel_engine.h"
22 #include "intel_engine_pm.h"
23 #include "intel_engine_regs.h"
24 #include "intel_engine_user.h"
25 #include "intel_execlists_submission.h"
26 #include "intel_gt.h"
27 #include "intel_gt_mcr.h"
28 #include "intel_gt_pm.h"
29 #include "intel_gt_requests.h"
30 #include "intel_lrc.h"
31 #include "intel_lrc_reg.h"
32 #include "intel_reset.h"
33 #include "intel_ring.h"
34 #include "uc/intel_guc_submission.h"
35
36 /* Haswell does have the CXT_SIZE register however it does not appear to be
37 * valid. Now, docs explain in dwords what is in the context object. The full
38 * size is 70720 bytes, however, the power context and execlist context will
39 * never be saved (power context is stored elsewhere, and execlists don't work
40 * on HSW) - so the final size, including the extra state required for the
41 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
42 */
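/* Assuming 4 KiB pages: 66944 bytes / 4096 = ~16.3, hence the round up to 17 pages. */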
43 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
44
45 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
46 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
47 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
48 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
49
50 #define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
51
52 #define MAX_MMIO_BASES 3
53 struct engine_info {
54 u8 class;
55 u8 instance;
56 /* mmio bases table *must* be sorted in reverse graphics_ver order */
57 struct engine_mmio_base {
58 u32 graphics_ver : 8;
59 u32 base : 24;
60 } mmio_bases[MAX_MMIO_BASES];
61 };
62
63 static const struct engine_info intel_engines[] = {
64 [RCS0] = {
65 .class = RENDER_CLASS,
66 .instance = 0,
67 .mmio_bases = {
68 { .graphics_ver = 1, .base = RENDER_RING_BASE }
69 },
70 },
71 [BCS0] = {
72 .class = COPY_ENGINE_CLASS,
73 .instance = 0,
74 .mmio_bases = {
75 { .graphics_ver = 6, .base = BLT_RING_BASE }
76 },
77 },
78 [BCS1] = {
79 .class = COPY_ENGINE_CLASS,
80 .instance = 1,
81 .mmio_bases = {
82 { .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
83 },
84 },
85 [BCS2] = {
86 .class = COPY_ENGINE_CLASS,
87 .instance = 2,
88 .mmio_bases = {
89 { .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
90 },
91 },
92 [BCS3] = {
93 .class = COPY_ENGINE_CLASS,
94 .instance = 3,
95 .mmio_bases = {
96 { .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
97 },
98 },
99 [BCS4] = {
100 .class = COPY_ENGINE_CLASS,
101 .instance = 4,
102 .mmio_bases = {
103 { .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
104 },
105 },
106 [BCS5] = {
107 .class = COPY_ENGINE_CLASS,
108 .instance = 5,
109 .mmio_bases = {
110 { .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
111 },
112 },
113 [BCS6] = {
114 .class = COPY_ENGINE_CLASS,
115 .instance = 6,
116 .mmio_bases = {
117 { .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
118 },
119 },
120 [BCS7] = {
121 .class = COPY_ENGINE_CLASS,
122 .instance = 7,
123 .mmio_bases = {
124 { .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
125 },
126 },
127 [BCS8] = {
128 .class = COPY_ENGINE_CLASS,
129 .instance = 8,
130 .mmio_bases = {
131 { .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
132 },
133 },
134 [VCS0] = {
135 .class = VIDEO_DECODE_CLASS,
136 .instance = 0,
137 .mmio_bases = {
138 { .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
139 { .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
140 { .graphics_ver = 4, .base = BSD_RING_BASE }
141 },
142 },
143 [VCS1] = {
144 .class = VIDEO_DECODE_CLASS,
145 .instance = 1,
146 .mmio_bases = {
147 { .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
148 { .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
149 },
150 },
151 [VCS2] = {
152 .class = VIDEO_DECODE_CLASS,
153 .instance = 2,
154 .mmio_bases = {
155 { .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
156 },
157 },
158 [VCS3] = {
159 .class = VIDEO_DECODE_CLASS,
160 .instance = 3,
161 .mmio_bases = {
162 { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
163 },
164 },
165 [VCS4] = {
166 .class = VIDEO_DECODE_CLASS,
167 .instance = 4,
168 .mmio_bases = {
169 { .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
170 },
171 },
172 [VCS5] = {
173 .class = VIDEO_DECODE_CLASS,
174 .instance = 5,
175 .mmio_bases = {
176 { .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
177 },
178 },
179 [VCS6] = {
180 .class = VIDEO_DECODE_CLASS,
181 .instance = 6,
182 .mmio_bases = {
183 { .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
184 },
185 },
186 [VCS7] = {
187 .class = VIDEO_DECODE_CLASS,
188 .instance = 7,
189 .mmio_bases = {
190 { .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
191 },
192 },
193 [VECS0] = {
194 .class = VIDEO_ENHANCEMENT_CLASS,
195 .instance = 0,
196 .mmio_bases = {
197 { .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
198 { .graphics_ver = 7, .base = VEBOX_RING_BASE }
199 },
200 },
201 [VECS1] = {
202 .class = VIDEO_ENHANCEMENT_CLASS,
203 .instance = 1,
204 .mmio_bases = {
205 { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
206 },
207 },
208 [VECS2] = {
209 .class = VIDEO_ENHANCEMENT_CLASS,
210 .instance = 2,
211 .mmio_bases = {
212 { .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
213 },
214 },
215 [VECS3] = {
216 .class = VIDEO_ENHANCEMENT_CLASS,
217 .instance = 3,
218 .mmio_bases = {
219 { .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
220 },
221 },
222 [CCS0] = {
223 .class = COMPUTE_CLASS,
224 .instance = 0,
225 .mmio_bases = {
226 { .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE }
227 }
228 },
229 [CCS1] = {
230 .class = COMPUTE_CLASS,
231 .instance = 1,
232 .mmio_bases = {
233 { .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE }
234 }
235 },
236 [CCS2] = {
237 .class = COMPUTE_CLASS,
238 .instance = 2,
239 .mmio_bases = {
240 { .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE }
241 }
242 },
243 [CCS3] = {
244 .class = COMPUTE_CLASS,
245 .instance = 3,
246 .mmio_bases = {
247 { .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE }
248 }
249 },
250 [GSC0] = {
251 .class = OTHER_CLASS,
252 .instance = OTHER_GSC_INSTANCE,
253 .mmio_bases = {
254 { .graphics_ver = 12, .base = MTL_GSC_RING_BASE }
255 }
256 },
257 };
258
259 /**
260 * intel_engine_context_size() - return the size of the context for an engine
261 * @gt: the gt
262 * @class: engine class
263 *
264 * Each engine class may require a different amount of space for a context
265 * image.
266 *
267 * Return: size (in bytes) of an engine class specific context image
268 *
269 * Note: this size includes the HWSP, which is part of the context image
270 * in LRC mode, but does not include the "shared data page" used with
271 * GuC submission. The caller should account for this if using the GuC.
272 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
274 {
275 struct intel_uncore *uncore = gt->uncore;
276 u32 cxt_size;
277
278 BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
279
280 switch (class) {
281 case COMPUTE_CLASS:
282 fallthrough;
283 case RENDER_CLASS:
284 switch (GRAPHICS_VER(gt->i915)) {
285 default:
286 MISSING_CASE(GRAPHICS_VER(gt->i915));
287 return DEFAULT_LR_CONTEXT_RENDER_SIZE;
288 case 12:
289 case 11:
290 return GEN11_LR_CONTEXT_RENDER_SIZE;
291 case 9:
292 return GEN9_LR_CONTEXT_RENDER_SIZE;
293 case 8:
294 return GEN8_LR_CONTEXT_RENDER_SIZE;
295 case 7:
296 if (IS_HASWELL(gt->i915))
297 return HSW_CXT_TOTAL_SIZE;
298
299 cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
300 return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
301 PAGE_SIZE);
302 case 6:
303 cxt_size = intel_uncore_read(uncore, CXT_SIZE);
304 return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
305 PAGE_SIZE);
306 case 5:
307 case 4:
308 /*
309 * There is a discrepancy here between the size reported
310 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
312 *
313 * The discrepancy is on the order of a few cachelines,
314 * but the total is under one page (4k), which is our
315 * minimum allocation anyway so it should all come
316 * out in the wash.
317 */
318 cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			drm_dbg(&gt->i915->drm,
320 "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
321 GRAPHICS_VER(gt->i915), cxt_size * 64,
322 cxt_size - 1);
323 return round_up(cxt_size * 64, PAGE_SIZE);
324 case 3:
325 case 2:
326 /* For the special day when i810 gets merged. */
327 case 1:
328 return 0;
329 }
330 break;
331 default:
332 MISSING_CASE(class);
333 fallthrough;
334 case VIDEO_DECODE_CLASS:
335 case VIDEO_ENHANCEMENT_CLASS:
336 case COPY_ENGINE_CLASS:
337 case OTHER_CLASS:
338 if (GRAPHICS_VER(gt->i915) < 8)
339 return 0;
340 return GEN8_LR_CONTEXT_OTHER_SIZE;
341 }
342 }
343
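/*
 * Walk the (reverse graphics_ver sorted) mmio_bases table and return the
 * first base whose graphics_ver does not exceed the running platform.
 * For example, on a graphics version 9 platform VCS0 resolves to
 * GEN6_BSD_RING_BASE (9 >= 6, but 9 < 11).
 */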
static u32 __engine_mmio_base(struct drm_i915_private *i915,
345 const struct engine_mmio_base *bases)
346 {
347 int i;
348
349 for (i = 0; i < MAX_MMIO_BASES; i++)
350 if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
351 break;
352
353 GEM_BUG_ON(i == MAX_MMIO_BASES);
354 GEM_BUG_ON(!bases[i].base);
355
356 return bases[i].base;
357 }
358
static void __sprint_engine_name(struct intel_engine_cs *engine)
360 {
361 /*
362 * Before we know what the uABI name for this engine will be,
363 * we still would like to keep track of this engine in the debug logs.
364 * We throw in a ' here as a reminder that this isn't its final name.
365 */
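	/*
	 * e.g. "vcs'1"; the final uABI name is assigned later, when the
	 * engines are registered with userspace.
	 */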
366 GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
367 intel_engine_class_repr(engine->class),
368 engine->instance) >= sizeof(engine->name));
369 }
370
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
372 {
373 /*
374 * Though they added more rings on g4x/ilk, they did not add
375 * per-engine HWSTAM until gen6.
376 */
377 if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
378 return;
379
380 if (GRAPHICS_VER(engine->i915) >= 3)
381 ENGINE_WRITE(engine, RING_HWSTAM, mask);
382 else
383 ENGINE_WRITE16(engine, RING_HWSTAM, mask);
384 }
385
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
387 {
388 /* Mask off all writes into the unknown HWSP */
389 intel_engine_set_hwsp_writemask(engine, ~0u);
390 }
391
static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
393 {
394 GEM_DEBUG_WARN_ON(iir);
395 }
396
static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
398 {
399 u32 reset_domain;
400
401 if (ver >= 11) {
402 static const u32 engine_reset_domains[] = {
403 [RCS0] = GEN11_GRDOM_RENDER,
404 [BCS0] = GEN11_GRDOM_BLT,
405 [BCS1] = XEHPC_GRDOM_BLT1,
406 [BCS2] = XEHPC_GRDOM_BLT2,
407 [BCS3] = XEHPC_GRDOM_BLT3,
408 [BCS4] = XEHPC_GRDOM_BLT4,
409 [BCS5] = XEHPC_GRDOM_BLT5,
410 [BCS6] = XEHPC_GRDOM_BLT6,
411 [BCS7] = XEHPC_GRDOM_BLT7,
412 [BCS8] = XEHPC_GRDOM_BLT8,
413 [VCS0] = GEN11_GRDOM_MEDIA,
414 [VCS1] = GEN11_GRDOM_MEDIA2,
415 [VCS2] = GEN11_GRDOM_MEDIA3,
416 [VCS3] = GEN11_GRDOM_MEDIA4,
417 [VCS4] = GEN11_GRDOM_MEDIA5,
418 [VCS5] = GEN11_GRDOM_MEDIA6,
419 [VCS6] = GEN11_GRDOM_MEDIA7,
420 [VCS7] = GEN11_GRDOM_MEDIA8,
421 [VECS0] = GEN11_GRDOM_VECS,
422 [VECS1] = GEN11_GRDOM_VECS2,
423 [VECS2] = GEN11_GRDOM_VECS3,
424 [VECS3] = GEN11_GRDOM_VECS4,
425 [CCS0] = GEN11_GRDOM_RENDER,
426 [CCS1] = GEN11_GRDOM_RENDER,
427 [CCS2] = GEN11_GRDOM_RENDER,
428 [CCS3] = GEN11_GRDOM_RENDER,
429 [GSC0] = GEN12_GRDOM_GSC,
430 };
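		/*
		 * Note: the CCS engines map onto the render reset domain
		 * rather than having dedicated reset domains of their own.
		 */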
431 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
432 !engine_reset_domains[id]);
433 reset_domain = engine_reset_domains[id];
434 } else {
435 static const u32 engine_reset_domains[] = {
436 [RCS0] = GEN6_GRDOM_RENDER,
437 [BCS0] = GEN6_GRDOM_BLT,
438 [VCS0] = GEN6_GRDOM_MEDIA,
439 [VCS1] = GEN8_GRDOM_MEDIA2,
440 [VECS0] = GEN6_GRDOM_VECS,
441 };
442 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
443 !engine_reset_domains[id]);
444 reset_domain = engine_reset_domains[id];
445 }
446
447 return reset_domain;
448 }
449
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
451 u8 logical_instance)
452 {
453 const struct engine_info *info = &intel_engines[id];
454 struct drm_i915_private *i915 = gt->i915;
455 struct intel_engine_cs *engine;
456 u8 guc_class;
457
458 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
459 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
460 BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
461 BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));
462
463 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
464 return -EINVAL;
465
466 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
467 return -EINVAL;
468
469 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
470 return -EINVAL;
471
472 if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
473 return -EINVAL;
474
475 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
476 if (!engine)
477 return -ENOMEM;
478
479 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
480
481 INIT_LIST_HEAD(&engine->pinned_contexts_list);
482 engine->id = id;
483 engine->legacy_idx = INVALID_ENGINE;
484 engine->mask = BIT(id);
485 engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915),
486 id);
487 engine->i915 = i915;
488 engine->gt = gt;
489 engine->uncore = gt->uncore;
490 guc_class = engine_class_to_guc_class(info->class);
491 engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
492 engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
493
494 engine->irq_handler = nop_irq_handler;
495
496 engine->class = info->class;
497 engine->instance = info->instance;
498 engine->logical_mask = BIT(logical_instance);
499 __sprint_engine_name(engine);
500
501 if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
502 __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
503 engine->class == RENDER_CLASS)
504 engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
505
506 /* features common between engines sharing EUs */
507 if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
508 engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
509 engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
510 }
511
512 engine->props.heartbeat_interval_ms =
513 CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
514 engine->props.max_busywait_duration_ns =
515 CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
516 engine->props.preempt_timeout_ms =
517 CONFIG_DRM_I915_PREEMPT_TIMEOUT;
518 engine->props.stop_timeout_ms =
519 CONFIG_DRM_I915_STOP_TIMEOUT;
520 engine->props.timeslice_duration_ms =
521 CONFIG_DRM_I915_TIMESLICE_DURATION;
522
523 /*
524 * Mid-thread pre-emption is not available in Gen12. Unfortunately,
525 * some compute workloads run quite long threads. That means they get
526 * reset due to not pre-empting in a timely manner. So, bump the
527 * pre-emption timeout value to be much higher for compute engines.
528 */
529 if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
530 engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE;
531
532 /* Cap properties according to any system limits */
533 #define CLAMP_PROP(field) \
534 do { \
535 u64 clamp = intel_clamp_##field(engine, engine->props.field); \
536 if (clamp != engine->props.field) { \
537 drm_notice(&engine->i915->drm, \
538 "Warning, clamping %s to %lld to prevent overflow\n", \
539 #field, clamp); \
540 engine->props.field = clamp; \
541 } \
542 } while (0)
543
544 CLAMP_PROP(heartbeat_interval_ms);
545 CLAMP_PROP(max_busywait_duration_ns);
546 CLAMP_PROP(preempt_timeout_ms);
547 CLAMP_PROP(stop_timeout_ms);
548 CLAMP_PROP(timeslice_duration_ms);
549
550 #undef CLAMP_PROP
551
552 engine->defaults = engine->props; /* never to change again */
553
554 engine->context_size = intel_engine_context_size(gt, engine->class);
555 if (WARN_ON(engine->context_size > BIT(20)))
556 engine->context_size = 0;
557 if (engine->context_size)
558 DRIVER_CAPS(i915)->has_logical_contexts = true;
559
560 ewma__engine_latency_init(&engine->latency);
561
562 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
563
564 /* Scrub mmio state on takeover */
565 intel_engine_sanitize_mmio(engine);
566
567 gt->engine_class[info->class][info->instance] = engine;
568 gt->engine[id] = engine;
569
570 return 0;
571 }
572
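/*
 * The intel_clamp_*() helpers below cap the raw (Kconfig/user supplied)
 * engine properties so that later conversions to jiffies or to the GuC's
 * 32bit policy fields cannot overflow.
 */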
u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
574 {
575 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
576
577 return value;
578 }
579
u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
581 {
582 value = min(value, jiffies_to_nsecs(2));
583
584 return value;
585 }
586
u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
588 {
589 /*
590 * NB: The GuC API only supports 32bit values. However, the limit is further
591 * reduced due to internal calculations which would otherwise overflow.
592 */
593 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
594 value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
595
596 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
597
598 return value;
599 }
600
u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
602 {
603 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
604
605 return value;
606 }
607
u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
609 {
610 /*
611 * NB: The GuC API only supports 32bit values. However, the limit is further
612 * reduced due to internal calculations which would otherwise overflow.
613 */
614 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
615 value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
616
617 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
618
619 return value;
620 }
621
static void __setup_engine_capabilities(struct intel_engine_cs *engine)
623 {
624 struct drm_i915_private *i915 = engine->i915;
625
626 if (engine->class == VIDEO_DECODE_CLASS) {
627 /*
		 * HEVC support is present on the first engine instance
629 * before Gen11 and on all instances afterwards.
630 */
631 if (GRAPHICS_VER(i915) >= 11 ||
632 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
633 engine->uabi_capabilities |=
634 I915_VIDEO_CLASS_CAPABILITY_HEVC;
635
636 /*
637 * SFC block is present only on even logical engine
638 * instances.
639 */
640 if ((GRAPHICS_VER(i915) >= 11 &&
641 (engine->gt->info.vdbox_sfc_access &
642 BIT(engine->instance))) ||
643 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
644 engine->uabi_capabilities |=
645 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
646 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
647 if (GRAPHICS_VER(i915) >= 9 &&
648 engine->gt->info.sfc_mask & BIT(engine->instance))
649 engine->uabi_capabilities |=
650 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
651 }
652 }
653
static void intel_setup_engine_capabilities(struct intel_gt *gt)
655 {
656 struct intel_engine_cs *engine;
657 enum intel_engine_id id;
658
659 for_each_engine(engine, gt, id)
660 __setup_engine_capabilities(engine);
661 }
662
663 /**
664 * intel_engines_release() - free the resources allocated for Command Streamers
665 * @gt: pointer to struct intel_gt
666 */
void intel_engines_release(struct intel_gt *gt)
668 {
669 struct intel_engine_cs *engine;
670 enum intel_engine_id id;
671
672 /*
673 * Before we release the resources held by engine, we must be certain
674 * that the HW is no longer accessing them -- having the GPU scribble
675 * to or read from a page being used for something else causes no end
676 * of fun.
677 *
678 * The GPU should be reset by this point, but assume the worst just
679 * in case we aborted before completely initialising the engines.
680 */
681 GEM_BUG_ON(intel_gt_pm_is_awake(gt));
682 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
683 __intel_gt_reset(gt, ALL_ENGINES);
684
685 /* Decouple the backend; but keep the layout for late GPU resets */
686 for_each_engine(engine, gt, id) {
687 if (!engine->release)
688 continue;
689
690 intel_wakeref_wait_for_idle(&engine->wakeref);
691 GEM_BUG_ON(intel_engine_pm_is_awake(engine));
692
693 engine->release(engine);
694 engine->release = NULL;
695
696 memset(&engine->reset, 0, sizeof(engine->reset));
697 }
698 }
699
void intel_engine_free_request_pool(struct intel_engine_cs *engine)
701 {
702 if (!engine->request_pool)
703 return;
704
705 #ifdef __linux__
706 kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
707 #else
708 pool_put(i915_request_slab_cache(), engine->request_pool);
709 #endif
710 }
711
void intel_engines_free(struct intel_gt *gt)
713 {
714 struct intel_engine_cs *engine;
715 enum intel_engine_id id;
716
717 /* Free the requests! dma-resv keeps fences around for an eternity */
718 rcu_barrier();
719
720 for_each_engine(engine, gt, id) {
721 intel_engine_free_request_pool(engine);
722 kfree(engine);
723 gt->engine[id] = NULL;
724 }
725 }
726
727 static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
729 unsigned int physical_vdbox,
730 unsigned int logical_vdbox, u16 vdbox_mask)
731 {
732 struct drm_i915_private *i915 = gt->i915;
733
734 /*
735 * In Gen11, only even numbered logical VDBOXes are hooked
736 * up to an SFC (Scaler & Format Converter) unit.
	 * In Gen12, even numbered physical instances are always connected
	 * to an SFC. Odd numbered physical instances have an SFC only if
	 * the previous even instance is fused off.
740 *
741 * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
742 * in the fuse register that tells us whether a specific SFC is present.
743 */
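	/*
	 * e.g. on a MEDIA_VER 12 part with VCS0 fused off, physical VDBOX1
	 * keeps an SFC (provided its SFC_ENABLE fuse bit is set) because
	 * the preceding even instance is absent.
	 */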
744 if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
745 return false;
746 else if (MEDIA_VER(i915) >= 12)
747 return (physical_vdbox % 2 == 0) ||
748 !(BIT(physical_vdbox - 1) & vdbox_mask);
749 else if (MEDIA_VER(i915) == 11)
750 return logical_vdbox % 2 == 0;
751
752 return false;
753 }
754
static void engine_mask_apply_media_fuses(struct intel_gt *gt)
756 {
757 struct drm_i915_private *i915 = gt->i915;
758 unsigned int logical_vdbox = 0;
759 unsigned int i;
760 u32 media_fuse, fuse1;
761 u16 vdbox_mask;
762 u16 vebox_mask;
763
764 if (MEDIA_VER(gt->i915) < 11)
765 return;
766
767 /*
768 * On newer platforms the fusing register is called 'enable' and has
769 * enable semantics, while on older platforms it is called 'disable'
	 * and bits have disable semantics.
771 */
772 media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
773 if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
774 media_fuse = ~media_fuse;
775
776 vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
777 vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
778 GEN11_GT_VEBOX_DISABLE_SHIFT;
779
780 if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
781 fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
782 gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
783 } else {
784 gt->info.sfc_mask = ~0;
785 }
786
787 for (i = 0; i < I915_MAX_VCS; i++) {
788 if (!HAS_ENGINE(gt, _VCS(i))) {
789 vdbox_mask &= ~BIT(i);
790 continue;
791 }
792
793 if (!(BIT(i) & vdbox_mask)) {
794 gt->info.engine_mask &= ~BIT(_VCS(i));
795 drm_dbg(&i915->drm, "vcs%u fused off\n", i);
796 continue;
797 }
798
799 if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
800 gt->info.vdbox_sfc_access |= BIT(i);
801 logical_vdbox++;
802 }
803 drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
804 vdbox_mask, VDBOX_MASK(gt));
805 GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
806
807 for (i = 0; i < I915_MAX_VECS; i++) {
808 if (!HAS_ENGINE(gt, _VECS(i))) {
809 vebox_mask &= ~BIT(i);
810 continue;
811 }
812
813 if (!(BIT(i) & vebox_mask)) {
814 gt->info.engine_mask &= ~BIT(_VECS(i));
815 drm_dbg(&i915->drm, "vecs%u fused off\n", i);
816 }
817 }
818 drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
819 vebox_mask, VEBOX_MASK(gt));
820 GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
821 }
822
static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
824 {
825 struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
827 int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
828 unsigned long ccs_mask;
829 unsigned int i;
830
831 if (GRAPHICS_VER(i915) < 11)
832 return;
833
834 if (hweight32(CCS_MASK(gt)) <= 1)
835 return;
836
837 ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
838 ss_per_ccs);
839 /*
840 * If all DSS in a quadrant are fused off, the corresponding CCS
841 * engine is not available for use.
842 */
843 for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
844 info->engine_mask &= ~BIT(_CCS(i));
845 drm_dbg(&i915->drm, "ccs%u fused off\n", i);
846 }
847 }
848
static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
850 {
851 struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
853 unsigned long meml3_mask;
854 unsigned long quad;
855
856 if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
857 GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
858 return;
859
860 meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
861 meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
862
863 /*
864 * Link Copy engines may be fused off according to meml3_mask. Each
	 * bit is a quad that houses two Link Copy and two Sub Copy engines.
866 */
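	/*
	 * e.g. if the bit for quad 1 is clear, link copy engines bcs3 and
	 * bcs4 are removed from the engine mask.
	 */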
867 for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
868 unsigned int instance = quad * 2 + 1;
869 intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
870 _BCS(instance));
871
872 if (mask & info->engine_mask) {
873 drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
874 drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);
875
876 info->engine_mask &= ~mask;
877 }
878 }
879 }
880
881 /*
882 * Determine which engines are fused off in our particular hardware.
883 * Note that we have a catch-22 situation where we need to be able to access
884 * the blitter forcewake domain to read the engine fuses, but at the same time
885 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
887 * domains based on the full engine mask in the platform capabilities before
888 * calling this function and pruning the domains for fused-off engines
889 * afterwards.
890 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
892 {
	struct intel_gt_info *info = &gt->info;
894
895 GEM_BUG_ON(!info->engine_mask);
896
897 engine_mask_apply_media_fuses(gt);
898 engine_mask_apply_compute_fuses(gt);
899 engine_mask_apply_copy_fuses(gt);
900
901 /*
902 * The only use of the GSC CS is to load and communicate with the GSC
903 * FW, so we have no use for it if we don't have the FW.
904 *
905 * IMPORTANT: in cases where we don't have the GSC FW, we have a
906 * catch-22 situation that breaks media C6 due to 2 requirements:
907 * 1) once turned on, the GSC power well will not go to sleep unless the
908 * GSC FW is loaded.
909 * 2) to enable idling (which is required for media C6) we need to
910 * initialize the IDLE_MSG register for the GSC CS and do at least 1
911 * submission, which will wake up the GSC power well.
912 */
	if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) {
		drm_notice(&gt->i915->drm,
915 "No GSC FW selected, disabling GSC CS and media C6\n");
916 info->engine_mask &= ~BIT(GSC0);
917 }
918
919 /*
920 * Do not create the command streamer for CCS slices beyond the first.
921 * All the workload submitted to the first engine will be shared among
922 * all the slices.
923 *
	 * Once the user is allowed to customize the CCS mode, this
925 * check needs to be removed.
926 */
927 if (IS_DG2(gt->i915)) {
928 u8 first_ccs = __ffs(CCS_MASK(gt));
929
		/* Mask off all the CCS engines */
931 info->engine_mask &= ~GENMASK(CCS3, CCS0);
932 /* Put back in the first CCS engine */
933 info->engine_mask |= BIT(_CCS(first_ccs));
934 }
935
936 return info->engine_mask;
937 }
938
static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
940 u8 class, const u8 *map, u8 num_instances)
941 {
942 int i, j;
943 u8 current_logical_id = 0;
944
945 for (j = 0; j < num_instances; ++j) {
946 for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
947 if (!HAS_ENGINE(gt, i) ||
948 intel_engines[i].class != class)
949 continue;
950
951 if (intel_engines[i].instance == map[j]) {
952 logical_ids[intel_engines[i].instance] =
953 current_logical_id++;
954 break;
955 }
956 }
957 }
958 }
959
static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
961 {
962 /*
963 * Logical to physical mapping is needed for proper support
	 * of the split-frame feature.
965 */
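	/*
	 * With all eight VCS present, the map below yields logical ids
	 * vcs0->0, vcs2->1, vcs4->2, vcs6->3, vcs1->4, vcs3->5, vcs5->6,
	 * vcs7->7.
	 */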
966 if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
967 const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
968
969 populate_logical_ids(gt, logical_ids, class,
970 map, ARRAY_SIZE(map));
971 } else {
972 int i;
973 u8 map[MAX_ENGINE_INSTANCE + 1];
974
975 for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
976 map[i] = i;
977 populate_logical_ids(gt, logical_ids, class,
978 map, ARRAY_SIZE(map));
979 }
980 }
981
982 /**
983 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
984 * @gt: pointer to struct intel_gt
985 *
986 * Return: non-zero if the initialization failed.
987 */
int intel_engines_init_mmio(struct intel_gt *gt)
989 {
990 struct drm_i915_private *i915 = gt->i915;
991 const unsigned int engine_mask = init_engine_mask(gt);
992 unsigned int mask = 0;
993 unsigned int i, class;
994 u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
995 int err;
996
997 drm_WARN_ON(&i915->drm, engine_mask == 0);
998 drm_WARN_ON(&i915->drm, engine_mask &
999 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
1000
1001 if (i915_inject_probe_failure(i915))
1002 return -ENODEV;
1003
1004 for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
1005 setup_logical_ids(gt, logical_ids, class);
1006
1007 for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
1008 u8 instance = intel_engines[i].instance;
1009
1010 if (intel_engines[i].class != class ||
1011 !HAS_ENGINE(gt, i))
1012 continue;
1013
1014 err = intel_engine_setup(gt, i,
1015 logical_ids[instance]);
1016 if (err)
1017 goto cleanup;
1018
1019 mask |= BIT(i);
1020 }
1021 }
1022
1023 /*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver, by warning and disabling the forgotten
	 * engines.
1027 */
1028 if (drm_WARN_ON(&i915->drm, mask != engine_mask))
1029 gt->info.engine_mask = mask;
1030
1031 gt->info.num_engines = hweight32(mask);
1032
1033 intel_gt_check_and_clear_faults(gt);
1034
1035 intel_setup_engine_capabilities(gt);
1036
1037 intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
1038
1039 return 0;
1040
1041 cleanup:
1042 intel_engines_free(gt);
1043 return err;
1044 }
1045
void intel_engine_init_execlists(struct intel_engine_cs *engine)
1047 {
1048 struct intel_engine_execlists * const execlists = &engine->execlists;
1049
1050 execlists->port_mask = 1;
1051 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
1052 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
1053
1054 memset(execlists->pending, 0, sizeof(execlists->pending));
1055 execlists->active =
1056 memset(execlists->inflight, 0, sizeof(execlists->inflight));
1057 }
1058
static void cleanup_status_page(struct intel_engine_cs *engine)
1060 {
1061 struct i915_vma *vma;
1062
1063 /* Prevent writes into HWSP after returning the page to the system */
1064 intel_engine_set_hwsp_writemask(engine, ~0u);
1065
1066 vma = fetch_and_zero(&engine->status_page.vma);
1067 if (!vma)
1068 return;
1069
1070 if (!HWS_NEEDS_PHYSICAL(engine->i915))
1071 i915_vma_unpin(vma);
1072
1073 i915_gem_object_unpin_map(vma->obj);
1074 i915_gem_object_put(vma->obj);
1075 }
1076
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
1078 struct i915_gem_ww_ctx *ww,
1079 struct i915_vma *vma)
1080 {
1081 unsigned int flags;
1082
1083 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
1084 /*
1085 * On g33, we cannot place HWS above 256MiB, so
1086 * restrict its pinning to the low mappable arena.
1087 * Though this restriction is not documented for
1088 * gen4, gen5, or byt, they also behave similarly
1089 * and hang if the HWS is placed at the top of the
1090 * GTT. To generalise, it appears that all !llc
1091 * platforms have issues with us placing the HWS
1092 * above the mappable region (even though we never
1093 * actually map it).
1094 */
1095 flags = PIN_MAPPABLE;
1096 else
1097 flags = PIN_HIGH;
1098
1099 return i915_ggtt_pin(vma, ww, 0, flags);
1100 }
1101
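/*
 * Allocate a single page for the HWSP, pin it into the GGTT (unless the
 * platform uses a physical HWS) and keep a CPU WB mapping of it around.
 * The ww loop below simply backs off and retries on -EDEADLK.
 */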
static int init_status_page(struct intel_engine_cs *engine)
1103 {
1104 struct drm_i915_gem_object *obj;
1105 struct i915_gem_ww_ctx ww;
1106 struct i915_vma *vma;
1107 void *vaddr;
1108 int ret;
1109
1110 INIT_LIST_HEAD(&engine->status_page.timelines);
1111
1112 /*
1113 * Though the HWS register does support 36bit addresses, historically
1114 * we have had hangs and corruption reported due to wild writes if
1115 * the HWS is placed above 4G. We only allow objects to be allocated
1116 * in GFP_DMA32 for i965, and no earlier physical address users had
1117 * access to more than 4G.
1118 */
1119 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1120 if (IS_ERR(obj)) {
1121 drm_err(&engine->i915->drm,
1122 "Failed to allocate status page\n");
1123 return PTR_ERR(obj);
1124 }
1125
1126 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
1127
1128 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1129 if (IS_ERR(vma)) {
1130 ret = PTR_ERR(vma);
1131 goto err_put;
1132 }
1133
1134 i915_gem_ww_ctx_init(&ww, true);
1135 retry:
1136 ret = i915_gem_object_lock(obj, &ww);
1137 if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
1138 ret = pin_ggtt_status_page(engine, &ww, vma);
1139 if (ret)
1140 goto err;
1141
1142 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1143 if (IS_ERR(vaddr)) {
1144 ret = PTR_ERR(vaddr);
1145 goto err_unpin;
1146 }
1147
1148 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
1149 engine->status_page.vma = vma;
1150
1151 err_unpin:
1152 if (ret)
1153 i915_vma_unpin(vma);
1154 err:
1155 if (ret == -EDEADLK) {
1156 ret = i915_gem_ww_ctx_backoff(&ww);
1157 if (!ret)
1158 goto retry;
1159 }
1160 i915_gem_ww_ctx_fini(&ww);
1161 err_put:
1162 if (ret)
1163 i915_gem_object_put(obj);
1164 return ret;
1165 }
1166
static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
1168 {
1169 static const union intel_engine_tlb_inv_reg gen8_regs[] = {
1170 [RENDER_CLASS].reg = GEN8_RTCR,
1171 [VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */
1172 [VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR,
1173 [COPY_ENGINE_CLASS].reg = GEN8_BTCR,
1174 };
1175 static const union intel_engine_tlb_inv_reg gen12_regs[] = {
1176 [RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR,
1177 [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
1178 [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
1179 [COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR,
1180 [COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR,
1181 };
1182 static const union intel_engine_tlb_inv_reg xehp_regs[] = {
1183 [RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR,
1184 [VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR,
1185 [VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR,
1186 [COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR,
1187 [COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR,
1188 };
1189 static const union intel_engine_tlb_inv_reg xelpmp_regs[] = {
1190 [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
1191 [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
1192 [OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR,
1193 };
1194 struct drm_i915_private *i915 = engine->i915;
1195 const unsigned int instance = engine->instance;
1196 const unsigned int class = engine->class;
1197 const union intel_engine_tlb_inv_reg *regs;
1198 union intel_engine_tlb_inv_reg reg;
1199 unsigned int num = 0;
1200 u32 val;
1201
1202 /*
1203 * New platforms should not be added with catch-all-newer (>=)
1204 * condition so that any later platform added triggers the below warning
1205 * and in turn mandates a human cross-check of whether the invalidation
1206 * flows have compatible semantics.
1207 *
1208 * For instance with the 11.00 -> 12.00 transition three out of five
1209 * respective engine registers were moved to masked type. Then after the
1210 * 12.00 -> 12.50 transition multi cast handling is required too.
1211 */
1212
1213 if (engine->gt->type == GT_MEDIA) {
1214 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
1215 regs = xelpmp_regs;
1216 num = ARRAY_SIZE(xelpmp_regs);
1217 }
1218 } else {
1219 if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
1220 GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
1221 GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
1222 GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
1223 regs = xehp_regs;
1224 num = ARRAY_SIZE(xehp_regs);
1225 } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
1226 GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
1227 regs = gen12_regs;
1228 num = ARRAY_SIZE(gen12_regs);
1229 } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
1230 regs = gen8_regs;
1231 num = ARRAY_SIZE(gen8_regs);
1232 } else if (GRAPHICS_VER(i915) < 8) {
1233 return 0;
1234 }
1235 }
1236
1237 if (gt_WARN_ONCE(engine->gt, !num,
1238 "Platform does not implement TLB invalidation!"))
1239 return -ENODEV;
1240
1241 if (gt_WARN_ON_ONCE(engine->gt,
1242 class >= num ||
1243 (!regs[class].reg.reg &&
1244 !regs[class].mcr_reg.reg)))
1245 return -ERANGE;
1246
1247 reg = regs[class];
1248
1249 if (regs == xelpmp_regs && class == OTHER_CLASS) {
1250 /*
1251 * There's only a single GSC instance, but it uses register bit
1252 * 1 instead of either 0 or OTHER_GSC_INSTANCE.
1253 */
1254 GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
1255 val = 1;
1256 } else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
1257 reg.reg = GEN8_M2TCR;
1258 val = 0;
1259 } else {
1260 val = instance;
1261 }
1262
1263 val = BIT(val);
1264
1265 engine->tlb_inv.mcr = regs == xehp_regs;
1266 engine->tlb_inv.reg = reg;
1267 engine->tlb_inv.done = val;
1268
1269 if (GRAPHICS_VER(i915) >= 12 &&
1270 (engine->class == VIDEO_DECODE_CLASS ||
1271 engine->class == VIDEO_ENHANCEMENT_CLASS ||
1272 engine->class == COMPUTE_CLASS ||
1273 engine->class == OTHER_CLASS))
1274 engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
1275 else
1276 engine->tlb_inv.request = val;
1277
1278 return 0;
1279 }
1280
static int engine_setup_common(struct intel_engine_cs *engine)
1282 {
1283 int err;
1284
1285 init_llist_head(&engine->barrier_tasks);
1286
1287 err = intel_engine_init_tlb_invalidation(engine);
1288 if (err)
1289 return err;
1290
1291 err = init_status_page(engine);
1292 if (err)
1293 return err;
1294
1295 engine->breadcrumbs = intel_breadcrumbs_create(engine);
1296 if (!engine->breadcrumbs) {
1297 err = -ENOMEM;
1298 goto err_status;
1299 }
1300
1301 engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
1302 if (!engine->sched_engine) {
1303 err = -ENOMEM;
1304 goto err_sched_engine;
1305 }
1306 engine->sched_engine->private_data = engine;
1307
1308 err = intel_engine_init_cmd_parser(engine);
1309 if (err)
1310 goto err_cmd_parser;
1311
1312 intel_engine_init_execlists(engine);
1313 intel_engine_init__pm(engine);
1314 intel_engine_init_retire(engine);
1315
1316 /* Use the whole device by default */
1317 engine->sseu =
1318 intel_sseu_from_device_info(&engine->gt->info.sseu);
1319
1320 intel_engine_init_workarounds(engine);
1321 intel_engine_init_whitelist(engine);
1322 intel_engine_init_ctx_wa(engine);
1323
1324 if (GRAPHICS_VER(engine->i915) >= 12)
1325 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
1326
1327 return 0;
1328
1329 err_cmd_parser:
1330 i915_sched_engine_put(engine->sched_engine);
1331 err_sched_engine:
1332 intel_breadcrumbs_put(engine->breadcrumbs);
1333 err_status:
1334 cleanup_status_page(engine);
1335 return err;
1336 }
1337
1338 struct measure_breadcrumb {
1339 struct i915_request rq;
1340 struct intel_ring ring;
1341 u32 cs[2048];
1342 };
1343
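/*
 * Emit a complete fini breadcrumb into a throwaway ring backed by a
 * temporarily allocated struct measure_breadcrumb, purely to measure how
 * many dwords engine->emit_fini_breadcrumb() writes; the result is cached
 * in engine->emit_fini_breadcrumb_dw by engine_init_common().
 */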
static int measure_breadcrumb_dw(struct intel_context *ce)
1345 {
1346 struct intel_engine_cs *engine = ce->engine;
1347 struct measure_breadcrumb *frame;
1348 int dw;
1349
1350 GEM_BUG_ON(!engine->gt->scratch);
1351
1352 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1353 if (!frame)
1354 return -ENOMEM;
1355
1356 frame->rq.i915 = engine->i915;
1357 frame->rq.engine = engine;
1358 frame->rq.context = ce;
1359 rcu_assign_pointer(frame->rq.timeline, ce->timeline);
1360 frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
1361
1362 frame->ring.vaddr = frame->cs;
1363 frame->ring.size = sizeof(frame->cs);
1364 frame->ring.wrap =
1365 BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
1366 frame->ring.effective_size = frame->ring.size;
1367 intel_ring_update_space(&frame->ring);
1368 frame->rq.ring = &frame->ring;
1369
1370 mutex_lock(&ce->timeline->mutex);
1371 spin_lock_irq(&engine->sched_engine->lock);
1372
1373 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
1374
1375 spin_unlock_irq(&engine->sched_engine->lock);
1376 mutex_unlock(&ce->timeline->mutex);
1377
1378 GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
1379
1380 kfree(frame);
1381 return dw;
1382 }
1383
1384 struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
1386 struct i915_address_space *vm,
1387 unsigned int ring_size,
1388 unsigned int hwsp,
1389 struct lock_class_key *key,
1390 const char *name)
1391 {
1392 struct intel_context *ce;
1393 int err;
1394
1395 ce = intel_context_create(engine);
1396 if (IS_ERR(ce))
1397 return ce;
1398
1399 __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
1400 ce->timeline = page_pack_bits(NULL, hwsp);
1401 ce->ring = NULL;
1402 ce->ring_size = ring_size;
1403
1404 i915_vm_put(ce->vm);
1405 ce->vm = i915_vm_get(vm);
1406
1407 err = intel_context_pin(ce); /* perma-pin so it is always available */
1408 if (err) {
1409 intel_context_put(ce);
1410 return ERR_PTR(err);
1411 }
1412
1413 list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
1414
1415 /*
1416 * Give our perma-pinned kernel timelines a separate lockdep class,
1417 * so that we can use them from within the normal user timelines
1418 * should we need to inject GPU operations during their request
1419 * construction.
1420 */
1421 lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
1422
1423 return ce;
1424 }
1425
void intel_engine_destroy_pinned_context(struct intel_context *ce)
1427 {
1428 struct intel_engine_cs *engine = ce->engine;
1429 struct i915_vma *hwsp = engine->status_page.vma;
1430
1431 GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
1432
1433 mutex_lock(&hwsp->vm->mutex);
1434 list_del(&ce->timeline->engine_link);
1435 mutex_unlock(&hwsp->vm->mutex);
1436
1437 list_del(&ce->pinned_contexts_link);
1438 intel_context_unpin(ce);
1439 intel_context_put(ce);
1440 }
1441
1442 static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
1444 {
1445 static struct lock_class_key kernel;
1446
1447 return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
1448 I915_GEM_HWS_SEQNO_ADDR,
1449 &kernel, "kernel_context");
1450 }
1451
1452 /*
1453 * engine_init_common - initialize engine state which might require hw access
1454 * @engine: Engine to initialize.
1455 *
 * Initializes @engine structure members shared between legacy and execlists
1457 * submission modes which do require hardware access.
1458 *
 * Typically done at later stages of submission mode specific engine setup.
1460 *
1461 * Returns zero on success or an error code on failure.
1462 */
static int engine_init_common(struct intel_engine_cs *engine)
1464 {
1465 struct intel_context *ce;
1466 int ret;
1467
1468 engine->set_default_submission(engine);
1469
1470 /*
1471 * We may need to do things with the shrinker which
1472 * require us to immediately switch back to the default
1473 * context. This can cause a problem as pinning the
1474 * default context also requires GTT space which may not
1475 * be available. To avoid this we always pin the default
1476 * context.
1477 */
1478 ce = create_kernel_context(engine);
1479 if (IS_ERR(ce))
1480 return PTR_ERR(ce);
1481
1482 ret = measure_breadcrumb_dw(ce);
1483 if (ret < 0)
1484 goto err_context;
1485
1486 engine->emit_fini_breadcrumb_dw = ret;
1487 engine->kernel_context = ce;
1488
1489 return 0;
1490
1491 err_context:
1492 intel_engine_destroy_pinned_context(ce);
1493 return ret;
1494 }
1495
int intel_engines_init(struct intel_gt *gt)
1497 {
1498 int (*setup)(struct intel_engine_cs *engine);
1499 struct intel_engine_cs *engine;
1500 enum intel_engine_id id;
1501 int err;
1502
	if (intel_uc_uses_guc_submission(&gt->uc)) {
1504 gt->submission_method = INTEL_SUBMISSION_GUC;
1505 setup = intel_guc_submission_setup;
1506 } else if (HAS_EXECLISTS(gt->i915)) {
1507 gt->submission_method = INTEL_SUBMISSION_ELSP;
1508 setup = intel_execlists_submission_setup;
1509 } else {
1510 gt->submission_method = INTEL_SUBMISSION_RING;
1511 setup = intel_ring_submission_setup;
1512 }
1513
1514 for_each_engine(engine, gt, id) {
1515 err = engine_setup_common(engine);
1516 if (err)
1517 return err;
1518
1519 err = setup(engine);
1520 if (err) {
1521 intel_engine_cleanup_common(engine);
1522 return err;
1523 }
1524
1525 /* The backend should now be responsible for cleanup */
1526 GEM_BUG_ON(engine->release == NULL);
1527
1528 err = engine_init_common(engine);
1529 if (err)
1530 return err;
1531
1532 intel_engine_add_user(engine);
1533 }
1534
1535 return 0;
1536 }
1537
1538 /**
1539 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
1541 * @engine: Engine to cleanup.
1542 *
1543 * This cleans up everything created by the common helpers.
1544 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
1546 {
1547 GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
1548
1549 i915_sched_engine_put(engine->sched_engine);
1550 intel_breadcrumbs_put(engine->breadcrumbs);
1551
1552 intel_engine_fini_retire(engine);
1553 intel_engine_cleanup_cmd_parser(engine);
1554
1555 if (engine->default_state)
1556 uao_detach(engine->default_state);
1557
1558 if (engine->kernel_context)
1559 intel_engine_destroy_pinned_context(engine->kernel_context);
1560
1561 GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
1562 cleanup_status_page(engine);
1563
1564 intel_wa_list_free(&engine->ctx_wa_list);
1565 intel_wa_list_free(&engine->wa_list);
1566 intel_wa_list_free(&engine->whitelist);
1567 }
1568
1569 /**
1570 * intel_engine_resume - re-initializes the HW state of the engine
1571 * @engine: Engine to resume.
1572 *
1573 * Returns zero on success or an error code on failure.
1574 */
int intel_engine_resume(struct intel_engine_cs *engine)
1576 {
1577 intel_engine_apply_workarounds(engine);
1578 intel_engine_apply_whitelist(engine);
1579
1580 return engine->resume(engine);
1581 }
1582
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
1584 {
1585 struct drm_i915_private *i915 = engine->i915;
1586
1587 u64 acthd;
1588
1589 if (GRAPHICS_VER(i915) >= 8)
1590 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
1591 else if (GRAPHICS_VER(i915) >= 4)
1592 acthd = ENGINE_READ(engine, RING_ACTHD);
1593 else
1594 acthd = ENGINE_READ(engine, ACTHD);
1595
1596 return acthd;
1597 }
1598
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
1600 {
1601 u64 bbaddr;
1602
1603 if (GRAPHICS_VER(engine->i915) >= 8)
1604 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
1605 else
1606 bbaddr = ENGINE_READ(engine, RING_BBADDR);
1607
1608 return bbaddr;
1609 }
1610
static unsigned long stop_timeout(const struct intel_engine_cs *engine)
1612 {
1613 if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
1614 return 0;
1615
1616 /*
1617 * If we are doing a normal GPU reset, we can take our time and allow
1618 * the engine to quiesce. We've stopped submission to the engine, and
1619 * if we wait long enough an innocent context should complete and
1620 * leave the engine idle. So they should not be caught unaware by
1621 * the forthcoming GPU reset (which usually follows the stop_cs)!
1622 */
1623 return READ_ONCE(engine->props.stop_timeout_ms);
1624 }
1625
static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
1627 int fast_timeout_us,
1628 int slow_timeout_ms)
1629 {
1630 struct intel_uncore *uncore = engine->uncore;
1631 const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
1632 int err;
1633
1634 intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
1635
1636 /*
1637 * Wa_22011802037: Prior to doing a reset, ensure CS is
1638 * stopped, set ring stop bit and prefetch disable bit to halt CS
1639 */
1640 if (intel_engine_reset_needs_wa_22011802037(engine->gt))
1641 intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
1642 _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
1643
1644 err = __intel_wait_for_register_fw(engine->uncore, mode,
1645 MODE_IDLE, MODE_IDLE,
1646 fast_timeout_us,
1647 slow_timeout_ms,
1648 NULL);
1649
1650 /* A final mmio read to let GPU writes be hopefully flushed to memory */
1651 intel_uncore_posting_read_fw(uncore, mode);
1652 return err;
1653 }
1654
int intel_engine_stop_cs(struct intel_engine_cs *engine)
1656 {
1657 int err = 0;
1658
1659 if (GRAPHICS_VER(engine->i915) < 3)
1660 return -ENODEV;
1661
1662 ENGINE_TRACE(engine, "\n");
1663 /*
1664 * TODO: Find out why occasionally stopping the CS times out. Seen
1665 * especially with gem_eio tests.
1666 *
1667 * Occasionally trying to stop the cs times out, but does not adversely
1668 * affect functionality. The timeout is set as a config parameter that
1669 * defaults to 100ms. In most cases the follow up operation is to wait
1670 * for pending MI_FORCE_WAKES. The assumption is that this timeout is
	 * sufficient for any pending MI_FORCEWAKEs to complete. Until the
	 * issue is root caused, the caller must check and handle the return
	 * from this function.
1674 */
1675 if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
1676 ENGINE_TRACE(engine,
1677 "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
1678 ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
1679 ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
1680
1681 /*
1682 * Sometimes we observe that the idle flag is not
1683 * set even though the ring is empty. So double
1684 * check before giving up.
1685 */
1686 if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
1687 (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
1688 err = -ETIMEDOUT;
1689 }
1690
1691 return err;
1692 }
1693
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
1695 {
1696 ENGINE_TRACE(engine, "\n");
1697
1698 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
1699 }
1700
static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
1702 {
1703 static const i915_reg_t _reg[I915_NUM_ENGINES] = {
1704 [RCS0] = MSG_IDLE_CS,
1705 [BCS0] = MSG_IDLE_BCS,
1706 [VCS0] = MSG_IDLE_VCS0,
1707 [VCS1] = MSG_IDLE_VCS1,
1708 [VCS2] = MSG_IDLE_VCS2,
1709 [VCS3] = MSG_IDLE_VCS3,
1710 [VCS4] = MSG_IDLE_VCS4,
1711 [VCS5] = MSG_IDLE_VCS5,
1712 [VCS6] = MSG_IDLE_VCS6,
1713 [VCS7] = MSG_IDLE_VCS7,
1714 [VECS0] = MSG_IDLE_VECS0,
1715 [VECS1] = MSG_IDLE_VECS1,
1716 [VECS2] = MSG_IDLE_VECS2,
1717 [VECS3] = MSG_IDLE_VECS3,
1718 [CCS0] = MSG_IDLE_CS,
1719 [CCS1] = MSG_IDLE_CS,
1720 [CCS2] = MSG_IDLE_CS,
1721 [CCS3] = MSG_IDLE_CS,
1722 };
1723 u32 val;
1724
1725 if (!_reg[engine->id].reg)
1726 return 0;
1727
1728 val = intel_uncore_read(engine->uncore, _reg[engine->id]);
1729
1730 /* bits[29:25] & bits[13:9] >> shift */
1731 return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
1732 }
1733
static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
1735 {
1736 int ret;
1737
1738 /* Ensure GPM receives fw up/down after CS is stopped */
1739 udelay(1);
1740
1741 /* Wait for forcewake request to complete in GPM */
1742 ret = __intel_wait_for_register_fw(gt->uncore,
1743 GEN9_PWRGT_DOMAIN_STATUS,
1744 fw_mask, fw_mask, 5000, 0, NULL);
1745
1746 /* Ensure CS receives fw ack from GPM */
1747 udelay(1);
1748
1749 if (ret)
1750 GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
1751 }
1752
1753 /*
1754 * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
1755 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
1756 * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
1757 * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
1758 * are concerned only with the gt reset here, we use a logical OR of pending
1759 * forcewakeups from all reset domains and then wait for them to complete by
1760 * querying PWRGT_DOMAIN_STATUS.
1761 */
void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
1763 {
1764 u32 fw_pending = __cs_pending_mi_force_wakes(engine);
1765
1766 if (fw_pending)
1767 __gpm_wait_for_fw_complete(engine->gt, fw_pending);
1768 }
1769
1770 /* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1772 struct intel_instdone *instdone)
1773 {
1774 struct drm_i915_private *i915 = engine->i915;
1775 struct intel_uncore *uncore = engine->uncore;
1776 u32 mmio_base = engine->mmio_base;
1777 int slice;
1778 int subslice;
1779 int iter;
1780
1781 memset(instdone, 0, sizeof(*instdone));
1782
1783 if (GRAPHICS_VER(i915) >= 8) {
1784 instdone->instdone =
1785 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1786
1787 if (engine->id != RCS0)
1788 return;
1789
1790 instdone->slice_common =
1791 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1792 if (GRAPHICS_VER(i915) >= 12) {
1793 instdone->slice_common_extra[0] =
1794 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
1795 instdone->slice_common_extra[1] =
1796 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
1797 }
1798
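		/* Sampler and row INSTDONE are per-slice/subslice MCR registers and need steered reads */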
1799 for_each_ss_steering(iter, engine->gt, slice, subslice) {
1800 instdone->sampler[slice][subslice] =
1801 intel_gt_mcr_read(engine->gt,
1802 GEN8_SAMPLER_INSTDONE,
1803 slice, subslice);
1804 instdone->row[slice][subslice] =
1805 intel_gt_mcr_read(engine->gt,
1806 GEN8_ROW_INSTDONE,
1807 slice, subslice);
1808 }
1809
1810 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
1811 for_each_ss_steering(iter, engine->gt, slice, subslice)
1812 instdone->geom_svg[slice][subslice] =
1813 intel_gt_mcr_read(engine->gt,
1814 XEHPG_INSTDONE_GEOM_SVG,
1815 slice, subslice);
1816 }
1817 } else if (GRAPHICS_VER(i915) >= 7) {
1818 instdone->instdone =
1819 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1820
1821 if (engine->id != RCS0)
1822 return;
1823
1824 instdone->slice_common =
1825 intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1826 instdone->sampler[0][0] =
1827 intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
1828 instdone->row[0][0] =
1829 intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
1830 } else if (GRAPHICS_VER(i915) >= 4) {
1831 instdone->instdone =
1832 intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1833 if (engine->id == RCS0)
1834 /* HACK: Using the wrong struct member */
1835 instdone->slice_common =
1836 intel_uncore_read(uncore, GEN4_INSTDONE1);
1837 } else {
1838 instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
1839 }
1840 }
1841
1842 static bool ring_is_idle(struct intel_engine_cs *engine)
1843 {
1844 bool idle = true;
1845
1846 if (I915_SELFTEST_ONLY(!engine->mmio_base))
1847 return true;
1848
1849 if (!intel_engine_pm_get_if_awake(engine))
1850 return true;
1851
1852 /* First check that no commands are left in the ring */
1853 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1854 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1855 idle = false;
1856
1857 /* No bit for gen2, so assume the CS parser is idle */
1858 if (GRAPHICS_VER(engine->i915) > 2 &&
1859 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1860 idle = false;
1861
1862 intel_engine_pm_put(engine);
1863
1864 return idle;
1865 }
1866
1867 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
1868 {
1869 struct tasklet_struct *t = &engine->sched_engine->tasklet;
1870
1871 if (!t->callback)
1872 return;
1873
1874 local_bh_disable();
1875 if (tasklet_trylock(t)) {
1876 /* Must wait for any GPU reset in progress. */
1877 if (__tasklet_is_enabled(t))
1878 t->callback(t);
1879 tasklet_unlock(t);
1880 }
1881 local_bh_enable();
1882
1883 /* Synchronise and wait for the tasklet on another CPU */
1884 if (sync)
1885 tasklet_unlock_wait(t);
1886 }
1887
1888 /**
1889 * intel_engine_is_idle() - Report if the engine has finished processing all work
1890 * @engine: the intel_engine_cs
1891 *
1892 * Return true if there are no requests pending, nothing left to be submitted
1893 * to hardware, and the engine is idle.
1894 */
1895 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1896 {
1897 	/* More white lies: if wedged, the HW state is inconsistent */
1898 if (intel_gt_is_wedged(engine->gt))
1899 return true;
1900
1901 if (!intel_engine_pm_is_awake(engine))
1902 return true;
1903
1904 /* Waiting to drain ELSP? */
1905 intel_synchronize_hardirq(engine->i915);
1906 intel_engine_flush_submission(engine);
1907
1908 /* ELSP is empty, but there are ready requests? E.g. after reset */
1909 if (!i915_sched_engine_is_empty(engine->sched_engine))
1910 return false;
1911
1912 /* Ring stopped? */
1913 return ring_is_idle(engine);
1914 }
1915
1916 bool intel_engines_are_idle(struct intel_gt *gt)
1917 {
1918 struct intel_engine_cs *engine;
1919 enum intel_engine_id id;
1920
1921 /*
1922 * If the driver is wedged, HW state may be very inconsistent and
1923 * report that it is still busy, even though we have stopped using it.
1924 */
1925 if (intel_gt_is_wedged(gt))
1926 return true;
1927
1928 /* Already parked (and passed an idleness test); must still be idle */
1929 if (!READ_ONCE(gt->awake))
1930 return true;
1931
1932 for_each_engine(engine, gt, id) {
1933 if (!intel_engine_is_idle(engine))
1934 return false;
1935 }
1936
1937 return true;
1938 }
1939
1940 bool intel_engine_irq_enable(struct intel_engine_cs *engine)
1941 {
1942 if (!engine->irq_enable)
1943 return false;
1944
1945 /* Caller disables interrupts */
1946 spin_lock(engine->gt->irq_lock);
1947 engine->irq_enable(engine);
1948 spin_unlock(engine->gt->irq_lock);
1949
1950 return true;
1951 }
1952
1953 void intel_engine_irq_disable(struct intel_engine_cs *engine)
1954 {
1955 if (!engine->irq_disable)
1956 return;
1957
1958 /* Caller disables interrupts */
1959 spin_lock(engine->gt->irq_lock);
1960 engine->irq_disable(engine);
1961 spin_unlock(engine->gt->irq_lock);
1962 }
1963
1964 void intel_engines_reset_default_submission(struct intel_gt *gt)
1965 {
1966 struct intel_engine_cs *engine;
1967 enum intel_engine_id id;
1968
1969 for_each_engine(engine, gt, id) {
1970 if (engine->sanitize)
1971 engine->sanitize(engine);
1972
1973 engine->set_default_submission(engine);
1974 }
1975 }
1976
1977 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1978 {
1979 switch (GRAPHICS_VER(engine->i915)) {
1980 case 2:
1981 return false; /* uses physical not virtual addresses */
1982 case 3:
1983 /* maybe only uses physical not virtual addresses */
1984 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1985 case 4:
1986 return !IS_I965G(engine->i915); /* who knows! */
1987 case 6:
1988 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1989 default:
1990 return true;
1991 }
1992 }
1993
1994 static struct intel_timeline *get_timeline(struct i915_request *rq)
1995 {
1996 struct intel_timeline *tl;
1997
1998 /*
1999 * Even though we are holding the engine->sched_engine->lock here, there
2000 * is no control over the submission queue per se and we are
2001 * inspecting the active state at a random point in time, with an
2002 * unknown queue. Play safe and make sure the timeline remains valid.
2003 * (Only being used for pretty printing, one extra kref shouldn't
2004 * cause a camel stampede!)
2005 */
2006 rcu_read_lock();
2007 tl = rcu_dereference(rq->timeline);
2008 if (!kref_get_unless_zero(&tl->kref))
2009 tl = NULL;
2010 rcu_read_unlock();
2011
2012 return tl;
2013 }
2014
2015 static int print_ring(char *buf, int sz, struct i915_request *rq)
2016 {
2017 int len = 0;
2018
2019 if (!i915_request_signaled(rq)) {
2020 struct intel_timeline *tl = get_timeline(rq);
2021
2022 len = scnprintf(buf, sz,
2023 "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
2024 i915_ggtt_offset(rq->ring->vma),
2025 tl ? tl->hwsp_offset : 0,
2026 hwsp_seqno(rq),
2027 DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
2028 1000 * 1000));
2029
2030 if (tl)
2031 intel_timeline_put(tl);
2032 }
2033
2034 return len;
2035 }
2036
2037 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
2038 {
2039 STUB();
2040 #ifdef notyet
2041 const size_t rowsize = 8 * sizeof(u32);
2042 const void *prev = NULL;
2043 bool skip = false;
2044 size_t pos;
2045
2046 for (pos = 0; pos < len; pos += rowsize) {
2047 char line[128];
2048
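		/* Collapse runs of identical rows into a single '*' line, like hexdump(1) */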
2049 if (prev && !memcmp(prev, buf + pos, rowsize)) {
2050 if (!skip) {
2051 drm_printf(m, "*\n");
2052 skip = true;
2053 }
2054 continue;
2055 }
2056
2057 WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
2058 rowsize, sizeof(u32),
2059 line, sizeof(line),
2060 false) >= sizeof(line));
2061 drm_printf(m, "[%04zx] %s\n", pos, line);
2062
2063 prev = buf + pos;
2064 skip = false;
2065 }
2066 #endif
2067 }
2068
2069 static const char *repr_timer(const struct timeout *t)
2070 {
2071 if (!READ_ONCE(t->to_time))
2072 return "inactive";
2073
2074 if (timer_pending(t))
2075 return "active";
2076
2077 return "expired";
2078 }
2079
2080 static void intel_engine_print_registers(struct intel_engine_cs *engine,
2081 struct drm_printer *m)
2082 {
2083 struct drm_i915_private *i915 = engine->i915;
2084 struct intel_engine_execlists * const execlists = &engine->execlists;
2085 u64 addr;
2086
2087 if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
2088 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
2089 if (HAS_EXECLISTS(i915)) {
2090 drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
2091 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
2092 drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
2093 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
2094 }
2095 drm_printf(m, "\tRING_START: 0x%08x\n",
2096 ENGINE_READ(engine, RING_START));
2097 drm_printf(m, "\tRING_HEAD: 0x%08x\n",
2098 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
2099 drm_printf(m, "\tRING_TAIL: 0x%08x\n",
2100 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
2101 drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
2102 ENGINE_READ(engine, RING_CTL),
2103 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
2104 if (GRAPHICS_VER(engine->i915) > 2) {
2105 drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
2106 ENGINE_READ(engine, RING_MI_MODE),
2107 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
2108 }
2109
2110 if (GRAPHICS_VER(i915) >= 6) {
2111 drm_printf(m, "\tRING_IMR: 0x%08x\n",
2112 ENGINE_READ(engine, RING_IMR));
2113 drm_printf(m, "\tRING_ESR: 0x%08x\n",
2114 ENGINE_READ(engine, RING_ESR));
2115 drm_printf(m, "\tRING_EMR: 0x%08x\n",
2116 ENGINE_READ(engine, RING_EMR));
2117 drm_printf(m, "\tRING_EIR: 0x%08x\n",
2118 ENGINE_READ(engine, RING_EIR));
2119 }
2120
2121 addr = intel_engine_get_active_head(engine);
2122 drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
2123 upper_32_bits(addr), lower_32_bits(addr));
2124 addr = intel_engine_get_last_batch_head(engine);
2125 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
2126 upper_32_bits(addr), lower_32_bits(addr));
2127 if (GRAPHICS_VER(i915) >= 8)
2128 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
2129 else if (GRAPHICS_VER(i915) >= 4)
2130 addr = ENGINE_READ(engine, RING_DMA_FADD);
2131 else
2132 addr = ENGINE_READ(engine, DMA_FADD_I8XX);
2133 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
2134 upper_32_bits(addr), lower_32_bits(addr));
2135 if (GRAPHICS_VER(i915) >= 4) {
2136 drm_printf(m, "\tIPEIR: 0x%08x\n",
2137 ENGINE_READ(engine, RING_IPEIR));
2138 drm_printf(m, "\tIPEHR: 0x%08x\n",
2139 ENGINE_READ(engine, RING_IPEHR));
2140 } else {
2141 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
2142 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
2143 }
2144
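	/* Execlist state (CSB ring and ELSP ports) is only meaningful when the driver, not the GuC, does submission */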
2145 if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
2146 struct i915_request * const *port, *rq;
2147 const u32 *hws =
2148 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
2149 const u8 num_entries = execlists->csb_size;
2150 unsigned int idx;
2151 u8 read, write;
2152
2153 drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
2154 str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)),
2155 str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)),
2156 repr_timer(&engine->execlists.preempt),
2157 repr_timer(&engine->execlists.timer));
2158
2159 read = execlists->csb_head;
2160 write = READ_ONCE(*execlists->csb_write);
2161
2162 drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
2163 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
2164 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
2165 read, write, num_entries);
2166
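		/*
		 * Normalise the CSB pointers into the [0, num_entries) range and
		 * unwrap the write pointer so the loop below walks only the
		 * entries published since the last read.
		 */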
2167 if (read >= num_entries)
2168 read = 0;
2169 if (write >= num_entries)
2170 write = 0;
2171 if (read > write)
2172 write += num_entries;
2173 while (read < write) {
2174 idx = ++read % num_entries;
2175 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
2176 idx, hws[idx * 2], hws[idx * 2 + 1]);
2177 }
2178
2179 i915_sched_engine_active_lock_bh(engine->sched_engine);
2180 rcu_read_lock();
2181 for (port = execlists->active; (rq = *port); port++) {
2182 char hdr[160];
2183 int len;
2184
2185 len = scnprintf(hdr, sizeof(hdr),
2186 "\t\tActive[%d]: ccid:%08x%s%s, ",
2187 (int)(port - execlists->active),
2188 rq->context->lrc.ccid,
2189 intel_context_is_closed(rq->context) ? "!" : "",
2190 intel_context_is_banned(rq->context) ? "*" : "");
2191 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
2192 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
2193 i915_request_show(m, rq, hdr, 0);
2194 }
2195 for (port = execlists->pending; (rq = *port); port++) {
2196 char hdr[160];
2197 int len;
2198
2199 len = scnprintf(hdr, sizeof(hdr),
2200 "\t\tPending[%d]: ccid:%08x%s%s, ",
2201 (int)(port - execlists->pending),
2202 rq->context->lrc.ccid,
2203 intel_context_is_closed(rq->context) ? "!" : "",
2204 intel_context_is_banned(rq->context) ? "*" : "");
2205 len += print_ring(hdr + len, sizeof(hdr) - len, rq);
2206 scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
2207 i915_request_show(m, rq, hdr, 0);
2208 }
2209 rcu_read_unlock();
2210 i915_sched_engine_active_unlock_bh(engine->sched_engine);
2211 } else if (GRAPHICS_VER(i915) > 6) {
2212 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
2213 ENGINE_READ(engine, RING_PP_DIR_BASE));
2214 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
2215 ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
2216 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
2217 ENGINE_READ(engine, RING_PP_DIR_DCLV));
2218 }
2219 }
2220
2221 static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
2222 {
2223 struct i915_vma_resource *vma_res = rq->batch_res;
2224 void *ring;
2225 int size;
2226
2227 drm_printf(m,
2228 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
2229 rq->head, rq->postfix, rq->tail,
2230 vma_res ? upper_32_bits(vma_res->start) : ~0u,
2231 vma_res ? lower_32_bits(vma_res->start) : ~0u);
2232
2233 size = rq->tail - rq->head;
2234 if (rq->tail < rq->head)
2235 size += rq->ring->size;
2236
2237 ring = kmalloc(size, GFP_ATOMIC);
2238 if (ring) {
2239 const void *vaddr = rq->ring->vaddr;
2240 unsigned int head = rq->head;
2241 unsigned int len = 0;
2242
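		/*
		 * The request may wrap around the end of the ring buffer:
		 * copy the tail portion first, then continue from offset 0.
		 */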
2243 if (rq->tail < head) {
2244 len = rq->ring->size - head;
2245 memcpy(ring, vaddr + head, len);
2246 head = 0;
2247 }
2248 memcpy(ring + len, vaddr + head, size - len);
2249
2250 hexdump(m, ring, size);
2251 kfree(ring);
2252 }
2253 }
2254
2255 static unsigned long read_ul(void *p, size_t x)
2256 {
2257 return *(unsigned long *)(p + x);
2258 }
2259
2260 static void print_properties(struct intel_engine_cs *engine,
2261 struct drm_printer *m)
2262 {
2263 static const struct pmap {
2264 size_t offset;
2265 const char *name;
2266 } props[] = {
2267 #define P(x) { \
2268 .offset = offsetof(typeof(engine->props), x), \
2269 .name = #x \
2270 }
2271 P(heartbeat_interval_ms),
2272 P(max_busywait_duration_ns),
2273 P(preempt_timeout_ms),
2274 P(stop_timeout_ms),
2275 P(timeslice_duration_ms),
2276
2277 {},
2278 #undef P
2279 };
2280 const struct pmap *p;
2281
2282 drm_printf(m, "\tProperties:\n");
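	/* Read each property and its default generically via its byte offset into engine->props / engine->defaults */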
2283 for (p = props; p->name; p++)
2284 drm_printf(m, "\t\t%s: %lu [default %lu]\n",
2285 p->name,
2286 read_ul(&engine->props, p->offset),
2287 read_ul(&engine->defaults, p->offset));
2288 }
2289
2290 static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
2291 {
2292 struct intel_timeline *tl = get_timeline(rq);
2293
2294 i915_request_show(m, rq, msg, 0);
2295
2296 drm_printf(m, "\t\tring->start: 0x%08x\n",
2297 i915_ggtt_offset(rq->ring->vma));
2298 drm_printf(m, "\t\tring->head: 0x%08x\n",
2299 rq->ring->head);
2300 drm_printf(m, "\t\tring->tail: 0x%08x\n",
2301 rq->ring->tail);
2302 drm_printf(m, "\t\tring->emit: 0x%08x\n",
2303 rq->ring->emit);
2304 drm_printf(m, "\t\tring->space: 0x%08x\n",
2305 rq->ring->space);
2306
2307 if (tl) {
2308 drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
2309 tl->hwsp_offset);
2310 intel_timeline_put(tl);
2311 }
2312
2313 print_request_ring(m, rq);
2314
2315 if (rq->context->lrc_reg_state) {
2316 drm_printf(m, "Logical Ring Context:\n");
2317 hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
2318 }
2319 }
2320
2321 void intel_engine_dump_active_requests(struct list_head *requests,
2322 struct i915_request *hung_rq,
2323 struct drm_printer *m)
2324 {
2325 struct i915_request *rq;
2326 const char *msg;
2327 enum i915_request_state state;
2328
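	/* Skip the hung request (reported separately by the caller) and anything not yet at least queued */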
2329 list_for_each_entry(rq, requests, sched.link) {
2330 if (rq == hung_rq)
2331 continue;
2332
2333 state = i915_test_request_state(rq);
2334 if (state < I915_REQUEST_QUEUED)
2335 continue;
2336
2337 if (state == I915_REQUEST_ACTIVE)
2338 msg = "\t\tactive on engine";
2339 else
2340 msg = "\t\tactive in queue";
2341
2342 engine_dump_request(rq, m, msg);
2343 }
2344 }
2345
2346 static void engine_dump_active_requests(struct intel_engine_cs *engine,
2347 struct drm_printer *m)
2348 {
2349 struct intel_context *hung_ce = NULL;
2350 struct i915_request *hung_rq = NULL;
2351
2352 /*
2353 * No need for an engine->irq_seqno_barrier() before the seqno reads.
2354 * The GPU is still running so requests are still executing and any
2355 * hardware reads will be out of date by the time they are reported.
2356 * But the intention here is just to report an instantaneous snapshot
2357 * so that's fine.
2358 */
2359 intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
2360
2361 drm_printf(m, "\tRequests:\n");
2362
2363 if (hung_rq)
2364 engine_dump_request(hung_rq, m, "\t\thung");
2365 else if (hung_ce)
2366 drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
2367
2368 if (intel_uc_uses_guc_submission(&engine->gt->uc))
2369 intel_guc_dump_active_requests(engine, hung_rq, m);
2370 else
2371 intel_execlists_dump_active_requests(engine, hung_rq, m);
2372
2373 if (hung_rq)
2374 i915_request_put(hung_rq);
2375 }
2376
2377 void intel_engine_dump(struct intel_engine_cs *engine,
2378 struct drm_printer *m,
2379 const char *header, ...)
2380 {
2381 struct i915_gpu_error * const error = &engine->i915->gpu_error;
2382 struct i915_request *rq;
2383 intel_wakeref_t wakeref;
2384 ktime_t dummy;
2385
2386 if (header) {
2387 va_list ap;
2388
2389 va_start(ap, header);
2390 drm_vprintf(m, header, &ap);
2391 va_end(ap);
2392 }
2393
2394 if (intel_gt_is_wedged(engine->gt))
2395 drm_printf(m, "*** WEDGED ***\n");
2396
2397 drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
2398 drm_printf(m, "\tBarriers?: %s\n",
2399 str_yes_no(!llist_empty(&engine->barrier_tasks)));
2400 drm_printf(m, "\tLatency: %luus\n",
2401 ewma__engine_latency_read(&engine->latency));
2402 if (intel_engine_supports_stats(engine))
2403 drm_printf(m, "\tRuntime: %llums\n",
2404 ktime_to_ms(intel_engine_get_busy_time(engine,
2405 &dummy)));
2406 drm_printf(m, "\tForcewake: %x domains, %d active\n",
2407 engine->fw_domain, READ_ONCE(engine->fw_active));
2408
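	/* Peek at the in-flight heartbeat pulse, if any, to report how long ago it was emitted */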
2409 rcu_read_lock();
2410 rq = READ_ONCE(engine->heartbeat.systole);
2411 if (rq)
2412 drm_printf(m, "\tHeartbeat: %d ms ago\n",
2413 jiffies_to_msecs(jiffies - rq->emitted_jiffies));
2414 rcu_read_unlock();
2415 drm_printf(m, "\tReset count: %d (global %d)\n",
2416 i915_reset_engine_count(error, engine),
2417 i915_reset_count(error));
2418 print_properties(engine, m);
2419
2420 engine_dump_active_requests(engine, m);
2421
2422 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
2423 wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
2424 if (wakeref) {
2425 intel_engine_print_registers(engine, m);
2426 intel_runtime_pm_put(engine->uncore->rpm, wakeref);
2427 } else {
2428 drm_printf(m, "\tDevice is asleep; skipping register dump\n");
2429 }
2430
2431 intel_execlists_show_requests(engine, m, i915_request_show, 8);
2432
2433 drm_printf(m, "HWSP:\n");
2434 hexdump(m, engine->status_page.addr, PAGE_SIZE);
2435
2436 drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine)));
2437
2438 intel_engine_print_breadcrumbs(engine, m);
2439 }
2440
2441 /**
2442 * intel_engine_get_busy_time() - Return current accumulated engine busyness
2443 * @engine: engine to report on
2444 * @now: monotonic timestamp of sampling
2445 *
2446 * Returns accumulated time @engine was busy since engine stats were enabled.
2447 */
2448 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
2449 {
2450 return engine->busyness(engine, now);
2451 }
2452
2453 struct intel_context *
2454 intel_engine_create_virtual(struct intel_engine_cs **siblings,
2455 unsigned int count, unsigned long flags)
2456 {
2457 if (count == 0)
2458 return ERR_PTR(-EINVAL);
2459
2460 if (count == 1 && !(flags & FORCE_VIRTUAL))
2461 return intel_context_create(siblings[0]);
2462
2463 GEM_BUG_ON(!siblings[0]->cops->create_virtual);
2464 return siblings[0]->cops->create_virtual(siblings, count, flags);
2465 }
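
/*
 * Usage sketch (hypothetical caller, for illustration only): given an array
 * of sibling engines of the same class, a load-balancing virtual context
 * could be requested with:
 *
 *	ce = intel_engine_create_virtual(siblings, num_siblings, 0);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 */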
2466
2467 static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
2468 {
2469 struct i915_request *request, *active = NULL;
2470
2471 /*
2472 * This search does not work in GuC submission mode. However, the GuC
2473 * will report the hanging context directly to the driver itself. So
2474 * the driver should never get here when in GuC mode.
2475 */
2476 GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));
2477
2478 /*
2479 * We are called by error capture, reset, and engine-state dumping at
2480 * random points in time. In particular, none of these is
2481 * crucially ordered with an interrupt. After a hang, the GPU is dead
2482 * and we assume that no more writes can happen (we waited long enough
2483 * for all writes that were in flight to be flushed) - adding an
2484 * extra delay for a recent interrupt is pointless. Hence, we do
2485 * not need an engine->irq_seqno_barrier() before the seqno reads.
2486 * At all other times, we must assume the GPU is still running, but
2487 * we only care about the snapshot of this moment.
2488 */
2489 lockdep_assert_held(&engine->sched_engine->lock);
2490
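	/*
	 * Prefer the request currently resident in ELSP: walk back along its
	 * timeline to the oldest request that has not yet completed, since
	 * that is the one the CS is actually stuck on. Failing that, fall
	 * back to the first request on the scheduler list still marked active.
	 */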
2491 rcu_read_lock();
2492 request = execlists_active(&engine->execlists);
2493 if (request) {
2494 struct intel_timeline *tl = request->context->timeline;
2495
2496 list_for_each_entry_from_reverse(request, &tl->requests, link) {
2497 if (__i915_request_is_complete(request))
2498 break;
2499
2500 active = request;
2501 }
2502 }
2503 rcu_read_unlock();
2504 if (active)
2505 return active;
2506
2507 list_for_each_entry(request, &engine->sched_engine->requests,
2508 sched.link) {
2509 if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
2510 continue;
2511
2512 active = request;
2513 break;
2514 }
2515
2516 return active;
2517 }
2518
2519 void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
2520 struct intel_context **ce, struct i915_request **rq)
2521 {
2522 unsigned long flags;
2523
2524 *ce = intel_engine_get_hung_context(engine);
2525 if (*ce) {
2526 intel_engine_clear_hung_context(engine);
2527
2528 *rq = intel_context_get_active_request(*ce);
2529 return;
2530 }
2531
2532 /*
2533 * Getting here with GuC enabled means it is a forced error capture
2534 * with no actual hang. So, no need to attempt the execlist search.
2535 */
2536 if (intel_uc_uses_guc_submission(&engine->gt->uc))
2537 return;
2538
2539 spin_lock_irqsave(&engine->sched_engine->lock, flags);
2540 *rq = engine_execlist_find_hung_request(engine);
2541 if (*rq)
2542 *rq = i915_request_get_rcu(*rq);
2543 spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
2544 }
2545
2546 void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
2547 {
2548 /*
2549 * If there are any non-fused-off CCS engines, we need to enable CCS
2550 * support in the RCU_MODE register. This only needs to be done once,
2551 * so for simplicity we'll take care of this in the RCS engine's
2552 * resume handler; since the RCS and all CCS engines belong to the
2553 * same reset domain and are reset together, this will also take care
2554 * of re-applying the setting after i915-triggered resets.
2555 */
2556 if (!CCS_MASK(engine->gt))
2557 return;
2558
2559 intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
2560 _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
2561 }
2562
2563 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2564 #include "mock_engine.c"
2565 #include "selftest_engine.c"
2566 #include "selftest_engine_cs.c"
2567 #endif
2568